title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
Support merging DataFrames on a combo of columns and index levels (GH 14355) | diff --git a/doc/source/merging.rst b/doc/source/merging.rst
index 7d981b815d01b..86d2ec2254057 100644
--- a/doc/source/merging.rst
+++ b/doc/source/merging.rst
@@ -518,14 +518,16 @@ standard database join operations between DataFrame objects:
- ``left``: A DataFrame object
- ``right``: Another DataFrame object
-- ``on``: Columns (names) to join on. Must be found in both the left and
- right DataFrame objects. If not passed and ``left_index`` and
+- ``on``: Column or index level names to join on. Must be found in both the left
+ and right DataFrame objects. If not passed and ``left_index`` and
``right_index`` are ``False``, the intersection of the columns in the
DataFrames will be inferred to be the join keys
-- ``left_on``: Columns from the left DataFrame to use as keys. Can either be
- column names or arrays with length equal to the length of the DataFrame
-- ``right_on``: Columns from the right DataFrame to use as keys. Can either be
- column names or arrays with length equal to the length of the DataFrame
+- ``left_on``: Columns or index levels from the left DataFrame to use as
+ keys. Can either be column names, index level names, or arrays with length
+ equal to the length of the DataFrame
+- ``right_on``: Columns or index levels from the right DataFrame to use as
+ keys. Can either be column names, index level names, or arrays with length
+ equal to the length of the DataFrame
- ``left_index``: If ``True``, use the index (row labels) from the left
DataFrame as its join key(s). In the case of a DataFrame with a MultiIndex
(hierarchical), the number of levels must match the number of join keys
@@ -563,6 +565,10 @@ standard database join operations between DataFrame objects:
.. versionadded:: 0.21.0
+.. note::
+
+ Support for specifying index levels as the ``on``, ``left_on``, and
+ ``right_on`` parameters was added in version 0.22.0.
The return type will be the same as ``left``. If ``left`` is a ``DataFrame``
and ``right`` is a subclass of DataFrame, the return type will still be
@@ -1121,6 +1127,56 @@ This is not Implemented via ``join`` at-the-moment, however it can be done using
labels=['left', 'right'], vertical=False);
plt.close('all');
+.. _merging.merge_on_columns_and_levels:
+
+Merging on a combination of columns and index levels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 0.22
+
+Strings passed as the ``on``, ``left_on``, and ``right_on`` parameters
+may refer to either column names or index level names. This enables merging
+``DataFrame`` instances on a combination of index levels and columns without
+resetting indexes.
+
+.. ipython:: python
+
+ left_index = pd.Index(['K0', 'K0', 'K1', 'K2'], name='key1')
+
+ left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
+ 'B': ['B0', 'B1', 'B2', 'B3'],
+ 'key2': ['K0', 'K1', 'K0', 'K1']},
+ index=left_index)
+
+ right_index = pd.Index(['K0', 'K1', 'K2', 'K2'], name='key1')
+
+ right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'],
+ 'D': ['D0', 'D1', 'D2', 'D3'],
+ 'key2': ['K0', 'K0', 'K0', 'K1']},
+ index=right_index)
+
+ result = left.merge(right, on=['key1', 'key2'])
+
+.. ipython:: python
+ :suppress:
+
+ @savefig merge_on_index_and_column.png
+ p.plot([left, right], result,
+ labels=['left', 'right'], vertical=False);
+ plt.close('all');
+
+.. note::
+
+ When DataFrames are merged on a string that matches an index level in both
+ frames, the index level is preserved as an index level in the resulting
+ DataFrame.
+
+.. note::
+
+ If a string matches both a column name and an index level name, then a
+ warning is issued and the column takes precedence. This will result in an
+ ambiguity error in a future version.
+
Overlapping value columns
~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 4a4d60b4dfbb2..6a16ed2bcaac5 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -32,6 +32,37 @@ The :func:`get_dummies` now accepts a ``dtype`` argument, which specifies a dtyp
pd.get_dummies(df, columns=['c'], dtype=bool).dtypes
+.. _whatsnew_0220.enhancements.merge_on_columns_and_levels:
+
+Merging on a combination of columns and index levels
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Strings passed to :meth:`DataFrame.merge` as the ``on``, ``left_on``, and ``right_on``
+parameters may now refer to either column names or index level names.
+This enables merging ``DataFrame`` instances on a combination of index levels
+and columns without resetting indexes. See the :ref:`Merge on columns and
+levels <merging.merge_on_columns_and_levels>` documentation section.
+(:issue:`14355`)
+
+.. ipython:: python
+
+ left_index = pd.Index(['K0', 'K0', 'K1', 'K2'], name='key1')
+
+ left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
+ 'B': ['B0', 'B1', 'B2', 'B3'],
+ 'key2': ['K0', 'K1', 'K0', 'K1']},
+ index=left_index)
+
+ right_index = pd.Index(['K0', 'K1', 'K2', 'K2'], name='key1')
+
+ right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'],
+ 'D': ['D0', 'D1', 'D2', 'D3'],
+ 'key2': ['K0', 'K0', 'K0', 'K1']},
+ index=right_index)
+
+ left.merge(right, on=['key1', 'key2'])
+
+
.. _whatsnew_0220.enhancements.other:
Other Enhancements
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 7145fa709c345..e28d14aacdce3 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -147,16 +147,17 @@
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys
on : label or list
- Field names to join on. Must be found in both DataFrames. If on is
- None and not merging on indexes, then it merges on the intersection of
- the columns by default.
+ Column or index level names to join on. These must be found in both
+ DataFrames. If `on` is None and not merging on indexes then this defaults
+ to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
- Field names to join on in left DataFrame. Can be a vector or list of
- vectors of the length of the DataFrame to use a particular vector as
- the join key instead of columns
+ Column or index level names to join on in the left DataFrame. Can also
+ be an array or list of arrays of the length of the left DataFrame.
+ These arrays are treated as if they are columns.
right_on : label or list, or array-like
- Field names to join on in right DataFrame or vector/list of vectors per
- left_on docs
+ Column or index level names to join on in the right DataFrame. Can also
+ be an array or list of arrays of the length of the right DataFrame.
+ These arrays are treated as if they are columns.
left_index : boolean, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
@@ -195,6 +196,11 @@
.. versionadded:: 0.21.0
+Notes
+-----
+Support for specifying index levels as the `on`, `left_on`, and
+`right_on` parameters was added in version 0.22.0
+
Examples
--------
@@ -5196,12 +5202,12 @@ def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame
- on : column name, tuple/list of column names, or array-like
- Column(s) in the caller to join on the index in other,
- otherwise joins index-on-index. If multiples
- columns given, the passed DataFrame must have a MultiIndex. Can
- pass an array as the join key if not already contained in the
- calling DataFrame. Like an Excel VLOOKUP operation
+ on : name, tuple/list of names, or array-like
+ Column or index level name(s) in the caller to join on the index
+ in `other`, otherwise joins index-on-index. If multiple
+ values given, the `other` DataFrame must have a MultiIndex. Can
+ pass an array as the join key if it is not already contained in
+ the calling DataFrame. Like an Excel VLOOKUP operation
how : {'left', 'right', 'outer', 'inner'}, default: 'left'
How to handle the operation of the two objects.
@@ -5226,6 +5232,9 @@ def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
on, lsuffix, and rsuffix options are not supported when passing a list
of DataFrame objects
+ Support for specifying index levels as the `on` parameter was added
+ in version 0.22.0
+
Examples
--------
>>> caller = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 782971a742b54..876349d856dea 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -27,6 +27,7 @@
is_re_compilable,
pandas_dtype)
from pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask
+from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.generic import ABCSeries, ABCPanel, ABCDataFrame
from pandas.core.common import (_count_not_none,
@@ -36,7 +37,7 @@
from pandas.core.base import PandasObject, SelectionMixin
from pandas.core.index import (Index, MultiIndex, _ensure_index,
- InvalidIndexError)
+ InvalidIndexError, RangeIndex)
import pandas.core.indexing as indexing
from pandas.core.indexing import maybe_convert_indices
from pandas.core.indexes.datetimes import DatetimeIndex
@@ -1038,6 +1039,313 @@ def equals(self, other):
return False
return self._data.equals(other._data)
+ # -------------------------------------------------------------------------
+ # Label or Level Combination Helpers
+ #
+ # A collection of helper methods for DataFrame/Series operations that
+ # accept a combination of column/index labels and levels. All such
+ # operations should utilize/extend these methods when possible so that we
+ # have consistent precedence and validation logic throughout the library.
+
+ def _is_level_reference(self, key, axis=0):
+ """
+ Test whether a key is a level reference for a given axis.
+
+ To be considered a level reference, `key` must be a string that:
+ - (axis=0): Matches the name of an index level and does NOT match
+ a column label.
+ - (axis=1): Matches the name of a column level and does NOT match
+ an index label.
+
+ Parameters
+ ----------
+ key: str
+ Potential level name for the given axis
+ axis: int, default 0
+ Axis that levels are associated with (0 for index, 1 for columns)
+
+ Returns
+ -------
+ is_level: bool
+ """
+ axis = self._get_axis_number(axis)
+
+ if self.ndim > 2:
+ raise NotImplementedError(
+ "_is_level_reference is not implemented for {type}"
+ .format(type=type(self)))
+
+ return (key is not None and
+ is_hashable(key) and
+ key in self.axes[axis].names and
+ not self._is_label_reference(key, axis=axis))
+
+ def _is_label_reference(self, key, axis=0):
+ """
+ Test whether a key is a label reference for a given axis.
+
+ To be considered a label reference, `key` must be a string that:
+ - (axis=0): Matches a column label
+ - (axis=1): Matches an index label
+
+ Parameters
+ ----------
+ key: str
+ Potential label name
+ axis: int, default 0
+ Axis perpendicular to the axis that labels are associated with
+ (0 means search for column labels, 1 means search for index labels)
+
+ Returns
+ -------
+ is_label: bool
+ """
+ axis = self._get_axis_number(axis)
+ other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
+
+ if self.ndim > 2:
+ raise NotImplementedError(
+ "_is_label_reference is not implemented for {type}"
+ .format(type=type(self)))
+
+ return (key is not None and
+ is_hashable(key) and
+ any(key in self.axes[ax] for ax in other_axes))
+
+ def _is_label_or_level_reference(self, key, axis=0):
+ """
+ Test whether a key is a label or level reference for a given axis.
+
+ To be considered either a label or a level reference, `key` must be a
+ string that:
+ - (axis=0): Matches a column label or an index level
+ - (axis=1): Matches an index label or a column level
+
+ Parameters
+ ----------
+ key: str
+ Potential label or level name
+ axis: int, default 0
+ Axis that levels are associated with (0 for index, 1 for columns)
+
+ Returns
+ -------
+ is_label_or_level: bool
+ """
+
+ if self.ndim > 2:
+ raise NotImplementedError(
+ "_is_label_or_level_reference is not implemented for {type}"
+ .format(type=type(self)))
+
+ return (self._is_level_reference(key, axis=axis) or
+ self._is_label_reference(key, axis=axis))
+
+ def _check_label_or_level_ambiguity(self, key, axis=0):
+ """
+ Check whether `key` matches both a level of the input `axis` and a
+ label of the other axis and raise a ``FutureWarning`` if this is the
+ case.
+
+ Note: This method will be altered to raise an ambiguity exception in
+ a future version.
+
+ Parameters
+ ----------
+ key: str or object
+ label or level name
+
+ axis: int, default 0
+ Axis that levels are associated with (0 for index, 1 for columns)
+
+ Returns
+ -------
+ ambiguous: bool
+
+ Raises
+ ------
+ FutureWarning
+ if `key` is ambiguous. This will become an ambiguity error in a
+ future version
+ """
+
+ axis = self._get_axis_number(axis)
+ other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
+
+ if self.ndim > 2:
+ raise NotImplementedError(
+ "_check_label_or_level_ambiguity is not implemented for {type}"
+ .format(type=type(self)))
+
+ if (key is not None and
+ is_hashable(key) and
+ key in self.axes[axis].names and
+ any(key in self.axes[ax] for ax in other_axes)):
+
+ # Build an informative and grammatical warning
+ level_article, level_type = (('an', 'index')
+ if axis == 0 else
+ ('a', 'column'))
+
+ label_article, label_type = (('a', 'column')
+ if axis == 0 else
+ ('an', 'index'))
+
+ msg = ("'{key}' is both {level_article} {level_type} level and "
+ "{label_article} {label_type} label.\n"
+ "Defaulting to {label_type}, but this will raise an "
+ "ambiguity error in a future version"
+ ).format(key=key,
+ level_article=level_article,
+ level_type=level_type,
+ label_article=label_article,
+ label_type=label_type)
+
+ warnings.warn(msg, FutureWarning, stacklevel=2)
+ return True
+ else:
+ return False
+
+ def _get_label_or_level_values(self, key, axis=0):
+ """
+ Return a 1-D array of values associated with `key`, a label or level
+ from the given `axis`.
+
+ Retrieval logic:
+ - (axis=0): Return column values if `key` matches a column label.
+ Otherwise return index level values if `key` matches an index
+ level.
+ - (axis=1): Return row values if `key` matches an index label.
+ Otherwise return column level values if 'key' matches a column
+ level
+
+ Parameters
+ ----------
+ key: str
+ Label or level name.
+ axis: int, default 0
+ Axis that levels are associated with (0 for index, 1 for columns)
+
+ Returns
+ -------
+ values: np.ndarray
+
+ Raises
+ ------
+ KeyError
+ if `key` matches neither a label nor a level
+ ValueError
+ if `key` matches multiple labels
+ """
+
+ axis = self._get_axis_number(axis)
+ other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
+
+ if self.ndim > 2:
+ raise NotImplementedError(
+ "_get_label_or_level_values is not implemented for {type}"
+ .format(type=type(self)))
+
+ if self._is_label_reference(key, axis=axis):
+ self._check_label_or_level_ambiguity(key, axis=axis)
+ values = self.xs(key, axis=other_axes[0])._values
+ elif self._is_level_reference(key, axis=axis):
+ values = self.axes[axis].get_level_values(key)._values
+ else:
+ raise KeyError(key)
+
+ # Check for duplicates
+ if values.ndim > 1:
+ label_axis_name = 'column' if axis == 0 else 'index'
+ raise ValueError(("The {label_axis_name} label '{key}' "
+ "is not unique")
+ .format(key=key,
+ label_axis_name=label_axis_name))
+
+ return values
+
+ def _drop_labels_or_levels(self, keys, axis=0):
+ """
+ Drop labels and/or levels for the given `axis`.
+
+ For each key in `keys`:
+ - (axis=0): If key matches a column label then drop the column.
+ Otherwise if key matches an index level then drop the level.
+ - (axis=1): If key matches an index label then drop the row.
+ Otherwise if key matches a column level then drop the level.
+
+ Parameters
+ ----------
+ keys: str or list of str
+ labels or levels to drop
+ axis: int, default 0
+ Axis that levels are associated with (0 for index, 1 for columns)
+
+ Returns
+ -------
+ dropped: DataFrame
+
+ Raises
+ ------
+ ValueError
+ if any `keys` match neither a label nor a level
+ """
+
+ axis = self._get_axis_number(axis)
+
+ if self.ndim > 2:
+ raise NotImplementedError(
+ "_drop_labels_or_levels is not implemented for {type}"
+ .format(type=type(self)))
+
+ # Validate keys
+ keys = com._maybe_make_list(keys)
+ invalid_keys = [k for k in keys if not
+ self._is_label_or_level_reference(k, axis=axis)]
+
+ if invalid_keys:
+ raise ValueError(("The following keys are not valid labels or "
+ "levels for axis {axis}: {invalid_keys}")
+ .format(axis=axis,
+ invalid_keys=invalid_keys))
+
+ # Compute levels and labels to drop
+ levels_to_drop = [k for k in keys
+ if self._is_level_reference(k, axis=axis)]
+
+ labels_to_drop = [k for k in keys
+ if not self._is_level_reference(k, axis=axis)]
+
+ # Perform copy upfront and then use inplace operations below.
+ # This ensures that we always perform exactly one copy.
+ # ``copy`` and/or ``inplace`` options could be added in the future.
+ dropped = self.copy()
+
+ if axis == 0:
+ # Handle dropping index levels
+ if levels_to_drop:
+ dropped.reset_index(levels_to_drop, drop=True, inplace=True)
+
+ # Handle dropping columns labels
+ if labels_to_drop:
+ dropped.drop(labels_to_drop, axis=1, inplace=True)
+ else:
+ # Handle dropping column levels
+ if levels_to_drop:
+ if isinstance(dropped.columns, MultiIndex):
+ # Drop the specified levels from the MultiIndex
+ dropped.columns = dropped.columns.droplevel(levels_to_drop)
+ else:
+ # Drop the last level of Index by replacing with
+ # a RangeIndex
+ dropped.columns = RangeIndex(dropped.columns.size)
+
+ # Handle dropping index labels
+ if labels_to_drop:
+ dropped.drop(labels_to_drop, axis=0, inplace=True)
+
+ return dropped
+
# ----------------------------------------------------------------------
# Iteration
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 8338df33f5cde..d564e80b6068e 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -2830,16 +2830,11 @@ def is_in_obj(gpr):
elif is_in_axis(gpr): # df.groupby('name')
if gpr in obj:
- if validate and gpr in obj.index.names:
- warnings.warn(
- ("'%s' is both a column name and an index level.\n"
- "Defaulting to column but "
- "this will raise an ambiguity error in a "
- "future version") % gpr,
- FutureWarning, stacklevel=5)
+ if validate:
+ obj._check_label_or_level_ambiguity(gpr)
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
- elif gpr in obj.index.names:
+ elif obj._is_level_reference(gpr):
in_axis, name, level, gpr = False, None, gpr, None
else:
raise KeyError(gpr)
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 412c00dc95ec0..ec30e32f7f374 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -603,6 +603,8 @@ def get_result(self):
self._maybe_add_join_keys(result, left_indexer, right_indexer)
+ self._maybe_restore_index_levels(result)
+
return result
def _indicator_pre_merge(self, left, right):
@@ -645,6 +647,39 @@ def _indicator_post_merge(self, result):
axis=1)
return result
+ def _maybe_restore_index_levels(self, result):
+ """
+ Restore index levels specified as `on` parameters
+
+ Here we check for cases where `self.left_on` and `self.right_on` pairs
+ each reference an index level in their respective DataFrames. The
+ joined columns corresponding to these pairs are then restored to the
+ index of `result`.
+
+ **Note:** This method has side effects. It modifies `result` in-place
+
+ Parameters
+ ----------
+ result: DataFrame
+ merge result
+
+ Returns
+ -------
+ None
+ """
+ names_to_restore = []
+ for name, left_key, right_key in zip(self.join_names,
+ self.left_on,
+ self.right_on):
+ if (self.orig_left._is_level_reference(left_key) and
+ self.orig_right._is_level_reference(right_key) and
+ name not in result.index.names):
+
+ names_to_restore.append(name)
+
+ if names_to_restore:
+ result.set_index(names_to_restore, inplace=True)
+
def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
left_has_missing = None
@@ -714,8 +749,17 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
else:
key_col = Index(lvals).where(~mask, rvals)
- if name in result:
+ if result._is_label_reference(name):
result[name] = key_col
+ elif result._is_level_reference(name):
+ if isinstance(result.index, MultiIndex):
+ idx_list = [result.index.get_level_values(level_name)
+ if level_name != name else key_col
+ for level_name in result.index.names]
+
+ result.set_index(idx_list, inplace=True)
+ else:
+ result.index = Index(key_col, name=name)
else:
result.insert(i, name or 'key_{i}'.format(i=i), key_col)
@@ -812,7 +856,8 @@ def _get_merge_keys(self):
join_names.append(None) # what to do?
else:
if rk is not None:
- right_keys.append(right[rk]._values)
+ right_keys.append(
+ right._get_label_or_level_values(rk))
join_names.append(rk)
else:
# work-around for merge_asof(right_index=True)
@@ -821,7 +866,8 @@ def _get_merge_keys(self):
else:
if not is_rkey(rk):
if rk is not None:
- right_keys.append(right[rk]._values)
+ right_keys.append(
+ right._get_label_or_level_values(rk))
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
@@ -834,7 +880,7 @@ def _get_merge_keys(self):
else:
right_keys.append(rk)
if lk is not None:
- left_keys.append(left[lk]._values)
+ left_keys.append(left._get_label_or_level_values(lk))
join_names.append(lk)
else:
# work-around for merge_asof(left_index=True)
@@ -846,7 +892,7 @@ def _get_merge_keys(self):
left_keys.append(k)
join_names.append(None)
else:
- left_keys.append(left[k]._values)
+ left_keys.append(left._get_label_or_level_values(k))
join_names.append(k)
if isinstance(self.right.index, MultiIndex):
right_keys = [lev._values.take(lab)
@@ -860,7 +906,7 @@ def _get_merge_keys(self):
right_keys.append(k)
join_names.append(None)
else:
- right_keys.append(right[k]._values)
+ right_keys.append(right._get_label_or_level_values(k))
join_names.append(k)
if isinstance(self.left.index, MultiIndex):
left_keys = [lev._values.take(lab)
@@ -870,10 +916,10 @@ def _get_merge_keys(self):
left_keys = [self.left.index.values]
if left_drop:
- self.left = self.left.drop(left_drop, axis=1)
+ self.left = self.left._drop_labels_or_levels(left_drop)
if right_drop:
- self.right = self.right.drop(right_drop, axis=1)
+ self.right = self.right._drop_labels_or_levels(right_drop)
return left_keys, right_keys, join_names
diff --git a/pandas/tests/generic/test_label_or_level_utils.py b/pandas/tests/generic/test_label_or_level_utils.py
new file mode 100644
index 0000000000000..456cb48020500
--- /dev/null
+++ b/pandas/tests/generic/test_label_or_level_utils.py
@@ -0,0 +1,431 @@
+import pytest
+import pandas as pd
+import pandas.util.testing as tm
+from pandas.core.dtypes.missing import array_equivalent
+
+
+# Fixtures
+# ========
+@pytest.fixture
+def df():
+ """DataFrame with columns 'L1', 'L2', and 'L3' """
+ return pd.DataFrame({'L1': [1, 2, 3],
+ 'L2': [11, 12, 13],
+ 'L3': ['A', 'B', 'C']})
+
+
+@pytest.fixture(params=[[], ['L1'], ['L1', 'L2'], ['L1', 'L2', 'L3']])
+def df_levels(request, df):
+ """DataFrame with columns or index levels 'L1', 'L2', and 'L3' """
+ levels = request.param
+
+ if levels:
+ df = df.set_index(levels)
+
+ return df
+
+
+@pytest.fixture
+def df_ambig(df):
+ """DataFrame with levels 'L1' and 'L2' and labels 'L1' and 'L3' """
+ df = df.set_index(['L1', 'L2'])
+
+ df['L1'] = df['L3']
+
+ return df
+
+
+@pytest.fixture
+def df_duplabels(df):
+ """DataFrame with level 'L1' and labels 'L2', 'L3', and 'L2' """
+ df = df.set_index(['L1'])
+ df = pd.concat([df, df['L2']], axis=1)
+
+ return df
+
+
+@pytest.fixture
+def panel():
+ with tm.assert_produces_warning(DeprecationWarning,
+ check_stacklevel=False):
+ return pd.Panel()
+
+
+# Test is label/level reference
+# =============================
+def get_labels_levels(df_levels):
+ expected_labels = list(df_levels.columns)
+ expected_levels = [name for name in df_levels.index.names
+ if name is not None]
+ return expected_labels, expected_levels
+
+
+def assert_label_reference(frame, labels, axis):
+ for label in labels:
+ assert frame._is_label_reference(label, axis=axis)
+ assert not frame._is_level_reference(label, axis=axis)
+ assert frame._is_label_or_level_reference(label, axis=axis)
+
+
+def assert_level_reference(frame, levels, axis):
+ for level in levels:
+ assert frame._is_level_reference(level, axis=axis)
+ assert not frame._is_label_reference(level, axis=axis)
+ assert frame._is_label_or_level_reference(level, axis=axis)
+
+
+# DataFrame
+# ---------
+@pytest.mark.parametrize('axis', [0, 1])
+def test_is_level_or_label_reference_df_simple(df_levels, axis):
+
+ # Compute expected labels and levels
+ expected_labels, expected_levels = get_labels_levels(df_levels)
+
+ # Transpose frame if axis == 1
+ if axis == 1:
+ df_levels = df_levels.T
+
+ # Perform checks
+ assert_level_reference(df_levels, expected_levels, axis=axis)
+ assert_label_reference(df_levels, expected_labels, axis=axis)
+
+
+@pytest.mark.parametrize('axis', [0, 1])
+def test_is_level_reference_df_ambig(df_ambig, axis):
+
+ # Transpose frame if axis == 1
+ if axis == 1:
+ df_ambig = df_ambig.T
+
+ # df has both an on-axis level and off-axis label named L1
+ # Therefore L1 should reference the label, not the level
+ assert_label_reference(df_ambig, ['L1'], axis=axis)
+
+ # df has an on-axis level named L2 and it is not ambiguous
+ # Therefore L2 is an level reference
+ assert_level_reference(df_ambig, ['L2'], axis=axis)
+
+ # df has a column named L3 and it not an level reference
+ assert_label_reference(df_ambig, ['L3'], axis=axis)
+
+
+# Series
+# ------
+def test_is_level_reference_series_simple_axis0(df):
+
+ # Make series with L1 as index
+ s = df.set_index('L1').L2
+ assert_level_reference(s, ['L1'], axis=0)
+ assert not s._is_level_reference('L2')
+
+ # Make series with L1 and L2 as index
+ s = df.set_index(['L1', 'L2']).L3
+ assert_level_reference(s, ['L1', 'L2'], axis=0)
+ assert not s._is_level_reference('L3')
+
+
+def test_is_level_reference_series_axis1_error(df):
+
+ # Make series with L1 as index
+ s = df.set_index('L1').L2
+
+ with tm.assert_raises_regex(ValueError, "No axis named 1"):
+ s._is_level_reference('L1', axis=1)
+
+
+# Panel
+# -----
+def test_is_level_reference_panel_error(panel):
+ msg = ("_is_level_reference is not implemented for {type}"
+ .format(type=type(panel)))
+
+ with tm.assert_raises_regex(NotImplementedError, msg):
+ panel._is_level_reference('L1', axis=0)
+
+
+def test_is_label_reference_panel_error(panel):
+ msg = ("_is_label_reference is not implemented for {type}"
+ .format(type=type(panel)))
+
+ with tm.assert_raises_regex(NotImplementedError, msg):
+ panel._is_label_reference('L1', axis=0)
+
+
+def test_is_label_or_level_reference_panel_error(panel):
+ msg = ("_is_label_or_level_reference is not implemented for {type}"
+ .format(type=type(panel)))
+
+ with tm.assert_raises_regex(NotImplementedError, msg):
+ panel._is_label_or_level_reference('L1', axis=0)
+
+
+# Test _check_label_or_level_ambiguity_df
+# =======================================
+
+# DataFrame
+# ---------
+@pytest.mark.parametrize('axis', [0, 1])
+def test_check_label_or_level_ambiguity_df(df_ambig, axis):
+
+ # Transpose frame if axis == 1
+ if axis == 1:
+ df_ambig = df_ambig.T
+
+ # df_ambig has both an on-axis level and off-axis label named L1
+ # Therefore L1 is ambiguous
+ with tm.assert_produces_warning(FutureWarning,
+ clear=True,
+ check_stacklevel=False) as w:
+
+ assert df_ambig._check_label_or_level_ambiguity('L1', axis=axis)
+ warning_msg = w[0].message.args[0]
+ if axis == 0:
+ assert warning_msg.startswith("'L1' is both an index level "
+ "and a column label")
+ else:
+ assert warning_msg.startswith("'L1' is both a column level "
+ "and an index label")
+
+ # df_ambig has an on-axis level named L2 and it is not ambiguous
+ # No warning should be raised
+ with tm.assert_produces_warning(None):
+ assert not df_ambig._check_label_or_level_ambiguity('L2', axis=axis)
+
+ # df_ambig has an off-axis label named L3 and it is not ambiguous
+ with tm.assert_produces_warning(None):
+ assert not df_ambig._is_level_reference('L3', axis=axis)
+
+
+# Series
+# ------
+def test_check_label_or_level_ambiguity_series(df):
+
+ # A series has no columns and therefore references are never ambiguous
+
+ # Make series with L1 as index
+ s = df.set_index('L1').L2
+ with tm.assert_produces_warning(None):
+ assert not s._check_label_or_level_ambiguity('L1', axis=0)
+ assert not s._check_label_or_level_ambiguity('L2', axis=0)
+
+ # Make series with L1 and L2 as index
+ s = df.set_index(['L1', 'L2']).L3
+ with tm.assert_produces_warning(None):
+ assert not s._check_label_or_level_ambiguity('L1', axis=0)
+ assert not s._check_label_or_level_ambiguity('L2', axis=0)
+ assert not s._check_label_or_level_ambiguity('L3', axis=0)
+
+
+def test_check_label_or_level_ambiguity_series_axis1_error(df):
+
+ # Make series with L1 as index
+ s = df.set_index('L1').L2
+
+ with tm.assert_raises_regex(ValueError, "No axis named 1"):
+ s._check_label_or_level_ambiguity('L1', axis=1)
+
+
+# Panel
+# -----
+def test_check_label_or_level_ambiguity_panel_error(panel):
+ msg = ("_check_label_or_level_ambiguity is not implemented for {type}"
+ .format(type=type(panel)))
+
+ with tm.assert_raises_regex(NotImplementedError, msg):
+ panel._check_label_or_level_ambiguity('L1', axis=0)
+
+
+# Test _get_label_or_level_values
+# ===============================
+def assert_label_values(frame, labels, axis):
+ for label in labels:
+ if axis == 0:
+ expected = frame[label]._values
+ else:
+ expected = frame.loc[label]._values
+
+ result = frame._get_label_or_level_values(label, axis=axis)
+ assert array_equivalent(expected, result)
+
+
+def assert_level_values(frame, levels, axis):
+ for level in levels:
+ if axis == 0:
+ expected = frame.index.get_level_values(level=level)._values
+ else:
+ expected = (frame.columns
+ .get_level_values(level=level)
+ ._values)
+
+ result = frame._get_label_or_level_values(level, axis=axis)
+ assert array_equivalent(expected, result)
+
+
+# DataFrame
+# ---------
+@pytest.mark.parametrize('axis', [0, 1])
+def test_get_label_or_level_values_df_simple(df_levels, axis):
+
+ # Compute expected labels and levels
+ expected_labels, expected_levels = get_labels_levels(df_levels)
+
+ # Transpose frame if axis == 1
+ if axis == 1:
+ df_levels = df_levels.T
+
+ # Perform checks
+ assert_label_values(df_levels, expected_labels, axis=axis)
+ assert_level_values(df_levels, expected_levels, axis=axis)
+
+
+@pytest.mark.parametrize('axis', [0, 1])
+def test_get_label_or_level_values_df_ambig(df_ambig, axis):
+
+ # Transpose frame if axis == 1
+ if axis == 1:
+ df_ambig = df_ambig.T
+
+ # df has both an on-axis level and off-axis label named L1
+ # Therefore L1 is ambiguous but will default to label
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ assert_label_values(df_ambig, ['L1'], axis=axis)
+
+ # df has an on-axis level named L2 and it is not ambiguous
+ with tm.assert_produces_warning(None):
+ assert_level_values(df_ambig, ['L2'], axis=axis)
+
+ # df has an off-axis label named L3 and it is not ambiguous
+ with tm.assert_produces_warning(None):
+ assert_label_values(df_ambig, ['L3'], axis=axis)
+
+
+@pytest.mark.parametrize('axis', [0, 1])
+def test_get_label_or_level_values_df_duplabels(df_duplabels, axis):
+
+ # Transpose frame if axis == 1
+ if axis == 1:
+ df_duplabels = df_duplabels.T
+
+ # df has unambiguous level 'L1'
+ assert_level_values(df_duplabels, ['L1'], axis=axis)
+
+ # df has unique label 'L3'
+ assert_label_values(df_duplabels, ['L3'], axis=axis)
+
+ # df has duplicate labels 'L2'
+ if axis == 0:
+ expected_msg = "The column label 'L2' is not unique"
+ else:
+ expected_msg = "The index label 'L2' is not unique"
+
+ with tm.assert_raises_regex(ValueError, expected_msg):
+ assert_label_values(df_duplabels, ['L2'], axis=axis)
+
+
+# Series
+# ------
+def test_get_label_or_level_values_series_axis0(df):
+
+ # Make series with L1 as index
+ s = df.set_index('L1').L2
+ assert_level_values(s, ['L1'], axis=0)
+
+ # Make series with L1 and L2 as index
+ s = df.set_index(['L1', 'L2']).L3
+ assert_level_values(s, ['L1', 'L2'], axis=0)
+
+
+def test_get_label_or_level_values_series_axis1_error(df):
+
+ # Make series with L1 as index
+ s = df.set_index('L1').L2
+
+ with tm.assert_raises_regex(ValueError, "No axis named 1"):
+ s._get_label_or_level_values('L1', axis=1)
+
+
+# Panel
+# -----
+def test_get_label_or_level_values_panel_error(panel):
+ msg = ("_get_label_or_level_values is not implemented for {type}"
+ .format(type=type(panel)))
+
+ with tm.assert_raises_regex(NotImplementedError, msg):
+ panel._get_label_or_level_values('L1', axis=0)
+
+
+# Test _drop_labels_or_levels
+# ===========================
+def assert_labels_dropped(frame, labels, axis):
+ for label in labels:
+ df_dropped = frame._drop_labels_or_levels(label, axis=axis)
+
+ if axis == 0:
+ assert label in frame.columns
+ assert label not in df_dropped.columns
+ else:
+ assert label in frame.index
+ assert label not in df_dropped.index
+
+
+def assert_levels_dropped(frame, levels, axis):
+ for level in levels:
+ df_dropped = frame._drop_labels_or_levels(level, axis=axis)
+
+ if axis == 0:
+ assert level in frame.index.names
+ assert level not in df_dropped.index.names
+ else:
+ assert level in frame.columns.names
+ assert level not in df_dropped.columns.names
+
+
+# DataFrame
+# ---------
+@pytest.mark.parametrize('axis', [0, 1])
+def test_drop_labels_or_levels_df(df_levels, axis):
+
+ # Compute expected labels and levels
+ expected_labels, expected_levels = get_labels_levels(df_levels)
+
+ # Transpose frame if axis == 1
+ if axis == 1:
+ df_levels = df_levels.T
+
+ # Perform checks
+ assert_labels_dropped(df_levels, expected_labels, axis=axis)
+ assert_levels_dropped(df_levels, expected_levels, axis=axis)
+
+ with tm.assert_raises_regex(ValueError, "not valid labels or levels"):
+ df_levels._drop_labels_or_levels('L4', axis=axis)
+
+
+# Series
+# ------
+def test_drop_labels_or_levels_series(df):
+
+ # Make series with L1 as index
+ s = df.set_index('L1').L2
+ assert_levels_dropped(s, ['L1'], axis=0)
+
+ with tm.assert_raises_regex(ValueError, "not valid labels or levels"):
+ s._drop_labels_or_levels('L4', axis=0)
+
+ # Make series with L1 and L2 as index
+ s = df.set_index(['L1', 'L2']).L3
+ assert_levels_dropped(s, ['L1', 'L2'], axis=0)
+
+ with tm.assert_raises_regex(ValueError, "not valid labels or levels"):
+ s._drop_labels_or_levels('L4', axis=0)
+
+
+# Panel
+# -----
+def test_drop_labels_or_levels_panel_error(panel):
+ msg = ("_drop_labels_or_levels is not implemented for {type}"
+ .format(type=type(panel)))
+
+ with tm.assert_raises_regex(NotImplementedError, msg):
+ panel._drop_labels_or_levels('L1', axis=0)
diff --git a/pandas/tests/groupby/test_index_as_string.py b/pandas/tests/groupby/test_index_as_string.py
index 3b6e15036cfe2..cee78eab3a636 100644
--- a/pandas/tests/groupby/test_index_as_string.py
+++ b/pandas/tests/groupby/test_index_as_string.py
@@ -108,7 +108,7 @@ def test_grouper_column_index_level_precedence(frame,
assert_frame_equal(result, expected)
- # Grouping with level Grouper should produce a difference result but
+ # Grouping with level Grouper should produce a different result but
# still no warning
with tm.assert_produces_warning(False):
not_expected = frame.groupby(level_groupers).mean()
diff --git a/pandas/tests/reshape/test_merge.py b/pandas/tests/reshape/test_merge.py
index 172667c9a0fb8..3fe285a5df8fb 100644
--- a/pandas/tests/reshape/test_merge.py
+++ b/pandas/tests/reshape/test_merge.py
@@ -69,6 +69,15 @@ def test_merge_common(self):
exp = merge(self.df, self.df2, on=['key1', 'key2'])
tm.assert_frame_equal(joined, exp)
+ def test_merge_index_as_on_arg(self):
+ # GH14355
+
+ left = self.df.set_index('key1')
+ right = self.df2.set_index('key1')
+ result = merge(left, right, on='key1')
+ expected = merge(self.df, self.df2, on='key1').set_index('key1')
+ assert_frame_equal(result, expected)
+
def test_merge_index_singlekey_right_vs_left(self):
left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
diff --git a/pandas/tests/reshape/test_merge_index_as_string.py b/pandas/tests/reshape/test_merge_index_as_string.py
new file mode 100644
index 0000000000000..4c638f8e441fa
--- /dev/null
+++ b/pandas/tests/reshape/test_merge_index_as_string.py
@@ -0,0 +1,215 @@
+import numpy as np
+import pytest
+
+from pandas import DataFrame
+from pandas.util import testing as tm
+from pandas.util.testing import assert_frame_equal
+
+
+@pytest.fixture
+def df1():
+ return DataFrame(dict(
+ outer=[1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4],
+ inner=[1, 2, 3, 1, 2, 3, 4, 1, 2, 1, 2],
+ v1=np.linspace(0, 1, 11)))
+
+
+@pytest.fixture
+def df2():
+ return DataFrame(dict(
+ outer=[1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3],
+ inner=[1, 2, 2, 3, 3, 4, 2, 3, 1, 1, 2, 3],
+ v2=np.linspace(10, 11, 12)))
+
+
+@pytest.fixture(params=[[], ['outer'], ['outer', 'inner']])
+def left_df(request, df1):
+ """ Construct left test DataFrame with specified levels
+ (any of 'outer', 'inner', and 'v1')"""
+ levels = request.param
+ if levels:
+ df1 = df1.set_index(levels)
+
+ return df1
+
+
+@pytest.fixture(params=[[], ['outer'], ['outer', 'inner']])
+def right_df(request, df2):
+ """ Construct right test DataFrame with specified levels
+ (any of 'outer', 'inner', and 'v2')"""
+ levels = request.param
+
+ if levels:
+ df2 = df2.set_index(levels)
+
+ return df2
+
+
+def compute_expected(df_left, df_right,
+ on=None, left_on=None, right_on=None, how=None):
+ """
+ Compute the expected merge result for the test case.
+
+ This method computes the expected result of merging two DataFrames on
+ a combination of their columns and index levels. It does so by
+ explicitly dropping/resetting their named index levels, performing a
+ merge on their columns, and then finally restoring the appropriate
+ index in the result.
+
+ Parameters
+ ----------
+ df_left : DataFrame
+ The left DataFrame (may have zero or more named index levels)
+ df_right : DataFrame
+ The right DataFrame (may have zero or more named index levels)
+ on : list of str
+ The on parameter to the merge operation
+ left_on : list of str
+ The left_on parameter to the merge operation
+ right_on : list of str
+ The right_on parameter to the merge operation
+ how : str
+ The how parameter to the merge operation
+
+ Returns
+ -------
+ DataFrame
+ The expected merge result
+ """
+
+ # Handle on param if specified
+ if on is not None:
+ left_on, right_on = on, on
+
+ # Compute input named index levels
+ left_levels = [n for n in df_left.index.names if n is not None]
+ right_levels = [n for n in df_right.index.names if n is not None]
+
+ # Compute output named index levels
+ output_levels = [i for i in left_on
+ if i in right_levels and i in left_levels]
+
+ # Drop index levels that aren't involved in the merge
+ drop_left = [n for n in left_levels if n not in left_on]
+ if drop_left:
+ df_left = df_left.reset_index(drop_left, drop=True)
+
+ drop_right = [n for n in right_levels if n not in right_on]
+ if drop_right:
+ df_right = df_right.reset_index(drop_right, drop=True)
+
+ # Convert remaining index levels to columns
+ reset_left = [n for n in left_levels if n in left_on]
+ if reset_left:
+ df_left = df_left.reset_index(level=reset_left)
+
+ reset_right = [n for n in right_levels if n in right_on]
+ if reset_right:
+ df_right = df_right.reset_index(level=reset_right)
+
+ # Perform merge
+ expected = df_left.merge(df_right,
+ left_on=left_on,
+ right_on=right_on,
+ how=how)
+
+ # Restore index levels
+ if output_levels:
+ expected = expected.set_index(output_levels)
+
+ return expected
+
+
+@pytest.mark.parametrize('on,how',
+ [(['outer'], 'inner'),
+ (['inner'], 'left'),
+ (['outer', 'inner'], 'right'),
+ (['inner', 'outer'], 'outer')])
+def test_merge_indexes_and_columns_on(left_df, right_df, on, how):
+
+ # Construct expected result
+ expected = compute_expected(left_df, right_df, on=on, how=how)
+
+ # Perform merge
+ result = left_df.merge(right_df, on=on, how=how)
+ assert_frame_equal(result, expected, check_like=True)
+
+
+@pytest.mark.parametrize('left_on,right_on,how',
+ [(['outer'], ['outer'], 'inner'),
+ (['inner'], ['inner'], 'right'),
+ (['outer', 'inner'], ['outer', 'inner'], 'left'),
+ (['inner', 'outer'], ['inner', 'outer'], 'outer')])
+def test_merge_indexes_and_columns_lefton_righton(
+ left_df, right_df, left_on, right_on, how):
+
+ # Construct expected result
+ expected = compute_expected(left_df, right_df,
+ left_on=left_on,
+ right_on=right_on,
+ how=how)
+
+ # Perform merge
+ result = left_df.merge(right_df,
+ left_on=left_on, right_on=right_on, how=how)
+ assert_frame_equal(result, expected, check_like=True)
+
+
+@pytest.mark.parametrize('left_index',
+ ['inner', ['inner', 'outer']])
+@pytest.mark.parametrize('how',
+ ['inner', 'left', 'right', 'outer'])
+def test_join_indexes_and_columns_on(df1, df2, left_index, how):
+
+ # Construct left_df
+ left_df = df1.set_index(left_index)
+
+ # Construct right_df
+ right_df = df2.set_index(['outer', 'inner'])
+
+ # Result
+ expected = (left_df.reset_index()
+ .join(right_df, on=['outer', 'inner'], how=how,
+ lsuffix='_x', rsuffix='_y')
+ .set_index(left_index))
+
+ # Perform join
+ result = left_df.join(right_df, on=['outer', 'inner'], how=how,
+ lsuffix='_x', rsuffix='_y')
+
+ assert_frame_equal(result, expected, check_like=True)
+
+
+def test_merge_index_column_precedence(df1, df2):
+
+ # Construct left_df with both an index and a column named 'outer'.
+ # We make this 'outer' column equal to the 'inner' column so that we
+ # can verify that the correct values are used by the merge operation
+ left_df = df1.set_index('outer')
+ left_df['outer'] = left_df['inner']
+
+ # Construct right_df with an index level named 'outer'
+ right_df = df2.set_index('outer')
+
+ # Construct expected result.
+ # The 'outer' column from left_df is chosen and the resulting
+ # frame has no index levels
+ expected = (left_df.reset_index(level='outer', drop=True)
+ .merge(right_df.reset_index(), on=['outer', 'inner']))
+
+ # Merge left_df and right_df on 'outer' and 'inner'
+ # 'outer' for left_df should refer to the 'outer' column, not the
+ # 'outer' index level and a FutureWarning should be raised
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = left_df.merge(right_df, on=['outer', 'inner'])
+
+ # Check results
+ assert_frame_equal(result, expected)
+
+ # Perform the same using the left_on and right_on parameters
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = left_df.merge(right_df,
+ left_on=['outer', 'inner'],
+ right_on=['outer', 'inner'])
+
+ assert_frame_equal(result, expected)
| This PR implements the changes proposed and discussed with @jorisvandenbossche @shoyer @TomAugspurger @jreback in #14355.
These changes allow the `on`, `left_on`, and `right_on` parameters of `DataFrame.merge` to accept a combination of column names and index level names. Any common index levels that are merged on are preserved as index levels in the resulting DataFrame, while all other index levels are removed.
In the case of a conflict, the column takes precedence and an ambiguity `FutureWarning` is raised.
- [x] closes #14355
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] documentation updated
- [x] whatsnew entry
**Note:** The new `df._get_column_or_level_values` method introduced in this PR is the same method introduced in #17361 to support sorting DataFrames on a combination of columns and index levels. I will keep this method in sync between the two PRs during the review process.
| https://api.github.com/repos/pandas-dev/pandas/pulls/17484 | 2017-09-09T14:42:49Z | 2017-12-01T16:45:20Z | 2017-12-01T16:45:20Z | 2017-12-01T17:28:58Z |
Make *_range functions consistent | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 27a4ab9cc6cbc..1541bbccefe21 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -218,10 +218,19 @@ Top-level dealing with datetimelike
to_timedelta
date_range
bdate_range
+ cdate_range
period_range
timedelta_range
infer_freq
+Top-level dealing with intervals
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autosummary::
+ :toctree: generated/
+
+ interval_range
+
Top-level evaluation
~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index c86c58c3183f6..5422d5c53043d 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -1705,6 +1705,15 @@ has multiplied span.
pd.PeriodIndex(start='2014-01', freq='3M', periods=4)
+If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
+endpoints for a ``PeriodIndex`` with frequency matching that of the
+``PeriodIndex`` constructor.
+
+.. ipython:: python
+
+ pd.PeriodIndex(start=pd.Period('2017Q1', freq='Q'),
+ end=pd.Period('2017Q2', freq='Q'), freq='M')
+
Just like ``DatetimeIndex``, a ``PeriodIndex`` can also be used to index pandas
objects:
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 9da1f321ef574..939199d3f6fa6 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -218,7 +218,7 @@ Furthermore this will now correctly box the results of iteration for :func:`Data
.. ipython:: ipython
d = {'a':[1], 'b':['b']}
- df = pd,DataFrame(d)
+ df = pd.DataFrame(d)
Previously:
@@ -358,6 +358,59 @@ Previously, :func:`to_datetime` did not localize datetime ``Series`` data when `
Additionally, DataFrames with datetime columns that were parsed by :func:`read_sql_table` and :func:`read_sql_query` will also be localized to UTC only if the original SQL columns were timezone aware datetime columns.
+.. _whatsnew_0210.api.consistency_of_range_functions:
+
+Consistency of Range Functions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In previous versions, there were some inconsistencies between the various range functions: func:`date_range`, func:`bdate_range`, func:`cdate_range`, func:`period_range`, func:`timedelta_range`, and func:`interval_range`. (:issue:`17471`).
+
+One of the inconsistent behaviors occurred when the ``start``, ``end`` and ``period`` parameters were all specified, potentially leading to ambiguous ranges. When all three parameters were passed, ``interval_range`` ignored the ``period`` parameter, ``period_range`` ignored the ``end`` parameter, and the other range functions raised. To promote consistency among the range functions, and avoid potentially ambiguous ranges, ``interval_range`` and ``period_range`` will now raise when all three parameters are passed.
+
+Previous Behavior:
+
+.. code-block:: ipython
+
+ In [2]: pd.interval_range(start=0, end=4, periods=6)
+ Out[2]:
+ IntervalIndex([(0, 1], (1, 2], (2, 3]]
+ closed='right',
+ dtype='interval[int64]')
+
+ In [3]: pd.period_range(start='2017Q1', end='2017Q4', periods=6, freq='Q')
+ Out[3]: PeriodIndex(['2017Q1', '2017Q2', '2017Q3', '2017Q4', '2018Q1', '2018Q2'], dtype='period[Q-DEC]', freq='Q-DEC')
+
+New Behavior:
+
+.. code-block:: ipython
+
+ In [2]: pd.interval_range(start=0, end=4, periods=6)
+ ---------------------------------------------------------------------------
+ ValueError: Of the three parameters: start, end, and periods, exactly two must be specified
+
+ In [3]: pd.period_range(start='2017Q1', end='2017Q4', periods=6, freq='Q')
+ ---------------------------------------------------------------------------
+ ValueError: Of the three parameters: start, end, and periods, exactly two must be specified
+
+Additionally, the endpoint parameter ``end`` was not included in the intervals produced by ``interval_range``. However, all other range functions include ``end`` in their output. To promote consistency among the range functions, ``interval_range`` will now include ``end`` as the right endpoint of the final interval, except if ``freq`` is specified in a way which skips ``end``.
+
+Previous Behavior:
+
+.. code-block:: ipython
+
+ In [4]: pd.interval_range(start=0, end=4)
+ Out[4]:
+ IntervalIndex([(0, 1], (1, 2], (2, 3]]
+ closed='right',
+ dtype='interval[int64]')
+
+
+New Behavior:
+
+ .. ipython:: python
+
+ pd.interval_range(start=0, end=4)
+
.. _whatsnew_0210.api:
Other API Changes
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 4cfb7547e7d0a..1c8d0b334b91c 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -292,8 +292,8 @@ def __new__(cls, data=None,
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
- raise ValueError('Periods must be a number, got %s' %
- str(periods))
+ msg = 'periods must be a number, got {periods}'
+ raise TypeError(msg.format(periods=periods))
if data is None and freq is None:
raise ValueError("Must provide freq argument if no data is "
@@ -412,7 +412,8 @@ def __new__(cls, data=None,
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False, ambiguous='raise', closed=None):
if com._count_not_none(start, end, periods) != 2:
- raise ValueError('Must specify two of start, end, or periods')
+ raise ValueError('Of the three parameters: start, end, and '
+ 'periods, exactly two must be specified')
_normalized = True
@@ -2004,7 +2005,7 @@ def _generate_regular_range(start, end, periods, offset):
def date_range(start=None, end=None, periods=None, freq='D', tz=None,
normalize=False, name=None, closed=None, **kwargs):
"""
- Return a fixed frequency datetime index, with day (calendar) as the default
+ Return a fixed frequency DatetimeIndex, with day (calendar) as the default
frequency
Parameters
@@ -2013,24 +2014,25 @@ def date_range(start=None, end=None, periods=None, freq='D', tz=None,
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
- periods : integer or None, default None
- If None, must specify start and end
+ periods : integer, default None
+ Number of periods to generate
freq : string or DateOffset, default 'D' (calendar daily)
Frequency strings can have multiples, e.g. '5H'
- tz : string or None
+ tz : string, default None
Time zone name for returning localized DatetimeIndex, for example
Asia/Hong_Kong
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
- name : str, default None
- Name of the resulting index
- closed : string or None, default None
+ name : string, default None
+ Name of the resulting DatetimeIndex
+ closed : string, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
- 2 of start, end, or periods must be specified
+ Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
+ must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
@@ -2047,7 +2049,7 @@ def date_range(start=None, end=None, periods=None, freq='D', tz=None,
def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
normalize=True, name=None, closed=None, **kwargs):
"""
- Return a fixed frequency datetime index, with business day as the default
+ Return a fixed frequency DatetimeIndex, with business day as the default
frequency
Parameters
@@ -2056,8 +2058,8 @@ def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
- periods : integer or None, default None
- If None, must specify start and end
+ periods : integer, default None
+ Number of periods to generate
freq : string or DateOffset, default 'B' (business daily)
Frequency strings can have multiples, e.g. '5H'
tz : string or None
@@ -2065,15 +2067,16 @@ def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
Asia/Beijing
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
- name : str, default None
- Name for the resulting index
- closed : string or None, default None
+ name : string, default None
+ Name of the resulting DatetimeIndex
+ closed : string, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
- 2 of start, end, or periods must be specified
+ Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
+ must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
@@ -2091,7 +2094,7 @@ def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
normalize=True, name=None, closed=None, **kwargs):
"""
- **EXPERIMENTAL** Return a fixed frequency datetime index, with
+ **EXPERIMENTAL** Return a fixed frequency DatetimeIndex, with
CustomBusinessDay as the default frequency
.. warning:: EXPERIMENTAL
@@ -2105,29 +2108,30 @@ def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
- periods : integer or None, default None
- If None, must specify start and end
+ periods : integer, default None
+ Number of periods to generate
freq : string or DateOffset, default 'C' (CustomBusinessDay)
Frequency strings can have multiples, e.g. '5H'
- tz : string or None
+ tz : string, default None
Time zone name for returning localized DatetimeIndex, for example
Asia/Beijing
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
- name : str, default None
- Name for the resulting index
- weekmask : str, Default 'Mon Tue Wed Thu Fri'
+ name : string, default None
+ Name of the resulting DatetimeIndex
+ weekmask : string, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
- closed : string or None, default None
+ closed : string, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
- 2 of start, end, or periods must be specified
+ Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
+ must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index e0ed6c7ea35c0..6e80f6c900386 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -15,6 +15,8 @@
is_float_dtype,
is_interval_dtype,
is_scalar,
+ is_float,
+ is_number,
is_integer)
from pandas.core.indexes.base import (
Index, _ensure_index,
@@ -25,11 +27,15 @@
Interval, IntervalMixin, IntervalTree,
intervals_to_interval_bounds)
+from pandas.core.indexes.datetimes import date_range
+from pandas.core.indexes.timedeltas import timedelta_range
from pandas.core.indexes.multi import MultiIndex
from pandas.compat.numpy import function as nv
from pandas.core import common as com
from pandas.util._decorators import cache_readonly, Appender
from pandas.core.config import get_option
+from pandas.tseries.frequencies import to_offset
+from pandas.tseries.offsets import DateOffset
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
@@ -1028,54 +1034,152 @@ def func(self, other):
IntervalIndex._add_logical_methods_disabled()
-def interval_range(start=None, end=None, freq=None, periods=None,
- name=None, closed='right', **kwargs):
+def _is_valid_endpoint(endpoint):
+ """helper for interval_range to check if start/end are valid types"""
+ return any([is_number(endpoint),
+ isinstance(endpoint, Timestamp),
+ isinstance(endpoint, Timedelta),
+ endpoint is None])
+
+
+def _is_type_compatible(a, b):
+ """helper for interval_range to check type compat of start/end/freq"""
+ is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))
+ is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))
+ return ((is_number(a) and is_number(b)) or
+ (is_ts_compat(a) and is_ts_compat(b)) or
+ (is_td_compat(a) and is_td_compat(b)) or
+ com._any_none(a, b))
+
+
+def interval_range(start=None, end=None, periods=None, freq=None,
+ name=None, closed='right'):
"""
Return a fixed frequency IntervalIndex
Parameters
----------
- start : string or datetime-like, default None
- Left bound for generating data
- end : string or datetime-like, default None
- Right bound for generating data
- freq : interger, string or DateOffset, default 1
- periods : interger, default None
- name : str, default None
- Name of the resulting index
+ start : numeric or datetime-like, default None
+ Left bound for generating intervals
+ end : numeric or datetime-like, default None
+ Right bound for generating intervals
+ periods : integer, default None
+ Number of periods to generate
+ freq : numeric, string, or DateOffset, default None
+ The length of each interval. Must be consistent with the type of start
+ and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
+ for numeric and 'D' (calendar daily) for datetime-like.
+ name : string, default None
+ Name of the resulting IntervalIndex
closed : string, default 'right'
options are: 'left', 'right', 'both', 'neither'
Notes
-----
- 2 of start, end, or periods must be specified
+ Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
+ must be specified.
Returns
-------
rng : IntervalIndex
+
+ Examples
+ --------
+
+ Numeric ``start`` and ``end`` is supported.
+
+ >>> pd.interval_range(start=0, end=5)
+ IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]
+ closed='right', dtype='interval[int64]')
+
+ Additionally, datetime-like input is also supported.
+
+ >>> pd.interval_range(start='2017-01-01', end='2017-01-04')
+ IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],
+ (2017-01-03, 2017-01-04]]
+ closed='right', dtype='interval[datetime64[ns]]')
+
+ The ``freq`` parameter specifies the frequency between the left and right.
+ endpoints of the individual intervals within the ``IntervalIndex``. For
+ numeric ``start`` and ``end``, the frequency must also be numeric.
+
+ >>> pd.interval_range(start=0, periods=4, freq=1.5)
+ IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]]
+ closed='right', dtype='interval[float64]')
+
+ Similarly, for datetime-like ``start`` and ``end``, the frequency must be
+ convertible to a DateOffset.
+
+ >>> pd.interval_range(start='2017-01-01', periods=3, freq='MS')
+ IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],
+ (2017-03-01, 2017-04-01]]
+ closed='right', dtype='interval[datetime64[ns]]')
+
+ The ``closed`` parameter specifies which endpoints of the individual
+ intervals within the ``IntervalIndex`` are closed.
+
+ >>> pd.interval_range(end=5, periods=4, closed='both')
+ IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]]
+ closed='both', dtype='interval[int64]')
"""
+ if com._count_not_none(start, end, periods) != 2:
+ raise ValueError('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
+
+ start = com._maybe_box_datetimelike(start)
+ end = com._maybe_box_datetimelike(end)
+ endpoint = next(com._not_none(start, end))
+
+ if not _is_valid_endpoint(start):
+ msg = 'start must be numeric or datetime-like, got {start}'
+ raise ValueError(msg.format(start=start))
+
+ if not _is_valid_endpoint(end):
+ msg = 'end must be numeric or datetime-like, got {end}'
+ raise ValueError(msg.format(end=end))
+
+ if is_float(periods):
+ periods = int(periods)
+ elif not is_integer(periods) and periods is not None:
+ msg = 'periods must be a number, got {periods}'
+ raise TypeError(msg.format(periods=periods))
+
+ freq = freq or (1 if is_number(endpoint) else 'D')
+ if not is_number(freq):
+ try:
+ freq = to_offset(freq)
+ except ValueError:
+ raise ValueError('freq must be numeric or convertible to '
+ 'DateOffset, got {freq}'.format(freq=freq))
- if freq is None:
- freq = 1
+ # verify type compatibility
+ if not all([_is_type_compatible(start, end),
+ _is_type_compatible(start, freq),
+ _is_type_compatible(end, freq)]):
+ raise TypeError("start, end, freq need to be type compatible")
- if start is None:
- if periods is None or end is None:
- raise ValueError("must specify 2 of start, end, periods")
- start = end - periods * freq
- if end is None:
- if periods is None or start is None:
- raise ValueError("must specify 2 of start, end, periods")
+ if is_number(endpoint):
+ if periods is None:
+ periods = int((end - start) // freq)
+
+ if start is None:
+ start = end - periods * freq
+
+ # force end to be consistent with freq (lower if freq skips over end)
end = start + periods * freq
- if periods is None:
- if start is None or end is None:
- raise ValueError("must specify 2 of start, end, periods")
- pass
-
- # must all be same units or None
- arr = np.array([start, end, freq])
- if is_object_dtype(arr):
- raise ValueError("start, end, freq need to be the same type")
-
- return IntervalIndex.from_breaks(np.arange(start, end, freq),
- name=name,
- closed=closed)
+
+ # end + freq for inclusive endpoint
+ breaks = np.arange(start, end + freq, freq)
+ elif isinstance(endpoint, Timestamp):
+ # add one to account for interval endpoints (n breaks = n-1 intervals)
+ if periods is not None:
+ periods += 1
+ breaks = date_range(start=start, end=end, periods=periods, freq=freq)
+ else:
+ # add one to account for interval endpoints (n breaks = n-1 intervals)
+ if periods is not None:
+ periods += 1
+ breaks = timedelta_range(start=start, end=end, periods=periods,
+ freq=freq)
+
+ return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 0915462d4d421..fb47d1db48610 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -199,8 +199,8 @@ def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
- raise ValueError('Periods must be a number, got %s' %
- str(periods))
+ msg = 'periods must be a number, got {periods}'
+ raise TypeError(msg.format(periods=periods))
if name is None and hasattr(data, 'name'):
name = data.name
@@ -1051,8 +1051,9 @@ def tz_localize(self, tz, infer_dst=False):
def _get_ordinal_range(start, end, periods, freq, mult=1):
- if com._count_not_none(start, end, periods) < 2:
- raise ValueError('Must specify 2 of start, end, periods')
+ if com._count_not_none(start, end, periods) != 2:
+ raise ValueError('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
if freq is not None:
_, mult = _gfc(freq)
@@ -1066,9 +1067,9 @@ def _get_ordinal_range(start, end, periods, freq, mult=1):
is_end_per = isinstance(end, Period)
if is_start_per and is_end_per and start.freq != end.freq:
- raise ValueError('Start and end must have same freq')
+ raise ValueError('start and end must have same freq')
if (start is tslib.NaT or end is tslib.NaT):
- raise ValueError('Start and end must not be NaT')
+ raise ValueError('start and end must not be NaT')
if freq is None:
if is_start_per:
@@ -1157,24 +1158,55 @@ def pnow(freq=None):
def period_range(start=None, end=None, periods=None, freq='D', name=None):
"""
- Return a fixed frequency datetime index, with day (calendar) as the default
+ Return a fixed frequency PeriodIndex, with day (calendar) as the default
frequency
-
Parameters
----------
- start : starting value, period-like, optional
- end : ending value, period-like, optional
- periods : int, default None
- Number of periods in the index
- freq : str/DateOffset, default 'D'
+ start : string or period-like, default None
+ Left bound for generating periods
+ end : string or period-like, default None
+ Right bound for generating periods
+ periods : integer, default None
+ Number of periods to generate
+ freq : string or DateOffset, default 'D' (calendar daily)
Frequency alias
- name : str, default None
- Name for the resulting PeriodIndex
+ name : string, default None
+ Name of the resulting PeriodIndex
+
+ Notes
+ -----
+ Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
+ must be specified.
+
+ To learn more about the frequency strings, please see `this link
+ <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Returns
-------
prng : PeriodIndex
+
+ Examples
+ --------
+
+ >>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
+ PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05',
+ '2017-06', '2017-06', '2017-07', '2017-08', '2017-09',
+ '2017-10', '2017-11', '2017-12', '2018-01'],
+ dtype='period[M]', freq='M')
+
+ If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
+ endpoints for a ``PeriodIndex`` with frequency matching that of the
+ ``period_range`` constructor.
+
+ >>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
+ ... end=pd.Period('2017Q2', freq='Q'), freq='M')
+ PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
+ dtype='period[M]', freq='M')
"""
+ if com._count_not_none(start, end, periods) != 2:
+ raise ValueError('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
+
return PeriodIndex(start=start, end=end, periods=periods,
freq=freq, name=name)
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 2823951c0f348..d7b7d56d74a3a 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -180,8 +180,8 @@ def __new__(cls, data=None, unit=None,
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
- raise ValueError('Periods must be a number, got %s' %
- str(periods))
+ msg = 'periods must be a number, got {periods}'
+ raise TypeError(msg.format(periods=periods))
if data is None and freq is None:
raise ValueError("Must provide freq argument if no data is "
@@ -234,7 +234,8 @@ def __new__(cls, data=None, unit=None,
@classmethod
def _generate(cls, start, end, periods, name, offset, closed=None):
if com._count_not_none(start, end, periods) != 2:
- raise ValueError('Must specify two of start, end, or periods')
+ raise ValueError('Of the three parameters: start, end, and '
+ 'periods, exactly two must be specified')
if start is not None:
start = Timedelta(start)
@@ -960,22 +961,22 @@ def _generate_regular_range(start, end, periods, offset):
def timedelta_range(start=None, end=None, periods=None, freq='D',
name=None, closed=None):
"""
- Return a fixed frequency timedelta index, with day as the default
+ Return a fixed frequency TimedeltaIndex, with day as the default
frequency
Parameters
----------
start : string or timedelta-like, default None
- Left bound for generating dates
- end : string or datetime-like, default None
- Right bound for generating dates
- periods : integer or None, default None
- If None, must specify start and end
+ Left bound for generating timedeltas
+ end : string or timedelta-like, default None
+ Right bound for generating timedeltas
+ periods : integer, default None
+ Number of periods to generate
freq : string or DateOffset, default 'D' (calendar daily)
Frequency strings can have multiples, e.g. '5H'
- name : str, default None
- Name of the resulting index
- closed : string or None, default None
+ name : string, default None
+ Name of the resulting TimedeltaIndex
+ closed : string, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
@@ -985,11 +986,34 @@ def timedelta_range(start=None, end=None, periods=None, freq='D',
Notes
-----
- 2 of start, end, or periods must be specified.
+ Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
+ must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
+
+ Examples
+ --------
+
+ >>> pd.timedelta_range(start='1 day', periods=4)
+ TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
+ dtype='timedelta64[ns]', freq='D')
+
+ The ``closed`` parameter specifies which endpoint is included. The default
+ behavior is to include both endpoints.
+
+ >>> pd.timedelta_range(start='1 day', periods=4, closed='right')
+ TimedeltaIndex(['2 days', '3 days', '4 days'],
+ dtype='timedelta64[ns]', freq='D')
+
+ The ``freq`` parameter specifies the frequency of the TimedeltaIndex.
+ Only fixed frequencies can be passed, non-fixed frequencies such as
+ 'M' (month end) will raise.
+
+ >>> pd.timedelta_range(start='1 day', end='2 days', freq='6H')
+ TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
+ '1 days 18:00:00', '2 days 00:00:00'],
+ dtype='timedelta64[ns]', freq='6H')
"""
return TimedeltaIndex(start=start, end=end, periods=periods,
- freq=freq, name=name,
- closed=closed)
+ freq=freq, name=name, closed=closed)
diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py
index cf896b06130a2..a4706dd8a3767 100644
--- a/pandas/tests/indexes/datetimes/test_construction.py
+++ b/pandas/tests/indexes/datetimes/test_construction.py
@@ -307,8 +307,9 @@ def test_constructor_coverage(self):
exp = date_range('1/1/2000', periods=10)
tm.assert_index_equal(rng, exp)
- pytest.raises(ValueError, DatetimeIndex, start='1/1/2000',
- periods='foo', freq='D')
+ msg = 'periods must be a number, got foo'
+ with tm.assert_raises_regex(TypeError, msg):
+ DatetimeIndex(start='1/1/2000', periods='foo', freq='D')
pytest.raises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index da4ca83c10dda..8d86bebdd4d5e 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -107,8 +107,10 @@ def test_date_range_ambiguous_arguments(self):
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
- pytest.raises(ValueError, date_range, start, end, freq='s',
- periods=10)
+ msg = ('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
+ with tm.assert_raises_regex(ValueError, msg):
+ date_range(start, end, periods=10, freq='s')
def test_date_range_businesshour(self):
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00',
@@ -146,14 +148,29 @@ def test_date_range_businesshour(self):
def test_range_misspecified(self):
# GH #1095
+ msg = ('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ date_range(start='1/1/2000')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ date_range(end='1/1/2000')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ date_range(periods=10)
+
+ with tm.assert_raises_regex(ValueError, msg):
+ date_range(start='1/1/2000', freq='H')
- pytest.raises(ValueError, date_range, '1/1/2000')
- pytest.raises(ValueError, date_range, end='1/1/2000')
- pytest.raises(ValueError, date_range, periods=10)
+ with tm.assert_raises_regex(ValueError, msg):
+ date_range(end='1/1/2000', freq='H')
- pytest.raises(ValueError, date_range, '1/1/2000', freq='H')
- pytest.raises(ValueError, date_range, end='1/1/2000', freq='H')
- pytest.raises(ValueError, date_range, periods=10, freq='H')
+ with tm.assert_raises_regex(ValueError, msg):
+ date_range(periods=10, freq='H')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ date_range()
def test_compat_replace(self):
# https://github.com/statsmodels/statsmodels/issues/3349
@@ -231,8 +248,13 @@ def test_constructor(self):
bdate_range(START, END, freq=BDay())
bdate_range(START, periods=20, freq=BDay())
bdate_range(end=START, periods=20, freq=BDay())
- pytest.raises(ValueError, date_range, '2011-1-1', '2012-1-1', 'B')
- pytest.raises(ValueError, bdate_range, '2011-1-1', '2012-1-1', 'B')
+
+ msg = 'periods must be a number, got B'
+ with tm.assert_raises_regex(TypeError, msg):
+ date_range('2011-1-1', '2012-1-1', 'B')
+
+ with tm.assert_raises_regex(TypeError, msg):
+ bdate_range('2011-1-1', '2012-1-1', 'B')
def test_naive_aware_conflicts(self):
naive = bdate_range(START, END, freq=BDay(), tz=None)
@@ -510,8 +532,13 @@ def test_constructor(self):
cdate_range(START, END, freq=CDay())
cdate_range(START, periods=20, freq=CDay())
cdate_range(end=START, periods=20, freq=CDay())
- pytest.raises(ValueError, date_range, '2011-1-1', '2012-1-1', 'C')
- pytest.raises(ValueError, cdate_range, '2011-1-1', '2012-1-1', 'C')
+
+ msg = 'periods must be a number, got C'
+ with tm.assert_raises_regex(TypeError, msg):
+ date_range('2011-1-1', '2012-1-1', 'C')
+
+ with tm.assert_raises_regex(TypeError, msg):
+ cdate_range('2011-1-1', '2012-1-1', 'C')
def test_cached_range(self):
DatetimeIndex._cached_range(START, END, offset=CDay())
diff --git a/pandas/tests/indexes/period/test_construction.py b/pandas/tests/indexes/period/test_construction.py
index e5b889e100307..639a9272c3808 100644
--- a/pandas/tests/indexes/period/test_construction.py
+++ b/pandas/tests/indexes/period/test_construction.py
@@ -436,11 +436,12 @@ def test_constructor_error(self):
start = Period('02-Apr-2005', 'B')
end_intv = Period('2006-12-31', ('w', 1))
- msg = 'Start and end must have same freq'
+ msg = 'start and end must have same freq'
with tm.assert_raises_regex(ValueError, msg):
PeriodIndex(start=start, end=end_intv)
- msg = 'Must specify 2 of start, end, periods'
+ msg = ('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
with tm.assert_raises_regex(ValueError, msg):
PeriodIndex(start=start)
diff --git a/pandas/tests/indexes/period/test_period_range.py b/pandas/tests/indexes/period/test_period_range.py
new file mode 100644
index 0000000000000..640f24f67f72f
--- /dev/null
+++ b/pandas/tests/indexes/period/test_period_range.py
@@ -0,0 +1,94 @@
+import pytest
+import pandas.util.testing as tm
+from pandas import date_range, NaT, period_range, Period, PeriodIndex
+
+
+class TestPeriodRange(object):
+
+ @pytest.mark.parametrize('freq', ['D', 'W', 'M', 'Q', 'A'])
+ def test_construction_from_string(self, freq):
+ # non-empty
+ expected = date_range(start='2017-01-01', periods=5,
+ freq=freq, name='foo').to_period()
+ start, end = str(expected[0]), str(expected[-1])
+
+ result = period_range(start=start, end=end, freq=freq, name='foo')
+ tm.assert_index_equal(result, expected)
+
+ result = period_range(start=start, periods=5, freq=freq, name='foo')
+ tm.assert_index_equal(result, expected)
+
+ result = period_range(end=end, periods=5, freq=freq, name='foo')
+ tm.assert_index_equal(result, expected)
+
+ # empty
+ expected = PeriodIndex([], freq=freq, name='foo')
+
+ result = period_range(start=start, periods=0, freq=freq, name='foo')
+ tm.assert_index_equal(result, expected)
+
+ result = period_range(end=end, periods=0, freq=freq, name='foo')
+ tm.assert_index_equal(result, expected)
+
+ result = period_range(start=end, end=start, freq=freq, name='foo')
+ tm.assert_index_equal(result, expected)
+
+ def test_construction_from_period(self):
+ # upsampling
+ start, end = Period('2017Q1', freq='Q'), Period('2018Q1', freq='Q')
+ expected = date_range(start='2017-03-31', end='2018-03-31', freq='M',
+ name='foo').to_period()
+ result = period_range(start=start, end=end, freq='M', name='foo')
+ tm.assert_index_equal(result, expected)
+
+ # downsampling
+ start, end = Period('2017-1', freq='M'), Period('2019-12', freq='M')
+ expected = date_range(start='2017-01-31', end='2019-12-31', freq='Q',
+ name='foo').to_period()
+ result = period_range(start=start, end=end, freq='Q', name='foo')
+ tm.assert_index_equal(result, expected)
+
+ # empty
+ expected = PeriodIndex([], freq='W', name='foo')
+
+ result = period_range(start=start, periods=0, freq='W', name='foo')
+ tm.assert_index_equal(result, expected)
+
+ result = period_range(end=end, periods=0, freq='W', name='foo')
+ tm.assert_index_equal(result, expected)
+
+ result = period_range(start=end, end=start, freq='W', name='foo')
+ tm.assert_index_equal(result, expected)
+
+ def test_errors(self):
+ # not enough params
+ msg = ('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
+ with tm.assert_raises_regex(ValueError, msg):
+ period_range(start='2017Q1')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ period_range(end='2017Q1')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ period_range(periods=5)
+
+ with tm.assert_raises_regex(ValueError, msg):
+ period_range()
+
+ # too many params
+ with tm.assert_raises_regex(ValueError, msg):
+ period_range(start='2017Q1', end='2018Q1', periods=8, freq='Q')
+
+ # start/end NaT
+ msg = 'start and end must not be NaT'
+ with tm.assert_raises_regex(ValueError, msg):
+ period_range(start=NaT, end='2018Q1')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ period_range(start='2017Q1', end=NaT)
+
+ # invalid periods param
+ msg = 'periods must be a number, got foo'
+ with tm.assert_raises_regex(TypeError, msg):
+ period_range(start='2017Q1', periods='foo')
diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py
index 18eefc3fbdca6..13c3b35e4d85d 100644
--- a/pandas/tests/indexes/test_interval.py
+++ b/pandas/tests/indexes/test_interval.py
@@ -2,10 +2,11 @@
import pytest
import numpy as np
-
+from datetime import timedelta
from pandas import (Interval, IntervalIndex, Index, isna,
interval_range, Timestamp, Timedelta,
- compat)
+ compat, date_range, timedelta_range, DateOffset)
+from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
@@ -721,40 +722,278 @@ def test_is_non_overlapping_monotonic(self):
class TestIntervalRange(object):
- def test_construction(self):
- result = interval_range(0, 5, name='foo', closed='both')
+ @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both'])
+ def test_construction_from_numeric(self, closed):
+ # combinations of start/end/periods without freq
expected = IntervalIndex.from_breaks(
- np.arange(0, 5), name='foo', closed='both')
+ np.arange(0, 6), name='foo', closed=closed)
+
+ result = interval_range(start=0, end=5, name='foo', closed=closed)
tm.assert_index_equal(result, expected)
- def test_errors(self):
+ result = interval_range(start=0, periods=5, name='foo', closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(end=5, periods=5, name='foo', closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ # combinations of start/end/periods with freq
+ expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)],
+ name='foo', closed=closed)
+
+ result = interval_range(start=0, end=6, freq=2, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(start=0, periods=3, freq=2, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(end=6, periods=3, freq=2, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ # output truncates early if freq causes end to be skipped.
+ expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)],
+ name='foo', closed=closed)
+ result = interval_range(start=0, end=4, freq=1.5, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both'])
+ def test_construction_from_timestamp(self, closed):
+ # combinations of start/end/periods without freq
+ start, end = Timestamp('2017-01-01'), Timestamp('2017-01-06')
+ breaks = date_range(start=start, end=end)
+ expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
+
+ result = interval_range(start=start, end=end, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(start=start, periods=5, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(end=end, periods=5, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ # combinations of start/end/periods with fixed freq
+ freq = '2D'
+ start, end = Timestamp('2017-01-01'), Timestamp('2017-01-07')
+ breaks = date_range(start=start, end=end, freq=freq)
+ expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
+
+ result = interval_range(start=start, end=end, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(start=start, periods=3, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(end=end, periods=3, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ # output truncates early if freq causes end to be skipped.
+ end = Timestamp('2017-01-08')
+ result = interval_range(start=start, end=end, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ # combinations of start/end/periods with non-fixed freq
+ freq = 'M'
+ start, end = Timestamp('2017-01-01'), Timestamp('2017-12-31')
+ breaks = date_range(start=start, end=end, freq=freq)
+ expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
+
+ result = interval_range(start=start, end=end, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(start=start, periods=11, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(end=end, periods=11, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ # output truncates early if freq causes end to be skipped.
+ end = Timestamp('2018-01-15')
+ result = interval_range(start=start, end=end, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both'])
+ def test_construction_from_timedelta(self, closed):
+ # combinations of start/end/periods without freq
+ start, end = Timedelta('1 day'), Timedelta('6 days')
+ breaks = timedelta_range(start=start, end=end)
+ expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
+
+ result = interval_range(start=start, end=end, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(start=start, periods=5, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(end=end, periods=5, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ # combinations of start/end/periods with fixed freq
+ freq = '2D'
+ start, end = Timedelta('1 day'), Timedelta('7 days')
+ breaks = timedelta_range(start=start, end=end, freq=freq)
+ expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
+
+ result = interval_range(start=start, end=end, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(start=start, periods=3, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(end=end, periods=3, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ # output truncates early if freq causes end to be skipped.
+ end = Timedelta('7 days 1 hour')
+ result = interval_range(start=start, end=end, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ def test_constructor_coverage(self):
+ # float value for periods
+ expected = pd.interval_range(start=0, periods=10)
+ result = pd.interval_range(start=0, periods=10.5)
+ tm.assert_index_equal(result, expected)
+
+ # equivalent timestamp-like start/end
+ start, end = Timestamp('2017-01-01'), Timestamp('2017-01-15')
+ expected = pd.interval_range(start=start, end=end)
+
+ result = pd.interval_range(start=start.to_pydatetime(),
+ end=end.to_pydatetime())
+ tm.assert_index_equal(result, expected)
+
+ result = pd.interval_range(start=start.tz_localize('UTC'),
+ end=end.tz_localize('UTC'))
+ tm.assert_index_equal(result, expected)
+
+ result = pd.interval_range(start=start.asm8, end=end.asm8)
+ tm.assert_index_equal(result, expected)
+
+ # equivalent freq with timestamp
+ equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1),
+ DateOffset(days=1)]
+ for freq in equiv_freq:
+ result = pd.interval_range(start=start, end=end, freq=freq)
+ tm.assert_index_equal(result, expected)
+
+ # equivalent timedelta-like start/end
+ start, end = Timedelta(days=1), Timedelta(days=10)
+ expected = pd.interval_range(start=start, end=end)
+
+ result = pd.interval_range(start=start.to_pytimedelta(),
+ end=end.to_pytimedelta())
+ tm.assert_index_equal(result, expected)
+
+ result = pd.interval_range(start=start.asm8, end=end.asm8)
+ tm.assert_index_equal(result, expected)
+
+ # equivalent freq with timedelta
+ equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1)]
+ for freq in equiv_freq:
+ result = pd.interval_range(start=start, end=end, freq=freq)
+ tm.assert_index_equal(result, expected)
+ def test_errors(self):
# not enough params
- def f():
- interval_range(0)
+ msg = ('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
- pytest.raises(ValueError, f)
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(start=0)
- def f():
- interval_range(periods=2)
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(end=5)
- pytest.raises(ValueError, f)
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(periods=2)
- def f():
+ with tm.assert_raises_regex(ValueError, msg):
interval_range()
- pytest.raises(ValueError, f)
+ # too many params
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(start=0, end=5, periods=6)
# mixed units
- def f():
- interval_range(0, Timestamp('20130101'), freq=2)
+ msg = 'start, end, freq need to be type compatible'
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=0, end=Timestamp('20130101'), freq=2)
- pytest.raises(ValueError, f)
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=0, end=Timedelta('1 day'), freq=2)
- def f():
- interval_range(0, 10, freq=Timedelta('1day'))
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=0, end=10, freq='D')
- pytest.raises(ValueError, f)
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=Timestamp('20130101'), end=10, freq='D')
+
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=Timestamp('20130101'),
+ end=Timedelta('1 day'), freq='D')
+
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=Timestamp('20130101'),
+ end=Timestamp('20130110'), freq=2)
+
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=Timedelta('1 day'), end=10, freq='D')
+
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=Timedelta('1 day'),
+ end=Timestamp('20130110'), freq='D')
+
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=Timedelta('1 day'),
+ end=Timedelta('10 days'), freq=2)
+
+ # invalid periods
+ msg = 'periods must be a number, got foo'
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=0, periods='foo')
+
+ # invalid start
+ msg = 'start must be numeric or datetime-like, got foo'
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(start='foo', periods=10)
+
+ # invalid end
+ msg = 'end must be numeric or datetime-like, got \(0, 1\]'
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(end=Interval(0, 1), periods=10)
+
+ # invalid freq for datetime-like
+ msg = 'freq must be numeric or convertible to DateOffset, got foo'
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(start=0, end=10, freq='foo')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(start=Timestamp('20130101'), periods=10, freq='foo')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(end=Timedelta('1 day'), periods=10, freq='foo')
class TestIntervalTree(object):
diff --git a/pandas/tests/indexes/timedeltas/test_construction.py b/pandas/tests/indexes/timedeltas/test_construction.py
index dd25e2cca2e55..70aadd9f57174 100644
--- a/pandas/tests/indexes/timedeltas/test_construction.py
+++ b/pandas/tests/indexes/timedeltas/test_construction.py
@@ -50,8 +50,9 @@ def test_constructor_coverage(self):
exp = timedelta_range('1 days', periods=10)
tm.assert_index_equal(rng, exp)
- pytest.raises(ValueError, TimedeltaIndex, start='1 days',
- periods='foo', freq='D')
+ msg = 'periods must be a number, got foo'
+ with tm.assert_raises_regex(TypeError, msg):
+ TimedeltaIndex(start='1 days', periods='foo', freq='D')
pytest.raises(ValueError, TimedeltaIndex, start='1 days',
end='10 days')
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta_range.py b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
index 4732a0ce110de..7624e1f79af15 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta_range.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
@@ -1,5 +1,4 @@
import numpy as np
-
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.offsets import Day, Second
@@ -49,3 +48,23 @@ def test_timedelta_range(self):
expected = df.loc[pd.Timedelta('0s'):, :]
result = df.loc['0s':, :]
assert_frame_equal(expected, result)
+
+ def test_errors(self):
+ # not enough params
+ msg = ('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
+ with tm.assert_raises_regex(ValueError, msg):
+ timedelta_range(start='0 days')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ timedelta_range(end='5 days')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ timedelta_range(periods=2)
+
+ with tm.assert_raises_regex(ValueError, msg):
+ timedelta_range()
+
+ # too many params
+ with tm.assert_raises_regex(ValueError, msg):
+ timedelta_range(start='0 days', end='5 days', periods=10)
| - [X] closes #17471
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
Comments:
- I couldn't find any existing tests of `period_range`, so I created a new file `test_period_range.py` in a similar fashion to the existing files `test_date_range.py` and `test_timedelta_range.py`.
- Cleaned up a few existing tests relevant to this issue (used `with` context where appropriate, etc.). | https://api.github.com/repos/pandas-dev/pandas/pulls/17482 | 2017-09-09T04:54:21Z | 2017-09-14T10:11:32Z | 2017-09-14T10:11:31Z | 2017-09-15T22:10:31Z |
Remove pyx dependencies from setup | diff --git a/setup.py b/setup.py
index 3269fe7972cf0..d64a78db7500a 100755
--- a/setup.py
+++ b/setup.py
@@ -347,14 +347,6 @@ class CheckSDist(sdist_class):
def initialize_options(self):
sdist_class.initialize_options(self)
- '''
- self._pyxfiles = []
- for root, dirs, files in os.walk('pandas'):
- for f in files:
- if f.endswith('.pyx'):
- self._pyxfiles.append(pjoin(root, f))
- '''
-
def run(self):
if 'cython' in cmdclass:
self.run_command('cython')
@@ -479,11 +471,10 @@ def pxd(name):
'_libs.lib': {'pyxfile': '_libs/lib',
'depends': lib_depends + tseries_depends},
'_libs.hashtable': {'pyxfile': '_libs/hashtable',
- 'pxdfiles': ['_libs/hashtable'],
'depends': (['pandas/_libs/src/klib/khash_python.h']
+ _pxi_dep['hashtable'])},
'_libs.tslib': {'pyxfile': '_libs/tslib',
- 'pxdfiles': ['_libs/src/util', '_libs/lib'],
+ 'pxdfiles': ['_libs/src/util'],
'depends': tseries_depends,
'sources': ['pandas/_libs/src/datetime/np_datetime.c',
'pandas/_libs/src/datetime/np_datetime_strings.c',
@@ -498,21 +489,20 @@ def pxd(name):
'_libs.index': {'pyxfile': '_libs/index',
'sources': ['pandas/_libs/src/datetime/np_datetime.c',
'pandas/_libs/src/datetime/np_datetime_strings.c'],
- 'pxdfiles': ['_libs/src/util', '_libs/hashtable'],
+ 'pxdfiles': ['_libs/src/util'],
'depends': _pxi_dep['index']},
'_libs.algos': {'pyxfile': '_libs/algos',
- 'pxdfiles': ['_libs/src/util', '_libs/algos', '_libs/hashtable'],
+ 'pxdfiles': ['_libs/src/util'],
'depends': _pxi_dep['algos']},
'_libs.groupby': {'pyxfile': '_libs/groupby',
- 'pxdfiles': ['_libs/src/util', '_libs/algos'],
- 'depends': _pxi_dep['groupby']},
+ 'pxdfiles': ['_libs/src/util'],
+ 'depends': _pxi_dep['groupby']},
'_libs.join': {'pyxfile': '_libs/join',
- 'pxdfiles': ['_libs/src/util', '_libs/hashtable'],
+ 'pxdfiles': ['_libs/src/util'],
'depends': _pxi_dep['join']},
'_libs.reshape': {'pyxfile': '_libs/reshape',
'depends': _pxi_dep['reshape']},
'_libs.interval': {'pyxfile': '_libs/interval',
- 'pxdfiles': ['_libs/hashtable'],
'depends': _pxi_dep['interval']},
'_libs.window': {'pyxfile': '_libs/window',
'pxdfiles': ['_libs/src/skiplist', '_libs/src/util'],
@@ -525,12 +515,9 @@ def pxd(name):
'sources': ['pandas/_libs/src/parser/tokenizer.c',
'pandas/_libs/src/parser/io.c']},
'_libs.sparse': {'pyxfile': '_libs/sparse',
- 'depends': (['pandas/_libs/sparse.pyx'] +
- _pxi_dep['sparse'])},
- '_libs.testing': {'pyxfile': '_libs/testing',
- 'depends': ['pandas/_libs/testing.pyx']},
- '_libs.hashing': {'pyxfile': '_libs/hashing',
- 'depends': ['pandas/_libs/hashing.pyx']},
+ 'depends': _pxi_dep['sparse']},
+ '_libs.testing': {'pyxfile': '_libs/testing'},
+ '_libs.hashing': {'pyxfile': '_libs/hashing'},
'io.sas._sas': {'pyxfile': 'io/sas/sas'},
}
| See discussion in #17419
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17478 | 2017-09-08T17:40:58Z | 2017-09-10T14:43:37Z | 2017-09-10T14:43:37Z | 2017-09-11T15:49:58Z |
ENH: Add Styler.where | diff --git a/doc/source/api.rst b/doc/source/api.rst
index c32a541d19605..27a4ab9cc6cbc 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -2062,6 +2062,7 @@ Style Application
Styler.apply
Styler.applymap
+ Styler.where
Styler.format
Styler.set_precision
Styler.set_table_styles
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index f50052347cfb5..577acd6861833 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -112,6 +112,7 @@ Other Enhancements
- `read_*` methods can now infer compression from non-string paths, such as ``pathlib.Path`` objects (:issue:`17206`).
- :func:`pd.read_sas()` now recognizes much more of the most frequently used date (datetime) formats in SAS7BDAT files (:issue:`15871`).
- :func:`DataFrame.items` and :func:`Series.items` is now present in both Python 2 and 3 and is lazy in all cases (:issue:`13918`, :issue:`17213`)
+- :func:`Styler.where` has been implemented. It is as a convenience for :func:`Styler.applymap` and enables simple DataFrame styling on the Jupyter notebook (:issue:`17474`).
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 87d672197be30..d7677e3642c26 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -618,11 +618,53 @@ def applymap(self, func, subset=None, **kwargs):
-------
self : Styler
+ See Also
+ --------
+ Styler.where
+
"""
self._todo.append((lambda instance: getattr(instance, '_applymap'),
(func, subset), kwargs))
return self
+ def where(self, cond, value, other=None, subset=None, **kwargs):
+ """
+ Apply a function elementwise, updating the HTML
+ representation with a style which is selected in
+ accordance with the return value of a function.
+
+ .. versionadded:: 0.21.0
+
+ Parameters
+ ----------
+ cond : callable
+ ``cond`` should take a scalar and return a boolean
+ value : str
+ applied when ``cond`` returns true
+ other : str
+ applied when ``cond`` returns false
+ subset : IndexSlice
+ a valid indexer to limit ``data`` to *before* applying the
+ function. Consider using a pandas.IndexSlice
+ kwargs : dict
+ pass along to ``cond``
+
+ Returns
+ -------
+ self : Styler
+
+ See Also
+ --------
+ Styler.applymap
+
+ """
+
+ if other is None:
+ other = ''
+
+ return self.applymap(lambda val: value if cond(val) else other,
+ subset=subset, **kwargs)
+
def set_precision(self, precision):
"""
Set the precision used to render.
diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py
index 59d9f938734ab..811381e4cbd2a 100644
--- a/pandas/tests/io/formats/test_style.py
+++ b/pandas/tests/io/formats/test_style.py
@@ -265,6 +265,64 @@ def f(x):
col in self.df.loc[slice_].columns)
assert result == expected
+ def test_where_with_one_style(self):
+ # GH 17474
+ def f(x):
+ return x > 0.5
+
+ style1 = 'foo: bar'
+
+ result = self.df.style.where(f, style1)._compute().ctx
+ expected = dict(((r, c),
+ [style1 if f(self.df.loc[row, col]) else ''])
+ for r, row in enumerate(self.df.index)
+ for c, col in enumerate(self.df.columns))
+ assert result == expected
+
+ def test_where_subset(self):
+ # GH 17474
+ def f(x):
+ return x > 0.5
+
+ style1 = 'foo: bar'
+ style2 = 'baz: foo'
+
+ slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
+ pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
+ pd.IndexSlice[:2, ['A', 'B']]]
+
+ for slice_ in slices:
+ result = self.df.style.where(f, style1, style2,
+ subset=slice_)._compute().ctx
+ expected = dict(((r, c),
+ [style1 if f(self.df.loc[row, col]) else style2])
+ for r, row in enumerate(self.df.index)
+ for c, col in enumerate(self.df.columns)
+ if row in self.df.loc[slice_].index and
+ col in self.df.loc[slice_].columns)
+ assert result == expected
+
+ def test_where_subset_compare_with_applymap(self):
+ # GH 17474
+ def f(x):
+ return x > 0.5
+
+ style1 = 'foo: bar'
+ style2 = 'baz: foo'
+
+ def g(x):
+ return style1 if f(x) else style2
+
+ slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
+ pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
+ pd.IndexSlice[:2, ['A', 'B']]]
+
+ for slice_ in slices:
+ result = self.df.style.where(f, style1, style2,
+ subset=slice_)._compute().ctx
+ expected = self.df.style.applymap(g, subset=slice_)._compute().ctx
+ assert result == expected
+
def test_empty(self):
df = pd.DataFrame({'A': [1, 0]})
s = df.style
| - [x] closes #16255
- [x] tests added / passed
Added `test_where_subset` in `pandas/tests/io/formats/test_style.py`.
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
It makes the redundant style condition simple.
```python
df.style.applymap(lambda val: 'color: %s' % 'red' if val < 0 else 'black')
df.style.where(lambda val: val < 0, 'color: red', 'color: black')
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/17474 | 2017-09-08T08:26:57Z | 2017-09-11T00:01:42Z | 2017-09-11T00:01:42Z | 2017-09-11T00:02:20Z |
Fix typo in setup.py introduced by #17422 | diff --git a/setup.py b/setup.py
index 4e326beefa908..3269fe7972cf0 100755
--- a/setup.py
+++ b/setup.py
@@ -341,7 +341,7 @@ class CheckSDist(sdist_class):
'pandas/_libs/window.pyx',
'pandas/_libs/sparse.pyx',
'pandas/_libs/parsers.pyx',
- 'panads/_libs/tslibs/frequencies.pyx',
+ 'pandas/_libs/tslibs/frequencies.pyx',
'pandas/io/sas/sas.pyx']
def initialize_options(self):
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17473 | 2017-09-08T02:47:29Z | 2017-09-08T10:05:06Z | 2017-09-08T10:05:06Z | 2017-10-30T16:25:02Z |
Follow up to #17422 | diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx
index 8f89b812fec04..e2a3baa8d6e8b 100644
--- a/pandas/_libs/period.pyx
+++ b/pandas/_libs/period.pyx
@@ -10,17 +10,16 @@ from cpython cimport (
from numpy cimport (int8_t, int32_t, int64_t, import_array, ndarray,
NPY_INT64, NPY_DATETIME, NPY_TIMEDELTA)
import numpy as np
+import_array()
from libc.stdlib cimport free
-from pandas import compat
from pandas.compat import PY2
cimport cython
from datetime cimport (
is_leapyear,
- PyDateTime_IMPORT,
pandas_datetimestruct,
pandas_datetimestruct_to_datetime,
pandas_datetime_to_datetimestruct,
@@ -29,6 +28,7 @@ from datetime cimport (
cimport util, lib
+from util cimport is_period_object, is_string_object
from lib cimport is_null_datetimelike, is_period
from pandas._libs import tslib, lib
@@ -41,6 +41,8 @@ from tslib cimport (
_get_dst_info,
_nat_scalar_rules)
+from tslibs.frequencies cimport get_freq_code
+
from pandas.tseries import offsets
from pandas.core.tools.datetimes import parse_time_string
from pandas.tseries import frequencies
@@ -329,8 +331,6 @@ cdef list str_extra_fmts = ["^`AB`^", "^`CD`^", "^`EF`^",
"^`GH`^", "^`IJ`^", "^`KL`^"]
cdef object _period_strftime(int64_t value, int freq, object fmt):
- import sys
-
cdef:
Py_ssize_t i
date_info dinfo
@@ -683,7 +683,7 @@ cdef class _Period(object):
def _maybe_convert_freq(cls, object freq):
if isinstance(freq, (int, tuple)):
- code, stride = frequencies.get_freq_code(freq)
+ code, stride = get_freq_code(freq)
freq = frequencies._get_freq_str(code, stride)
freq = frequencies.to_offset(freq)
@@ -707,7 +707,7 @@ cdef class _Period(object):
return self
def __richcmp__(self, other, op):
- if isinstance(other, Period):
+ if is_period_object(other):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
@@ -753,7 +753,7 @@ cdef class _Period(object):
return NotImplemented
def __add__(self, other):
- if isinstance(self, Period):
+ if is_period_object(self):
if isinstance(other, (timedelta, np.timedelta64,
offsets.DateOffset,
Timedelta)):
@@ -765,13 +765,13 @@ cdef class _Period(object):
return Period(ordinal=ordinal, freq=self.freq)
else: # pragma: no cover
return NotImplemented
- elif isinstance(other, Period):
+ elif is_period_object(other):
return other + self
else:
return NotImplemented
def __sub__(self, other):
- if isinstance(self, Period):
+ if is_period_object(self):
if isinstance(other, (timedelta, np.timedelta64,
offsets.DateOffset,
Timedelta)):
@@ -780,7 +780,7 @@ cdef class _Period(object):
elif lib.is_integer(other):
ordinal = self.ordinal - other * self.freq.n
return Period(ordinal=ordinal, freq=self.freq)
- elif isinstance(other, Period):
+ elif is_period_object(other):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
@@ -789,7 +789,7 @@ cdef class _Period(object):
return -other.__sub__(self)
else: # pragma: no cover
return NotImplemented
- elif isinstance(other, Period):
+ elif is_period_object(other):
if self is NaT:
return NaT
return NotImplemented
@@ -813,8 +813,8 @@ cdef class _Period(object):
"""
freq = self._maybe_convert_freq(freq)
how = _validate_end_alias(how)
- base1, mult1 = frequencies.get_freq_code(self.freq)
- base2, mult2 = frequencies.get_freq_code(freq)
+ base1, mult1 = get_freq_code(self.freq)
+ base2, mult2 = get_freq_code(freq)
# mult1 can't be negative or 0
end = how == 'E'
@@ -860,17 +860,17 @@ cdef class _Period(object):
how = _validate_end_alias(how)
if freq is None:
- base, mult = frequencies.get_freq_code(self.freq)
+ base, mult = get_freq_code(self.freq)
freq = frequencies.get_to_timestamp_base(base)
- base, mult = frequencies.get_freq_code(freq)
+ base, mult = get_freq_code(freq)
val = self.asfreq(freq, how)
dt64 = period_ordinal_to_dt64(val.ordinal, base)
return Timestamp(dt64, tz=tz)
cdef _field(self, alias):
- base, mult = frequencies.get_freq_code(self.freq)
+ base, mult = get_freq_code(self.freq)
return get_period_field(alias, self.ordinal, base)
property year:
@@ -935,7 +935,7 @@ cdef class _Period(object):
return self.freq.freqstr
def __repr__(self):
- base, mult = frequencies.get_freq_code(self.freq)
+ base, mult = get_freq_code(self.freq)
formatted = period_format(self.ordinal, base)
return "Period('%s', '%s')" % (formatted, self.freqstr)
@@ -946,7 +946,7 @@ cdef class _Period(object):
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
- base, mult = frequencies.get_freq_code(self.freq)
+ base, mult = get_freq_code(self.freq)
formatted = period_format(self.ordinal, base)
value = ("%s" % formatted)
return value
@@ -1096,7 +1096,7 @@ cdef class _Period(object):
>>> a.strftime('%b. %d, %Y was a %A')
'Jan. 01, 2001 was a Monday'
"""
- base, mult = frequencies.get_freq_code(self.freq)
+ base, mult = get_freq_code(self.freq)
return period_format(self.ordinal, base, fmt)
@@ -1161,10 +1161,10 @@ class Period(_Period):
ordinal = _ordinal_from_fields(year, month, quarter, day,
hour, minute, second, freq)
- elif isinstance(value, Period):
+ elif is_period_object(value):
other = value
- if freq is None or frequencies.get_freq_code(
- freq) == frequencies.get_freq_code(other.freq):
+ if freq is None or get_freq_code(
+ freq) == get_freq_code(other.freq):
ordinal = other.ordinal
freq = other.freq
else:
@@ -1174,7 +1174,7 @@ class Period(_Period):
elif is_null_datetimelike(value) or value in tslib._nat_strings:
ordinal = iNaT
- elif isinstance(value, compat.string_types) or lib.is_integer(value):
+ elif is_string_object(value) or lib.is_integer(value):
if lib.is_integer(value):
value = str(value)
value = value.upper()
@@ -1191,7 +1191,7 @@ class Period(_Period):
dt = value
if freq is None:
raise ValueError('Must supply freq for datetime value')
- elif isinstance(value, np.datetime64):
+ elif util.is_datetime64_object(value):
dt = Timestamp(value)
if freq is None:
raise ValueError('Must supply freq for datetime value')
@@ -1204,7 +1204,7 @@ class Period(_Period):
raise ValueError(msg)
if ordinal is None:
- base, mult = frequencies.get_freq_code(freq)
+ base, mult = get_freq_code(freq)
ordinal = get_period_ordinal(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.microsecond, 0, base)
@@ -1214,7 +1214,7 @@ class Period(_Period):
def _ordinal_from_fields(year, month, quarter, day,
hour, minute, second, freq):
- base, mult = frequencies.get_freq_code(freq)
+ base, mult = get_freq_code(freq)
if quarter is not None:
year, month = _quarter_to_myear(year, quarter, freq)
@@ -1227,8 +1227,7 @@ def _quarter_to_myear(year, quarter, freq):
if quarter <= 0 or quarter > 4:
raise ValueError('Quarter must be 1 <= q <= 4')
- mnum = frequencies._month_numbers[
- frequencies._get_rule_month(freq)] + 1
+ mnum = tslib._MONTH_NUMBERS[tslib._get_rule_month(freq)] + 1
month = (mnum + (quarter - 1) * 3) % 12 + 1
if month > mnum:
year -= 1
diff --git a/pandas/_libs/tslibs/frequencies.pxd b/pandas/_libs/tslibs/frequencies.pxd
new file mode 100644
index 0000000000000..974eb4ab45df0
--- /dev/null
+++ b/pandas/_libs/tslibs/frequencies.pxd
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 -*-
+# cython: profile=False
+
+cpdef get_freq_code(freqstr)
diff --git a/pandas/_libs/tslibs/frequencies.pyx b/pandas/_libs/tslibs/frequencies.pyx
index 35429e8ae87f0..f7889d76abbc7 100644
--- a/pandas/_libs/tslibs/frequencies.pyx
+++ b/pandas/_libs/tslibs/frequencies.pyx
@@ -150,6 +150,9 @@ _period_code_map = {
"N": 12000, # Nanosecondly
}
+_reverse_period_code_map = {
+ _period_code_map[key]: key for key in _period_code_map}
+
# Yearly aliases; careful not to put these in _reverse_period_code_map
_period_code_map.update({'Y' + key[1:]: _period_code_map[key]
for key in _period_code_map
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 6644a33245a84..085a3a784557b 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -20,7 +20,10 @@
from pandas._libs import lib, tslib
from pandas._libs.tslib import Timedelta
-from pandas._libs.tslibs.frequencies import get_freq_code, _base_and_stride
+from pandas._libs.tslibs.frequencies import ( # noqa
+ get_freq_code, _base_and_stride, _period_str_to_code,
+ _INVALID_FREQ_ERROR, opattern, _lite_rule_alias, _dont_uppercase,
+ _period_code_map, _reverse_period_code_map)
from pytz import AmbiguousTimeError
@@ -375,27 +378,6 @@ def get_period_alias(offset_str):
return _offset_to_period_map.get(offset_str, None)
-_lite_rule_alias = {
- 'W': 'W-SUN',
- 'Q': 'Q-DEC',
-
- 'A': 'A-DEC', # YearEnd(month=12),
- 'Y': 'A-DEC',
- 'AS': 'AS-JAN', # YearBegin(month=1),
- 'YS': 'AS-JAN',
- 'BA': 'BA-DEC', # BYearEnd(month=12),
- 'BY': 'BA-DEC',
- 'BAS': 'BAS-JAN', # BYearBegin(month=1),
- 'BYS': 'BAS-JAN',
-
- 'Min': 'T',
- 'min': 'T',
- 'ms': 'L',
- 'us': 'U',
- 'ns': 'N'
-}
-
-
_name_to_offset_map = {'days': Day(1),
'hours': Hour(1),
'minutes': Minute(1),
@@ -405,9 +387,6 @@ def get_period_alias(offset_str):
'nanoseconds': Nano(1)}
-_INVALID_FREQ_ERROR = "Invalid frequency: {0}"
-
-
@deprecate_kwarg(old_arg_name='freqstr', new_arg_name='freq')
def to_offset(freq):
"""
@@ -519,12 +498,6 @@ def to_offset(freq):
return delta
-# hack to handle WOM-1MON
-opattern = re.compile(
- r'([\-]?\d*|[\-]?\d*\.\d*)\s*([A-Za-z]+([\-][\dA-Za-z\-]+)?)'
-)
-
-
def get_base_alias(freqstr):
"""
Returns the base frequency alias, e.g., '5D' -> 'D'
@@ -532,9 +505,6 @@ def get_base_alias(freqstr):
return _base_and_stride(freqstr)[0]
-_dont_uppercase = set(('MS', 'ms'))
-
-
def get_offset(name):
"""
Return DateOffset object associated with rule name
@@ -583,96 +553,6 @@ def get_standard_freq(freq):
# ---------------------------------------------------------------------
# Period codes
-# period frequency constants corresponding to scikits timeseries
-# originals
-_period_code_map = {
- # Annual freqs with various fiscal year ends.
- # eg, 2005 for A-FEB runs Mar 1, 2004 to Feb 28, 2005
- "A-DEC": 1000, # Annual - December year end
- "A-JAN": 1001, # Annual - January year end
- "A-FEB": 1002, # Annual - February year end
- "A-MAR": 1003, # Annual - March year end
- "A-APR": 1004, # Annual - April year end
- "A-MAY": 1005, # Annual - May year end
- "A-JUN": 1006, # Annual - June year end
- "A-JUL": 1007, # Annual - July year end
- "A-AUG": 1008, # Annual - August year end
- "A-SEP": 1009, # Annual - September year end
- "A-OCT": 1010, # Annual - October year end
- "A-NOV": 1011, # Annual - November year end
-
- # Quarterly frequencies with various fiscal year ends.
- # eg, Q42005 for Q-OCT runs Aug 1, 2005 to Oct 31, 2005
- "Q-DEC": 2000, # Quarterly - December year end
- "Q-JAN": 2001, # Quarterly - January year end
- "Q-FEB": 2002, # Quarterly - February year end
- "Q-MAR": 2003, # Quarterly - March year end
- "Q-APR": 2004, # Quarterly - April year end
- "Q-MAY": 2005, # Quarterly - May year end
- "Q-JUN": 2006, # Quarterly - June year end
- "Q-JUL": 2007, # Quarterly - July year end
- "Q-AUG": 2008, # Quarterly - August year end
- "Q-SEP": 2009, # Quarterly - September year end
- "Q-OCT": 2010, # Quarterly - October year end
- "Q-NOV": 2011, # Quarterly - November year end
-
- "M": 3000, # Monthly
-
- "W-SUN": 4000, # Weekly - Sunday end of week
- "W-MON": 4001, # Weekly - Monday end of week
- "W-TUE": 4002, # Weekly - Tuesday end of week
- "W-WED": 4003, # Weekly - Wednesday end of week
- "W-THU": 4004, # Weekly - Thursday end of week
- "W-FRI": 4005, # Weekly - Friday end of week
- "W-SAT": 4006, # Weekly - Saturday end of week
-
- "B": 5000, # Business days
- "D": 6000, # Daily
- "H": 7000, # Hourly
- "T": 8000, # Minutely
- "S": 9000, # Secondly
- "L": 10000, # Millisecondly
- "U": 11000, # Microsecondly
- "N": 12000, # Nanosecondly
-}
-
-_reverse_period_code_map = {}
-for _k, _v in compat.iteritems(_period_code_map):
- _reverse_period_code_map[_v] = _k
-
-# Yearly aliases
-year_aliases = {}
-
-for k, v in compat.iteritems(_period_code_map):
- if k.startswith("A-"):
- alias = "Y" + k[1:]
- year_aliases[alias] = v
-
-_period_code_map.update(**year_aliases)
-del year_aliases
-
-_period_code_map.update({
- "Q": 2000, # Quarterly - December year end (default quarterly)
- "A": 1000, # Annual
- "W": 4000, # Weekly
- "C": 5000, # Custom Business Day
-})
-
-
-def _period_str_to_code(freqstr):
- freqstr = _lite_rule_alias.get(freqstr, freqstr)
-
- if freqstr not in _dont_uppercase:
- lower = freqstr.lower()
- freqstr = _lite_rule_alias.get(lower, freqstr)
-
- if freqstr not in _dont_uppercase:
- freqstr = freqstr.upper()
- try:
- return _period_code_map[freqstr]
- except KeyError:
- raise ValueError(_INVALID_FREQ_ERROR.format(freqstr))
-
def infer_freq(index, warn=True):
"""
| As a follow-up, this performs small touchups in a handful of places.
Implement .pxd file for tslibs.frequencies
cimport get_freq_code in period.pyx
Replace isinstance checks in period.pyx with C-equivalents form util
Remove unused imports from period.pyx
Remove code in tseries.frequencies that can now be imported from tslibs.frequencies
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17472 | 2017-09-08T02:28:35Z | 2017-09-08T10:16:14Z | 2017-09-08T10:16:14Z | 2017-10-30T16:25:01Z |
Replace * imports with explicit imports; remove unused declared const… | diff --git a/pandas/_libs/src/skiplist.pyx b/pandas/_libs/src/skiplist.pyx
index 559b529822a69..1524dca38d0e0 100644
--- a/pandas/_libs/src/skiplist.pyx
+++ b/pandas/_libs/src/skiplist.pyx
@@ -15,7 +15,6 @@ cdef double Log2(double x):
return log(x) / log(2.)
cimport numpy as np
-from numpy cimport *
import numpy as np
from random import random
diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx
index 9fb3d0662eb4f..b6bd6f92f6199 100644
--- a/pandas/_libs/window.pyx
+++ b/pandas/_libs/window.pyx
@@ -1,55 +1,29 @@
# cython: profile=False
# cython: boundscheck=False, wraparound=False, cdivision=True
-from numpy cimport *
+from cython cimport Py_ssize_t
+
cimport numpy as np
import numpy as np
cimport cython
-import_array()
+np.import_array()
cimport util
from libc.stdlib cimport malloc, free
-from numpy cimport NPY_INT8 as NPY_int8
-from numpy cimport NPY_INT16 as NPY_int16
-from numpy cimport NPY_INT32 as NPY_int32
-from numpy cimport NPY_INT64 as NPY_int64
-from numpy cimport NPY_FLOAT16 as NPY_float16
-from numpy cimport NPY_FLOAT32 as NPY_float32
-from numpy cimport NPY_FLOAT64 as NPY_float64
-
-from numpy cimport (int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t,
- uint32_t, uint64_t, float16_t, float32_t, float64_t)
-
-int8 = np.dtype(np.int8)
-int16 = np.dtype(np.int16)
-int32 = np.dtype(np.int32)
-int64 = np.dtype(np.int64)
-float16 = np.dtype(np.float16)
-float32 = np.dtype(np.float32)
-float64 = np.dtype(np.float64)
-
-cdef np.int8_t MINint8 = np.iinfo(np.int8).min
-cdef np.int16_t MINint16 = np.iinfo(np.int16).min
-cdef np.int32_t MINint32 = np.iinfo(np.int32).min
-cdef np.int64_t MINint64 = np.iinfo(np.int64).min
-cdef np.float16_t MINfloat16 = np.NINF
+
+from numpy cimport ndarray, double_t, int64_t, float64_t
+
cdef np.float32_t MINfloat32 = np.NINF
cdef np.float64_t MINfloat64 = np.NINF
-cdef np.int8_t MAXint8 = np.iinfo(np.int8).max
-cdef np.int16_t MAXint16 = np.iinfo(np.int16).max
-cdef np.int32_t MAXint32 = np.iinfo(np.int32).max
-cdef np.int64_t MAXint64 = np.iinfo(np.int64).max
-cdef np.float16_t MAXfloat16 = np.inf
cdef np.float32_t MAXfloat32 = np.inf
cdef np.float64_t MAXfloat64 = np.inf
cdef double NaN = <double> np.NaN
-cdef double nan = NaN
cdef inline int int_max(int a, int b): return a if a >= b else b
cdef inline int int_min(int a, int b): return a if a <= b else b
| …ants
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17470 | 2017-09-07T22:06:42Z | 2017-09-08T00:46:13Z | 2017-09-08T00:46:13Z | 2017-09-08T02:33:14Z |
DOC: Removed Timedelta.is_populated and fixed spelling errors | diff --git a/doc/source/api.rst b/doc/source/api.rst
index d34cec86638fb..c32a541d19605 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -1704,7 +1704,7 @@ Methods
Timestamp.floor
Timestamp.freq
Timestamp.freqstr
- Timestamp.from_ordinal
+ Timestamp.fromordinal
Timestamp.fromtimestamp
Timestamp.isocalendar
Timestamp.isoformat
@@ -1769,9 +1769,7 @@ Properties
Timedelta.asm8
Timedelta.components
Timedelta.days
- Timedelta.delta
Timedelta.freq
- Timedelta.is_populated
Timedelta.max
Timedelta.microseconds
Timedelta.min
@@ -1789,10 +1787,9 @@ Methods
Timedelta.floor
Timedelta.isoformat
Timedelta.round
- Timdelta.to_pytimedelta
+ Timedelta.to_pytimedelta
Timedelta.to_timedelta64
Timedelta.total_seconds
- Timedelta.view
Window
------
| - [x] closes #17369
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Additional fixes per comments in #17424 | https://api.github.com/repos/pandas-dev/pandas/pulls/17469 | 2017-09-07T20:26:25Z | 2017-09-08T00:47:53Z | 2017-09-08T00:47:53Z | 2017-09-11T07:39:05Z |
Fixed rendering of ticks in error bars | diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index 7db3b63fd8f08..2ae1a127fb218 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -1389,7 +1389,7 @@ Here is an example of one way to easily plot group means with standard deviation
# Plot
fig, ax = plt.subplots()
@savefig errorbar_example.png
- means.plot.bar(yerr=errors, ax=ax)
+ means.plot.bar(yerr=errors, ax=ax, capsize=4)
.. ipython:: python
:suppress:
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 9c6d01d236c57..dbe9dc7123e96 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -109,7 +109,10 @@ def _handle_date_column(col, utc=None, format=None):
issubclass(col.dtype.type, np.integer)):
# parse dates as timestamp
format = 's' if format is None else format
- return to_datetime(col, errors='coerce', unit=format, utc=utc)
+ if '%' in format:
+ return to_datetime(col, errors='coerce', format=format, utc=utc)
+ else:
+ return to_datetime(col, errors='coerce', unit=format, utc=utc)
elif is_datetime64tz_dtype(col):
# coerce to UTC timezone
# GH11216
| - [x] closes #17467
- [x] tests passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17468 | 2017-09-07T19:14:44Z | 2017-10-20T20:27:05Z | null | 2017-10-20T20:27:06Z |
BUG: Fix TypeError caused by GH13374 | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index f50052347cfb5..bfe7d974a6097 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -411,6 +411,7 @@ I/O
- Bug in :func:`read_csv` when called with a single-element list ``header`` would return a ``DataFrame`` of all NaN values (:issue:`7757`)
- Bug in :func:`read_stata` where value labels could not be read when using an iterator (:issue:`16923`)
- Bug in :func:`read_html` where import check fails when run in multiple threads (:issue:`16928`)
+- Bug in :func:`read_csv` where automatic delimiter detection caused a ``TypeError`` to be thrown when a bad line was encountered rather than the correct error message (:issue:`13374`)
Plotting
^^^^^^^^
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 6adf154aabba7..d9e83176d0d6e 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2836,7 +2836,9 @@ def _rows_to_cols(self, content):
for row_num, actual_len in bad_lines:
msg = ('Expected %d fields in line %d, saw %d' %
(col_len, row_num + 1, actual_len))
- if len(self.delimiter) > 1 and self.quoting != csv.QUOTE_NONE:
+ if (self.delimiter and
+ len(self.delimiter) > 1 and
+ self.quoting != csv.QUOTE_NONE):
# see gh-13374
reason = ('Error could possibly be due to quotes being '
'ignored when a multi-char delimiter is used.')
diff --git a/pandas/tests/io/parser/python_parser_only.py b/pandas/tests/io/parser/python_parser_only.py
index a0784d3aeae2d..c3dc91b3f188c 100644
--- a/pandas/tests/io/parser/python_parser_only.py
+++ b/pandas/tests/io/parser/python_parser_only.py
@@ -218,6 +218,25 @@ def test_multi_char_sep_quotes(self):
self.read_csv(StringIO(data), sep=',,',
quoting=csv.QUOTE_NONE)
+ def test_none_delimiter(self):
+ # see gh-13374 and gh-17465
+
+ data = "a,b,c\n0,1,2\n3,4,5,6\n7,8,9"
+ expected = DataFrame({'a': [0, 7],
+ 'b': [1, 8],
+ 'c': [2, 9]})
+
+ # We expect the third line in the data to be
+ # skipped because it is malformed,
+ # but we do not expect any errors to occur.
+ result = self.read_csv(StringIO(data), header=0,
+ sep=None,
+ error_bad_lines=False,
+ warn_bad_lines=True,
+ engine='python',
+ tupleize_cols=True)
+ tm.assert_frame_equal(result, expected)
+
def test_skipfooter_bad_row(self):
# see gh-13879
# see gh-15910
| - [x] closes #13374
- [x] `0 failed, 9873 passed, 1955 skipped, 11 xfailed, 4 warnings in 1475.44 seconds`
> added ``test_none_delimiter`` in ``pandas/tests/io/parser/python_parser_only.py``
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
> Bug in :func:`read_csv` where automatic delimiter detection caused a ``TypeError`` to be thrown when a bad line was encountered rather than the correct error message (:issue:`13374`)
@gfyoung | https://api.github.com/repos/pandas-dev/pandas/pulls/17465 | 2017-09-07T16:59:51Z | 2017-09-10T07:30:49Z | 2017-09-10T07:30:49Z | 2017-09-10T12:48:46Z |
DOC: Change plot style to matplotlib decault from ggplot | diff --git a/doc/source/10min.rst b/doc/source/10min.rst
index ef6b2d6ef2c90..0a23f490e6628 100644
--- a/doc/source/10min.rst
+++ b/doc/source/10min.rst
@@ -11,7 +11,7 @@
np.random.seed(123456)
np.set_printoptions(precision=4, suppress=True)
import matplotlib
- matplotlib.style.use('ggplot')
+ # matplotlib.style.use('default')
pd.options.display.max_rows = 15
#### portions of this were borrowed from the
diff --git a/doc/source/computation.rst b/doc/source/computation.rst
index 23699393958cf..97472ca4dd938 100644
--- a/doc/source/computation.rst
+++ b/doc/source/computation.rst
@@ -8,7 +8,7 @@
np.set_printoptions(precision=4, suppress=True)
import pandas as pd
import matplotlib
- matplotlib.style.use('ggplot')
+ # matplotlib.style.use('default')
import matplotlib.pyplot as plt
plt.close('all')
pd.options.display.max_rows=15
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 5bb3ba75fe51b..f13e5e67de07e 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -20,7 +20,7 @@
pd.options.display.max_rows=15
import matplotlib
- matplotlib.style.use('ggplot')
+ # matplotlib.style.use('default')
np.set_printoptions(precision=4, suppress=True)
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index ec0a1c7a00bf7..e5c7637ddb499 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -10,7 +10,7 @@
pd.options.display.max_rows = 15
import matplotlib
- matplotlib.style.use('ggplot')
+ # matplotlib.style.use('default')
import matplotlib.pyplot as plt
plt.close('all')
diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst
index 9e6f98923fca6..8ae830d7fd76b 100644
--- a/doc/source/gotchas.rst
+++ b/doc/source/gotchas.rst
@@ -14,7 +14,7 @@ Frequently Asked Questions (FAQ)
import pandas as pd
pd.options.display.max_rows = 15
import matplotlib
- matplotlib.style.use('ggplot')
+ # matplotlib.style.use('default')
import matplotlib.pyplot as plt
plt.close('all')
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index e1231b9a4a200..ddc7d7fcd2ff1 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -10,7 +10,7 @@
import pandas as pd
pd.options.display.max_rows = 15
import matplotlib
- matplotlib.style.use('ggplot')
+ # matplotlib.style.use('default')
import matplotlib.pyplot as plt
plt.close('all')
from collections import OrderedDict
diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index 65b411ccd4af2..1fffb2963b7ca 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -7,7 +7,7 @@
import pandas as pd
pd.options.display.max_rows=15
import matplotlib
- matplotlib.style.use('ggplot')
+ # matplotlib.style.use('default')
import matplotlib.pyplot as plt
.. _missing_data:
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index b5a261e3acac5..bed83758ee2ff 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -10,7 +10,7 @@
np.set_printoptions(precision=4, suppress=True)
pd.options.display.max_rows = 15
import matplotlib
- matplotlib.style.use('ggplot')
+ # matplotlib.style.use('default')
import matplotlib.pyplot as plt
plt.close('all')
@@ -24,13 +24,6 @@ We use the standard convention for referencing the matplotlib API:
import matplotlib.pyplot as plt
-The plots in this document are made using matplotlib's ``ggplot`` style (new in version 1.4):
-
-.. code-block:: python
-
- import matplotlib
- matplotlib.style.use('ggplot')
-
We provide the basics in pandas to easily create decent looking plots.
See the :ref:`ecosystem <ecosystem.visualization>` section for visualization
libraries that go beyond the basics documented here.
@@ -134,7 +127,7 @@ For example, a bar plot can be created the following way:
plt.figure();
@savefig bar_plot_ex.png
- df.iloc[5].plot(kind='bar'); plt.axhline(0, color='k')
+ df.iloc[5].plot(kind='bar');
.. versionadded:: 0.17.0
@@ -154,7 +147,7 @@ and :ref:`DataFrame.boxplot() <visualization.box>` methods, which use a separate
Finally, there are several :ref:`plotting functions <visualization.tools>` in ``pandas.plotting``
that take a :class:`Series` or :class:`DataFrame` as an argument. These
-include
+include:
* :ref:`Scatter Matrix <visualization.scatter_matrix>`
* :ref:`Andrews Curves <visualization.andrews_curves>`
@@ -1049,6 +1042,21 @@ be colored differently.
Plot Formatting
---------------
+Setting the plot style
+~~~~~~~~~~~~~~~~~~~~~~
+
+From version 1.5 and up, matplotlib offers a range of preconfigured plotting styles. Setting the
+style can be used to easily give plots the general look that you want.
+Setting the style is as easy as calling ``matplotlib.style.use(my_plot_style)`` before
+creating your plot. For example you could do ``matplotlib.style.use('ggplot')`` for ggplot-style
+plots.
+
+You can see the various available style names at ``matplotlib.style.available`` and it's very
+easy to try them out.
+
+General plot style arguments
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
Most plotting methods have a set of keyword arguments that control the
layout and formatting of the returned plot:
| A proposal to change plot style in the docs to ``seaborn`` from ``ggplot``.
See also #17423.
| https://api.github.com/repos/pandas-dev/pandas/pulls/17462 | 2017-09-07T11:36:36Z | 2017-09-25T08:13:56Z | 2017-09-25T08:13:56Z | 2017-09-26T09:39:46Z |
DOC: to_json | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index df5f1a8326acd..8d16b079ba2c8 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1265,7 +1265,7 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None,
Parameters
----------
path_or_buf : the path or buffer to write the result string
- if this is None, return a StringIO of the converted string
+ if this is None, return the converted string
orient : string
* Series
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17461 | 2017-09-07T10:57:49Z | 2017-09-07T11:41:24Z | 2017-09-07T11:41:24Z | 2017-09-07T11:41:26Z |
ENH: Add optional argument keep_index to dataframe melt method | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5991ec825c841..16162b699bfd1 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4367,6 +4367,10 @@ def unstack(self, level=-1, fill_value=None):
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
+ keep_index : boolean, optional, default False
+ If True, the original index is reused.
+ In the resulting MulitIndex the names of the unpivoted columns
+ are added as an additional level to ensure uniqueness.
See also
--------
@@ -4439,11 +4443,11 @@ def unstack(self, level=-1, fill_value=None):
versionadded='.. versionadded:: 0.20.0\n',
other='melt'))
def melt(self, id_vars=None, value_vars=None, var_name=None,
- value_name='value', col_level=None):
+ value_name='value', col_level=None, keep_index=False):
from pandas.core.reshape.reshape import melt
return melt(self, id_vars=id_vars, value_vars=value_vars,
var_name=var_name, value_name=value_name,
- col_level=col_level)
+ col_level=col_level, keep_index=keep_index)
# ----------------------------------------------------------------------
# Time series-related
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index b4abba8026b35..6a10fd218287b 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -720,8 +720,7 @@ def _convert_level_number(level_num, columns):
versionadded="",
other='DataFrame.melt'))
def melt(frame, id_vars=None, value_vars=None, var_name=None,
- value_name='value', col_level=None):
- # TODO: what about the existing index?
+ value_name='value', col_level=None, keep_index=False):
if id_vars is not None:
if not is_list_like(id_vars):
id_vars = [id_vars]
@@ -779,7 +778,22 @@ def melt(frame, id_vars=None, value_vars=None, var_name=None,
mdata[col] = np.asanyarray(frame.columns
._get_level_values(i)).repeat(N)
- return DataFrame(mdata, columns=mcolumns)
+ result = DataFrame(mdata, columns=mcolumns)
+
+ if keep_index:
+ orig_index_values = list(np.tile(frame.index.get_values(), K))
+
+ if len(frame.index.names) == len(set(frame.index.names)):
+ orig_index_names = frame.index.names
+ else:
+ orig_index_names = ["original_index_{i}".format(i=i)
+ for i in range(len(frame.index.names))]
+
+ result[orig_index_names] = DataFrame(orig_index_values)
+
+ result = result.set_index(orig_index_names + list(var_name))
+
+ return result
def lreshape(data, groups, dropna=True, label=None):
| Setting keep_index to True will reuse the original DataFrame index +
names of melted columns as additional level. closes issue #17440
- [ ] closes #17440
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
I appreciate any corrections, comments and/or help very much, as this is my first pull request on such a big project. Thank you.
| https://api.github.com/repos/pandas-dev/pandas/pulls/17459 | 2017-09-07T07:20:52Z | 2017-11-25T16:15:45Z | null | 2018-12-14T12:05:25Z |
Lock down kwargs in offsets signatures | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 61c05d1b226e0..8a6e0c24e235d 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -649,6 +649,7 @@ Other API Changes
- :func:`to_datetime` when passed a tz-aware ``origin=`` kwarg will now raise a more informative ``ValueError`` rather than a ``TypeError`` (:issue:`16842`)
- Renamed non-functional ``index`` to ``index_col`` in :func:`read_stata` to improve API consistency (:issue:`16342`)
- Bug in :func:`DataFrame.drop` caused boolean labels ``False`` and ``True`` to be treated as labels 0 and 1 respectively when dropping indices from a numeric index. This will now raise a ValueError (:issue:`16877`)
+- Restricted DateOffset keyword arguments. Previously, ``DateOffset`` subclasses allowed arbitrary keyword arguments which could lead to unexpected behavior. Now, only valid arguments will be accepted. (:issue:`17176`).
- Pandas no longer registers matplotlib converters on import. The converters
will be registered and used when the first plot is draw (:issue:`17710`)
diff --git a/pandas/tests/io/data/legacy_pickle/0.19.2/0.19.2_x86_64_darwin_2.7.14.pickle b/pandas/tests/io/data/legacy_pickle/0.19.2/0.19.2_x86_64_darwin_2.7.14.pickle
new file mode 100644
index 0000000000000..555be58cc33ac
Binary files /dev/null and b/pandas/tests/io/data/legacy_pickle/0.19.2/0.19.2_x86_64_darwin_2.7.14.pickle differ
diff --git a/pandas/tests/io/data/legacy_pickle/0.20.3/0.20.3_x86_64_darwin_2.7.14.pickle b/pandas/tests/io/data/legacy_pickle/0.20.3/0.20.3_x86_64_darwin_2.7.14.pickle
new file mode 100644
index 0000000000000..963e533c4d2b4
Binary files /dev/null and b/pandas/tests/io/data/legacy_pickle/0.20.3/0.20.3_x86_64_darwin_2.7.14.pickle differ
diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py
index 1cb2081409312..0b60d37d36c08 100755
--- a/pandas/tests/io/generate_legacy_storage_files.py
+++ b/pandas/tests/io/generate_legacy_storage_files.py
@@ -45,7 +45,10 @@
from pandas.tseries.offsets import (
DateOffset, Hour, Minute, Day,
MonthBegin, MonthEnd, YearBegin,
- YearEnd, Week,
+ YearEnd, Week, WeekOfMonth, LastWeekOfMonth,
+ BusinessDay, BusinessHour, CustomBusinessDay, FY5253,
+ Easter,
+ SemiMonthEnd, SemiMonthBegin,
QuarterBegin, QuarterEnd)
from pandas.compat import u
import os
@@ -53,7 +56,7 @@
import numpy as np
import pandas
import platform as pl
-
+from datetime import timedelta
_loose_version = LooseVersion(pandas.__version__)
@@ -201,6 +204,12 @@ def create_data():
freq='M')
off = {'DateOffset': DateOffset(years=1),
+ 'DateOffset_h_ns': DateOffset(hour=6, nanoseconds=5824),
+ 'BusinessDay': BusinessDay(offset=timedelta(seconds=9)),
+ 'BusinessHour': BusinessHour(normalize=True, n=6, end='15:14'),
+ 'CustomBusinessDay': CustomBusinessDay(weekmask='Mon Fri'),
+ 'SemiMonthBegin': SemiMonthBegin(day_of_month=9),
+ 'SemiMonthEnd': SemiMonthEnd(day_of_month=24),
'MonthBegin': MonthBegin(1),
'MonthEnd': MonthEnd(1),
'QuarterBegin': QuarterBegin(1),
@@ -209,6 +218,11 @@ def create_data():
'YearBegin': YearBegin(1),
'YearEnd': YearEnd(1),
'Week': Week(1),
+ 'Week_Tues': Week(2, normalize=False, weekday=1),
+ 'WeekOfMonth': WeekOfMonth(week=3, weekday=4),
+ 'LastWeekOfMonth': LastWeekOfMonth(n=1, weekday=3),
+ 'FY5253': FY5253(n=2, weekday=6, startingMonth=7, variation="last"),
+ 'Easter': Easter(),
'Hour': Hour(1),
'Minute': Minute(1)}
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 3a2a613986dca..c65691618e654 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -581,6 +581,7 @@ def __setstate__(self, state):
if '_offset' in state: # pragma: no cover
raise ValueError('Unexpected key `_offset`')
state['_offset'] = state.pop('offset')
+ state['kwds']['offset'] = state['_offset']
self.__dict__ = state
if 'weekmask' in state and 'holidays' in state:
calendar, holidays = _get_calendar(weekmask=self.weekmask,
@@ -598,11 +599,11 @@ class BusinessDay(BusinessMixin, SingleConstructorOffset):
_prefix = 'B'
_adjust_dst = True
- def __init__(self, n=1, normalize=False, **kwds):
+ def __init__(self, n=1, normalize=False, offset=timedelta(0)):
self.n = int(n)
self.normalize = normalize
- self.kwds = kwds
- self._offset = kwds.get('offset', timedelta(0))
+ self.kwds = {'offset': offset}
+ self._offset = offset
def _offset_str(self):
def get_str(td):
@@ -693,14 +694,13 @@ def onOffset(self, dt):
class BusinessHourMixin(BusinessMixin):
- def __init__(self, **kwds):
+ def __init__(self, start='09:00', end='17:00', offset=timedelta(0)):
# must be validated here to equality check
- kwds['start'] = self._validate_time(kwds.get('start', '09:00'))
- kwds['end'] = self._validate_time(kwds.get('end', '17:00'))
+ kwds = {'offset': offset}
+ self.start = kwds['start'] = self._validate_time(start)
+ self.end = kwds['end'] = self._validate_time(end)
self.kwds = kwds
- self._offset = kwds.get('offset', timedelta(0))
- self.start = kwds.get('start', '09:00')
- self.end = kwds.get('end', '17:00')
+ self._offset = offset
def _validate_time(self, t_input):
from datetime import time as dt_time
@@ -923,10 +923,11 @@ class BusinessHour(BusinessHourMixin, SingleConstructorOffset):
_prefix = 'BH'
_anchor = 0
- def __init__(self, n=1, normalize=False, **kwds):
+ def __init__(self, n=1, normalize=False, start='09:00',
+ end='17:00', offset=timedelta(0)):
self.n = int(n)
self.normalize = normalize
- super(BusinessHour, self).__init__(**kwds)
+ super(BusinessHour, self).__init__(start=start, end=end, offset=offset)
@cache_readonly
def next_bday(self):
@@ -960,11 +961,11 @@ class CustomBusinessDay(BusinessDay):
_prefix = 'C'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
- holidays=None, calendar=None, **kwds):
+ holidays=None, calendar=None, offset=timedelta(0)):
self.n = int(n)
self.normalize = normalize
- self.kwds = kwds
- self._offset = kwds.get('offset', timedelta(0))
+ self._offset = offset
+ self.kwds = {}
calendar, holidays = _get_calendar(weekmask=weekmask,
holidays=holidays,
@@ -976,6 +977,7 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.kwds['weekmask'] = self.weekmask = weekmask
self.kwds['holidays'] = self.holidays = holidays
self.kwds['calendar'] = self.calendar = calendar
+ self.kwds['offset'] = offset
@apply_wraps
def apply(self, other):
@@ -1026,10 +1028,12 @@ class CustomBusinessHour(BusinessHourMixin, SingleConstructorOffset):
_anchor = 0
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
- holidays=None, calendar=None, **kwds):
+ holidays=None, calendar=None,
+ start='09:00', end='17:00', offset=timedelta(0)):
self.n = int(n)
self.normalize = normalize
- super(CustomBusinessHour, self).__init__(**kwds)
+ super(CustomBusinessHour, self).__init__(start=start,
+ end=end, offset=offset)
calendar, holidays = _get_calendar(weekmask=weekmask,
holidays=holidays,
@@ -1121,7 +1125,7 @@ class SemiMonthOffset(DateOffset):
_default_day_of_month = 15
_min_day_of_month = 2
- def __init__(self, n=1, day_of_month=None, normalize=False, **kwds):
+ def __init__(self, n=1, normalize=False, day_of_month=None):
if day_of_month is None:
self.day_of_month = self._default_day_of_month
else:
@@ -1132,8 +1136,7 @@ def __init__(self, n=1, day_of_month=None, normalize=False, **kwds):
day=self.day_of_month))
self.n = int(n)
self.normalize = normalize
- self.kwds = kwds
- self.kwds['day_of_month'] = self.day_of_month
+ self.kwds = {'day_of_month': self.day_of_month}
@classmethod
def _from_name(cls, suffix=None):
@@ -1408,11 +1411,11 @@ class CustomBusinessMonthEnd(BusinessMixin, MonthOffset):
_prefix = 'CBM'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
- holidays=None, calendar=None, **kwds):
+ holidays=None, calendar=None, offset=timedelta(0)):
self.n = int(n)
self.normalize = normalize
- self.kwds = kwds
- self._offset = kwds.get('offset', timedelta(0))
+ self._offset = offset
+ self.kwds = {}
calendar, holidays = _get_calendar(weekmask=weekmask,
holidays=holidays,
@@ -1420,6 +1423,7 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.kwds['weekmask'] = self.weekmask = weekmask
self.kwds['holidays'] = self.holidays = holidays
self.kwds['calendar'] = self.calendar = calendar
+ self.kwds['offset'] = offset
@cache_readonly
def cbday(self):
@@ -1430,7 +1434,7 @@ def cbday(self):
def m_offset(self):
kwds = self.kwds
kwds = {key: kwds[key] for key in kwds
- if key not in ['calendar', 'weekmask', 'holidays']}
+ if key not in ['calendar', 'weekmask', 'holidays', 'offset']}
return MonthEnd(n=1, normalize=self.normalize, **kwds)
@apply_wraps
@@ -1478,20 +1482,21 @@ class CustomBusinessMonthBegin(BusinessMixin, MonthOffset):
_prefix = 'CBMS'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
- holidays=None, calendar=None, **kwds):
+ holidays=None, calendar=None, offset=timedelta(0)):
self.n = int(n)
self.normalize = normalize
- self.kwds = kwds
- self._offset = kwds.get('offset', timedelta(0))
+ self._offset = offset
+ self.kwds = {}
# _get_calendar does validation and possible transformation
# of calendar and holidays.
calendar, holidays = _get_calendar(weekmask=weekmask,
holidays=holidays,
calendar=calendar)
- kwds['calendar'] = self.calendar = calendar
- kwds['weekmask'] = self.weekmask = weekmask
- kwds['holidays'] = self.holidays = holidays
+ self.kwds['calendar'] = self.calendar = calendar
+ self.kwds['weekmask'] = self.weekmask = weekmask
+ self.kwds['holidays'] = self.holidays = holidays
+ self.kwds['offset'] = offset
@cache_readonly
def cbday(self):
@@ -1502,7 +1507,7 @@ def cbday(self):
def m_offset(self):
kwds = self.kwds
kwds = {key: kwds[key] for key in kwds
- if key not in ['calendar', 'weekmask', 'holidays']}
+ if key not in ['calendar', 'weekmask', 'holidays', 'offset']}
return MonthBegin(n=1, normalize=self.normalize, **kwds)
@apply_wraps
@@ -1540,17 +1545,17 @@ class Week(DateOffset):
_adjust_dst = True
_inc = timedelta(weeks=1)
- def __init__(self, n=1, normalize=False, **kwds):
+ def __init__(self, n=1, normalize=False, weekday=None):
self.n = n
self.normalize = normalize
- self.weekday = kwds.get('weekday', None)
+ self.weekday = weekday
if self.weekday is not None:
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got {day}'
.format(day=self.weekday))
- self.kwds = kwds
+ self.kwds = {'weekday': weekday}
def isAnchored(self):
return (self.n == 1 and self.weekday is not None)
@@ -1642,9 +1647,9 @@ class WeekOfMonth(DateOffset):
Parameters
----------
n : int
- week : {0, 1, 2, 3, ...}
+ week : {0, 1, 2, 3, ...}, default None
0 is 1st week of month, 1 2nd week, etc.
- weekday : {0, 1, ..., 6}
+ weekday : {0, 1, ..., 6}, default None
0: Mondays
1: Tuesdays
2: Wednesdays
@@ -1656,11 +1661,11 @@ class WeekOfMonth(DateOffset):
_adjust_dst = True
- def __init__(self, n=1, normalize=False, **kwds):
+ def __init__(self, n=1, normalize=False, week=None, weekday=None):
self.n = n
self.normalize = normalize
- self.weekday = kwds['weekday']
- self.week = kwds['week']
+ self.weekday = weekday
+ self.week = week
if self.n == 0:
raise ValueError('N cannot be 0')
@@ -1672,7 +1677,7 @@ def __init__(self, n=1, normalize=False, **kwds):
raise ValueError('Week must be 0<=week<=3, got {week}'
.format(week=self.week))
- self.kwds = kwds
+ self.kwds = {'weekday': weekday, 'week': week}
@apply_wraps
def apply(self, other):
@@ -1742,8 +1747,8 @@ class LastWeekOfMonth(DateOffset):
Parameters
----------
- n : int
- weekday : {0, 1, ..., 6}
+ n : int, default 1
+ weekday : {0, 1, ..., 6}, default None
0: Mondays
1: Tuesdays
2: Wednesdays
@@ -1751,12 +1756,13 @@ class LastWeekOfMonth(DateOffset):
4: Fridays
5: Saturdays
6: Sundays
+
"""
- def __init__(self, n=1, normalize=False, **kwds):
+ def __init__(self, n=1, normalize=False, weekday=None):
self.n = n
self.normalize = normalize
- self.weekday = kwds['weekday']
+ self.weekday = weekday
if self.n == 0:
raise ValueError('N cannot be 0')
@@ -1765,7 +1771,7 @@ def __init__(self, n=1, normalize=False, **kwds):
raise ValueError('Day must be 0<=day<=6, got {day}'
.format(day=self.weekday))
- self.kwds = kwds
+ self.kwds = {'weekday': weekday}
@apply_wraps
def apply(self, other):
@@ -1829,13 +1835,14 @@ class QuarterOffset(DateOffset):
# TODO: Consider combining QuarterOffset and YearOffset __init__ at some
# point
- def __init__(self, n=1, normalize=False, **kwds):
+ def __init__(self, n=1, normalize=False, startingMonth=None):
self.n = n
self.normalize = normalize
- self.startingMonth = kwds.get('startingMonth',
- self._default_startingMonth)
+ if startingMonth is None:
+ startingMonth = self._default_startingMonth
+ self.startingMonth = startingMonth
- self.kwds = kwds
+ self.kwds = {'startingMonth': startingMonth}
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@@ -2017,13 +2024,14 @@ class YearOffset(DateOffset):
"""DateOffset that just needs a month"""
_adjust_dst = True
- def __init__(self, n=1, normalize=False, **kwds):
- self.month = kwds.get('month', self._default_month)
+ def __init__(self, n=1, normalize=False, month=None):
+ month = month if month is not None else self._default_month
+ self.month = month
if self.month < 1 or self.month > 12:
raise ValueError('Month must go from 1 to 12')
- DateOffset.__init__(self, n=n, normalize=normalize, **kwds)
+ DateOffset.__init__(self, n=n, normalize=normalize, month=month)
@classmethod
def _from_name(cls, suffix=None):
@@ -2262,15 +2270,17 @@ class FY5253(DateOffset):
_suffix_prefix_nearest = 'N'
_adjust_dst = True
- def __init__(self, n=1, normalize=False, **kwds):
+ def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1,
+ variation="nearest"):
self.n = n
self.normalize = normalize
- self.startingMonth = kwds['startingMonth']
- self.weekday = kwds["weekday"]
+ self.startingMonth = startingMonth
+ self.weekday = weekday
- self.variation = kwds["variation"]
+ self.variation = variation
- self.kwds = kwds
+ self.kwds = {'weekday': weekday, 'startingMonth': startingMonth,
+ 'variation': variation}
if self.n == 0:
raise ValueError('N cannot be 0')
@@ -2510,24 +2520,29 @@ class FY5253Quarter(DateOffset):
_prefix = 'REQ'
_adjust_dst = True
- def __init__(self, n=1, normalize=False, **kwds):
+ def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1,
+ qtr_with_extra_week=1, variation="nearest"):
self.n = n
self.normalize = normalize
- self.qtr_with_extra_week = kwds["qtr_with_extra_week"]
+ self.weekday = weekday
+ self.startingMonth = startingMonth
+ self.qtr_with_extra_week = qtr_with_extra_week
+ self.variation = variation
- self.kwds = kwds
+ self.kwds = {'weekday': weekday, 'startingMonth': startingMonth,
+ 'qtr_with_extra_week': qtr_with_extra_week,
+ 'variation': variation}
if self.n == 0:
raise ValueError('N cannot be 0')
@cache_readonly
def _offset(self):
- kwds = self.kwds
return FY5253(
- startingMonth=kwds['startingMonth'],
- weekday=kwds["weekday"],
- variation=kwds["variation"])
+ startingMonth=self.startingMonth,
+ weekday=self.weekday,
+ variation=self.variation)
def isAnchored(self):
return self.n == 1 and self._offset.isAnchored()
| Explicitly specify allowed kwargs in `__init__` for `Week`, `WeekOfMonth`, `LastWeekOfMonth` and `QuarterOffset`.
Ref #17176
Intended to be orthogonal/complementary to #17450.
Will add tests after confirming we're on the same page.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17458 | 2017-09-07T04:04:38Z | 2017-10-06T15:38:04Z | 2017-10-06T15:38:04Z | 2017-10-30T16:22:46Z |
cdef out dtype for _Timestamp._get_field | diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index a7b33c669a8b8..7e009652f7f0c 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -1275,6 +1275,7 @@ cdef class _Timestamp(datetime):
cpdef _get_field(self, field):
cdef:
int64_t val
+ ndarray[int32_t] out
val = self._maybe_convert_value_to_local()
out = get_date_field(np.array([val], dtype=np.int64), field)
return int(out[0])
@@ -1282,6 +1283,7 @@ cdef class _Timestamp(datetime):
cpdef _get_named_field(self, field):
cdef:
int64_t val
+ ndarray[object] out
val = self._maybe_convert_value_to_local()
out = get_date_name_field(np.array([val], dtype=np.int64), field)
return out[0]
@@ -1291,9 +1293,7 @@ cdef class _Timestamp(datetime):
'startingMonth', self.freq.kwds.get(
'month', 12)) if self.freq else 12
freqstr = self.freqstr if self.freq else None
- val = self.value
- if self.tz is not None and not _is_utc(self.tz):
- val = tz_convert_single(self.value, 'UTC', self.tz)
+ val = self._maybe_convert_value_to_local()
out = get_start_end_field(
np.array([val], dtype=np.int64), field, freqstr, month_kw)
return out[0]
| Follow-up to @jreback [comment](https://github.com/pandas-dev/pandas/pull/17377#discussion_r136948947) in #17377 to dtype the `out` variable in `_get_field` and `get_named_field`
Also, replaced value conversion in `_get_start_end_field` with `_maybe_convert_value_to_local` function created in #17377 | https://api.github.com/repos/pandas-dev/pandas/pulls/17457 | 2017-09-07T03:27:03Z | 2017-09-07T11:28:12Z | 2017-09-07T11:28:12Z | 2017-09-08T01:52:02Z |
ENH: Implement MultiIndex.is_monotonic_decreasing | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index c808babeee5d9..3e26f3137e4fc 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -114,7 +114,7 @@ Other Enhancements
- :func:`pd.read_sas()` now recognizes much more of the most frequently used date (datetime) formats in SAS7BDAT files (:issue:`15871`).
- :func:`DataFrame.items` and :func:`Series.items` is now present in both Python 2 and 3 and is lazy in all cases (:issue:`13918`, :issue:`17213`)
- :func:`Styler.where` has been implemented. It is as a convenience for :func:`Styler.applymap` and enables simple DataFrame styling on the Jupyter notebook (:issue:`17474`).
-
+- :func:`MultiIndex.is_monotonic_decreasing` has been implemented. Previously returned ``False`` in all cases. (:issue:`16554`)
.. _whatsnew_0210.api_breaking:
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 8b2cf0e7c0b40..ea613a27b6521 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -706,13 +706,14 @@ def is_monotonic_increasing(self):
# we have mixed types and np.lexsort is not happy
return Index(self.values).is_monotonic
- @property
+ @cache_readonly
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
"""
- return False
+ # monotonic decreasing if and only if reverse is monotonic increasing
+ return self[::-1].is_monotonic_increasing
@cache_readonly
def is_unique(self):
diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py
index 13c3b35e4d85d..dc59495f619b0 100644
--- a/pandas/tests/indexes/test_interval.py
+++ b/pandas/tests/indexes/test_interval.py
@@ -263,21 +263,109 @@ def test_take(self):
actual = self.index.take([0, 0, 1])
assert expected.equals(actual)
- def test_monotonic_and_unique(self):
- assert self.index.is_monotonic
- assert self.index.is_unique
+ def test_unique(self):
+ # unique non-overlapping
+ idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
+ assert idx.is_unique
+ # unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)])
- assert idx.is_monotonic
assert idx.is_unique
- idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (1, 2)])
- assert not idx.is_monotonic
+ # unique overlapping - shared endpoints
+ idx = pd.IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)])
+ assert idx.is_unique
+
+ # unique nested
+ idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)])
+ assert idx.is_unique
+
+ # duplicate
+ idx = IntervalIndex.from_tuples([(0, 1), (0, 1), (2, 3)])
+ assert not idx.is_unique
+
+ # unique mixed
+ idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')])
assert idx.is_unique
- idx = IntervalIndex.from_tuples([(0, 2), (0, 2)])
+ # duplicate mixed
+ idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b'), (0, 1)])
assert not idx.is_unique
+
+ # empty
+ idx = IntervalIndex([])
+ assert idx.is_unique
+
+ def test_monotonic(self):
+ # increasing non-overlapping
+ idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
+ assert idx.is_monotonic
+ assert idx._is_strictly_monotonic_increasing
+ assert not idx.is_monotonic_decreasing
+ assert not idx._is_strictly_monotonic_decreasing
+
+ # decreasing non-overlapping
+ idx = IntervalIndex.from_tuples([(4, 5), (2, 3), (1, 2)])
+ assert not idx.is_monotonic
+ assert not idx._is_strictly_monotonic_increasing
+ assert idx.is_monotonic_decreasing
+ assert idx._is_strictly_monotonic_decreasing
+
+ # unordered non-overlapping
+ idx = IntervalIndex.from_tuples([(0, 1), (4, 5), (2, 3)])
+ assert not idx.is_monotonic
+ assert not idx._is_strictly_monotonic_increasing
+ assert not idx.is_monotonic_decreasing
+ assert not idx._is_strictly_monotonic_decreasing
+
+ # increasing overlapping
+ idx = IntervalIndex.from_tuples([(0, 2), (0.5, 2.5), (1, 3)])
+ assert idx.is_monotonic
+ assert idx._is_strictly_monotonic_increasing
+ assert not idx.is_monotonic_decreasing
+ assert not idx._is_strictly_monotonic_decreasing
+
+ # decreasing overlapping
+ idx = IntervalIndex.from_tuples([(1, 3), (0.5, 2.5), (0, 2)])
+ assert not idx.is_monotonic
+ assert not idx._is_strictly_monotonic_increasing
+ assert idx.is_monotonic_decreasing
+ assert idx._is_strictly_monotonic_decreasing
+
+ # unordered overlapping
+ idx = IntervalIndex.from_tuples([(0.5, 2.5), (0, 2), (1, 3)])
+ assert not idx.is_monotonic
+ assert not idx._is_strictly_monotonic_increasing
+ assert not idx.is_monotonic_decreasing
+ assert not idx._is_strictly_monotonic_decreasing
+
+ # increasing overlapping shared endpoints
+ idx = pd.IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)])
+ assert idx.is_monotonic
+ assert idx._is_strictly_monotonic_increasing
+ assert not idx.is_monotonic_decreasing
+ assert not idx._is_strictly_monotonic_decreasing
+
+ # decreasing overlapping shared endpoints
+ idx = pd.IntervalIndex.from_tuples([(2, 3), (1, 3), (1, 2)])
+ assert not idx.is_monotonic
+ assert not idx._is_strictly_monotonic_increasing
+ assert idx.is_monotonic_decreasing
+ assert idx._is_strictly_monotonic_decreasing
+
+ # stationary
+ idx = IntervalIndex.from_tuples([(0, 1), (0, 1)])
+ assert idx.is_monotonic
+ assert not idx._is_strictly_monotonic_increasing
+ assert idx.is_monotonic_decreasing
+ assert not idx._is_strictly_monotonic_decreasing
+
+ # empty
+ idx = IntervalIndex([])
assert idx.is_monotonic
+ assert idx._is_strictly_monotonic_increasing
+ assert idx.is_monotonic_decreasing
+ assert idx._is_strictly_monotonic_decreasing
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index 86308192c9166..b1b5413b4d081 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -2381,7 +2381,7 @@ def test_level_setting_resets_attributes(self):
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
- def test_is_monotonic(self):
+ def test_is_monotonic_increasing(self):
i = MultiIndex.from_product([np.arange(10),
np.arange(10)], names=['one', 'two'])
assert i.is_monotonic
@@ -2442,14 +2442,89 @@ def test_is_monotonic(self):
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
- def test_is_strictly_monotonic(self):
+ # empty
+ i = MultiIndex.from_arrays([[], []])
+ assert i.is_monotonic
+ assert Index(i.values).is_monotonic
+ assert i._is_strictly_monotonic_increasing
+ assert Index(i.values)._is_strictly_monotonic_increasing
+
+ def test_is_monotonic_decreasing(self):
+ i = MultiIndex.from_product([np.arange(9, -1, -1),
+ np.arange(9, -1, -1)],
+ names=['one', 'two'])
+ assert i.is_monotonic_decreasing
+ assert i._is_strictly_monotonic_decreasing
+ assert Index(i.values).is_monotonic_decreasing
+ assert i._is_strictly_monotonic_decreasing
+
+ i = MultiIndex.from_product([np.arange(10),
+ np.arange(10, 0, -1)],
+ names=['one', 'two'])
+ assert not i.is_monotonic_decreasing
+ assert not i._is_strictly_monotonic_decreasing
+ assert not Index(i.values).is_monotonic_decreasing
+ assert not Index(i.values)._is_strictly_monotonic_decreasing
+
+ i = MultiIndex.from_product([np.arange(10, 0, -1),
+ np.arange(10)], names=['one', 'two'])
+ assert not i.is_monotonic_decreasing
+ assert not i._is_strictly_monotonic_decreasing
+ assert not Index(i.values).is_monotonic_decreasing
+ assert not Index(i.values)._is_strictly_monotonic_decreasing
+
+ i = MultiIndex.from_product([[2.0, np.nan, 1.0], ['c', 'b', 'a']])
+ assert not i.is_monotonic_decreasing
+ assert not i._is_strictly_monotonic_decreasing
+ assert not Index(i.values).is_monotonic_decreasing
+ assert not Index(i.values)._is_strictly_monotonic_decreasing
+
+ # string ordering
+ i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'],
+ ['three', 'two', 'one']],
+ labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
+ [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
+ names=['first', 'second'])
+ assert not i.is_monotonic_decreasing
+ assert not Index(i.values).is_monotonic_decreasing
+ assert not i._is_strictly_monotonic_decreasing
+ assert not Index(i.values)._is_strictly_monotonic_decreasing
+
+ i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'],
+ ['zenith', 'next', 'mom']],
+ labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
+ [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
+ names=['first', 'second'])
+ assert i.is_monotonic_decreasing
+ assert Index(i.values).is_monotonic_decreasing
+ assert i._is_strictly_monotonic_decreasing
+ assert Index(i.values)._is_strictly_monotonic_decreasing
+
+ # mixed levels, hits the TypeError
+ i = MultiIndex(
+ levels=[[4, 3, 2, 1], ['nl0000301109', 'nl0000289965',
+ 'nl0000289783', 'lu0197800237',
+ 'gb00b03mlx29']],
+ labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]],
+ names=['household_id', 'asset_id'])
+
+ assert not i.is_monotonic_decreasing
+ assert not i._is_strictly_monotonic_decreasing
+
+ # empty
+ i = MultiIndex.from_arrays([[], []])
+ assert i.is_monotonic_decreasing
+ assert Index(i.values).is_monotonic_decreasing
+ assert i._is_strictly_monotonic_decreasing
+ assert Index(i.values)._is_strictly_monotonic_decreasing
+
+ def test_is_strictly_monotonic_increasing(self):
idx = pd.MultiIndex(levels=[['bar', 'baz'], ['mom', 'next']],
labels=[[0, 0, 1, 1], [0, 0, 0, 1]])
assert idx.is_monotonic_increasing
assert not idx._is_strictly_monotonic_increasing
- @pytest.mark.xfail(reason="buggy MultiIndex.is_monotonic_decresaing.")
- def test__is_strictly_monotonic_decreasing(self):
+ def test_is_strictly_monotonic_decreasing(self):
idx = pd.MultiIndex(levels=[['baz', 'bar'], ['next', 'mom']],
labels=[[0, 0, 1, 1], [0, 0, 0, 1]])
assert idx.is_monotonic_decreasing
| - [X] closes #16554
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
Note that this is a dupe PR of #16573, which appears to have gone stale.
Implementation ended up being easier than what I suggested in the issue; can use the fact that an index is monotonic decreasing if it's reverse is monotonic increasing. Just a matter of reversing the index and calling `is_monotonic_increasing`.
Regarding `MultiIndex` tests:
- Added a test case to the `is_monotonic_increasing` test to verify it's working for an empty index.
- The `is_monotonic_decreasing` test cases consist of decreasing versions of the test cases in the `is_monotonic_increasing` test.
Regarding `IntervalIndex` tests:
- Added a test for `IntervalIndex.is_monotonic_increasing` and `IntervalIndex.is_monotonic_decreasing` since they use the MultiIndex implementation under the hood.
- Split a test that originally checked both monotonic and `is_unique` into separate tests. Expanded the test cases for both tests. | https://api.github.com/repos/pandas-dev/pandas/pulls/17455 | 2017-09-06T22:46:15Z | 2017-09-19T20:28:15Z | 2017-09-19T20:28:15Z | 2017-09-19T22:37:59Z |
ENH: gb.is_monotonic_increasing #17015 | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 44f87aa3e1cec..103b0fe9ff019 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -2240,6 +2240,8 @@ The following methods are available only for ``SeriesGroupBy`` objects.
SeriesGroupBy.nunique
SeriesGroupBy.unique
SeriesGroupBy.value_counts
+ SeriesGroupBy.is_monotonic_increasing
+ SeriesGroupBy.is_monotonic_decreasing
The following methods are available only for ``DataFrameGroupBy`` objects.
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index acab9d0bbebf8..3990edac8584a 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -323,6 +323,7 @@ Other Enhancements
- ``IntervalIndex.astype`` now supports conversions between subtypes when passed an ``IntervalDtype`` (:issue:`19197`)
- :class:`IntervalIndex` and its associated constructor methods (``from_arrays``, ``from_breaks``, ``from_tuples``) have gained a ``dtype`` parameter (:issue:`19262`)
+- Added :func:`SeriesGroupBy.is_monotonic_increasing` and :func:`SeriesGroupBy.is_monotonic_decreasing` (:issue:`17015`)
.. _whatsnew_0230.api_breaking:
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 0363bcd02aa16..b1615f720368d 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -336,7 +336,9 @@
]) | _plotting_methods
_series_apply_whitelist = ((_common_apply_whitelist |
- {'nlargest', 'nsmallest'}) -
+ {'nlargest', 'nsmallest',
+ 'is_monotonic_increasing',
+ 'is_monotonic_decreasing'}) -
{'boxplot'}) | frozenset(['dtype', 'unique'])
_dataframe_apply_whitelist = ((_common_apply_whitelist |
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 6eacd45deb7bc..4cf7c8013aa2b 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -2639,7 +2639,7 @@ def test_group_shift_with_null_key(self):
# Generate a moderately large dataframe with occasional missing
# values in column `B`, and then group by [`A`, `B`]. This should
# force `-1` in `labels` array of `g.grouper.group_info` exactly
- # at those places, where the group-by key is partilly missing.
+ # at those places, where the group-by key is partially missing.
df = DataFrame([(i % 12, i % 3 if i % 3 else np.nan, i)
for i in range(n_rows)], dtype=float,
columns=["A", "B", "Z"], index=None)
@@ -2764,6 +2764,65 @@ def test_cummin_cummax(self):
expected = pd.Series([1, 2, 1], name='b')
tm.assert_series_equal(result, expected)
+ @pytest.mark.parametrize('in_vals, out_vals', [
+
+ # Basics: strictly increasing (T), strictly decreasing (F),
+ # abs val increasing (F), non-strictly increasing (T)
+ ([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1],
+ [True, False, False, True]),
+
+ # Test with inf vals
+ ([1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
+ [True, False, True, False]),
+
+ # Test with nan vals; should always be False
+ ([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
+ [False, False, False, False]),
+ ])
+ def test_is_monotonic_increasing(self, in_vals, out_vals):
+ # GH 17015
+ source_dict = {
+ 'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
+ 'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
+ 'C': in_vals}
+ df = pd.DataFrame(source_dict)
+ result = df.groupby('B').C.is_monotonic_increasing
+ index = Index(list('abcd'), name='B')
+ expected = pd.Series(index=index, data=out_vals, name='C')
+ tm.assert_series_equal(result, expected)
+
+ # Also check result equal to manually taking x.is_monotonic_increasing.
+ expected = (
+ df.groupby(['B']).C.apply(lambda x: x.is_monotonic_increasing))
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize('in_vals, out_vals', [
+ # Basics: strictly decreasing (T), strictly increasing (F),
+ # abs val decreasing (F), non-strictly increasing (T)
+ ([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1],
+ [True, False, False, True]),
+
+ # Test with inf vals
+ ([np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
+ [True, True, False, True]),
+
+ # Test with nan vals; should always be False
+ ([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
+ [False, False, False, False]),
+ ])
+ def test_is_monotonic_decreasing(self, in_vals, out_vals):
+ # GH 17015
+ source_dict = {
+ 'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
+ 'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
+ 'C': in_vals}
+
+ df = pd.DataFrame(source_dict)
+ result = df.groupby('B').C.is_monotonic_decreasing
+ index = Index(list('abcd'), name='B')
+ expected = pd.Series(index=index, data=out_vals, name='C')
+ tm.assert_series_equal(result, expected)
+
def test_apply_numeric_coercion_when_datetime(self):
# In the past, group-by/apply operations have been over-eager
# in converting dtypes to numeric, in the presence of datetime
diff --git a/pandas/tests/groupby/test_whitelist.py b/pandas/tests/groupby/test_whitelist.py
index 3117525d899f6..8d6e074881cbb 100644
--- a/pandas/tests/groupby/test_whitelist.py
+++ b/pandas/tests/groupby/test_whitelist.py
@@ -88,6 +88,8 @@
'unique',
'nlargest',
'nsmallest',
+ 'is_monotonic_increasing',
+ 'is_monotonic_decreasing',
])
@@ -184,7 +186,7 @@ def test_regression_whitelist_methods(
axis, skipna, sort):
# GH6944
# GH 17537
- # explicitly test the whitelest methods
+ # explicitly test the whitelist methods
if axis == 0:
frame = raw_frame
@@ -249,7 +251,8 @@ def test_tab_completion(mframe):
'cumsum', 'cumcount', 'ngroup', 'all', 'shift', 'skew',
'take', 'tshift', 'pct_change', 'any', 'mad', 'corr', 'corrwith',
'cov', 'dtypes', 'ndim', 'diff', 'idxmax', 'idxmin',
- 'ffill', 'bfill', 'pad', 'backfill', 'rolling', 'expanding', 'pipe'}
+ 'ffill', 'bfill', 'pad', 'backfill', 'rolling', 'expanding', 'pipe',
+ }
assert results == expected
| - [x] closes [#17015](https://github.com/pandas-dev/pandas/issues/17015#issuecomment-316238679)
- [x] tests added / passed:
updated test_tab_completion to expect additional functions `is_monotonic_increasing` and `is_monotonic_decreasing`
added (semi-redundant) tests of gb.is_monotonically_increasing() - strictly increasing/decreasing, non-strictly, w/ inf vals, w/ nan vals
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
added at the end of the `Other Enhancements` section
This is my first PR to Pandas, so I don't doubt that I'll need to edit things. | https://api.github.com/repos/pandas-dev/pandas/pulls/17453 | 2017-09-06T20:44:00Z | 2018-02-15T08:36:10Z | 2018-02-15T08:36:10Z | 2018-02-15T08:36:35Z |
Fix bug where offset.copy() != offset | diff --git a/pandas/tests/tseries/test_offsets.py b/pandas/tests/tseries/test_offsets.py
index e03b3e0a85e5e..3239fff22ef50 100644
--- a/pandas/tests/tseries/test_offsets.py
+++ b/pandas/tests/tseries/test_offsets.py
@@ -1952,6 +1952,11 @@ def _check_roundtrip(obj):
_check_roundtrip(self._object(2))
_check_roundtrip(self._object() * 2)
+ def test_copy(self):
+ # GH 17452
+ off = self._object(weekmask='Mon Wed Fri')
+ assert off == off.copy()
+
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_object = CBMonthEnd
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 7ccecaa84e6d6..d82a3a209af6b 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -11,6 +11,7 @@
from dateutil.relativedelta import relativedelta, weekday
from dateutil.easter import easter
from pandas._libs import tslib, Timestamp, OutOfBoundsDatetime, Timedelta
+from pandas.util._decorators import cache_readonly
import functools
import operator
@@ -573,9 +574,9 @@ def __setstate__(self, state):
"""Reconstruct an instance from a pickled state"""
self.__dict__ = state
if 'weekmask' in state and 'holidays' in state:
- calendar, holidays = self.get_calendar(weekmask=self.weekmask,
- holidays=self.holidays,
- calendar=None)
+ calendar, holidays = _get_calendar(weekmask=self.weekmask,
+ holidays=self.holidays,
+ calendar=None)
self.kwds['calendar'] = self.calendar = calendar
self.kwds['holidays'] = self.holidays = holidays
self.kwds['weekmask'] = state['weekmask']
@@ -978,9 +979,9 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
- calendar, holidays = self.get_calendar(weekmask=weekmask,
- holidays=holidays,
- calendar=calendar)
+ calendar, holidays = _get_calendar(weekmask=weekmask,
+ holidays=holidays,
+ calendar=calendar)
# CustomBusinessDay instances are identified by the
# following two attributes. See DateOffset._params()
# holidays, weekmask
@@ -989,36 +990,6 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.kwds['holidays'] = self.holidays = holidays
self.kwds['calendar'] = self.calendar = calendar
- def get_calendar(self, weekmask, holidays, calendar):
- """Generate busdaycalendar"""
- if isinstance(calendar, np.busdaycalendar):
- if not holidays:
- holidays = tuple(calendar.holidays)
- elif not isinstance(holidays, tuple):
- holidays = tuple(holidays)
- else:
- # trust that calendar.holidays and holidays are
- # consistent
- pass
- return calendar, holidays
-
- if holidays is None:
- holidays = []
- try:
- holidays = holidays + calendar.holidays().tolist()
- except AttributeError:
- pass
- holidays = [self._to_dt64(dt, dtype='datetime64[D]') for dt in
- holidays]
- holidays = tuple(sorted(holidays))
-
- kwargs = {'weekmask': weekmask}
- if holidays:
- kwargs['holidays'] = holidays
-
- busdaycalendar = np.busdaycalendar(**kwargs)
- return busdaycalendar, holidays
-
@apply_wraps
def apply(self, other):
if self.n <= 0:
@@ -1050,25 +1021,10 @@ def apply(self, other):
def apply_index(self, i):
raise NotImplementedError
- @staticmethod
- def _to_dt64(dt, dtype='datetime64'):
- # Currently
- # > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]')
- # numpy.datetime64('2013-05-01T02:00:00.000000+0200')
- # Thus astype is needed to cast datetime to datetime64[D]
- if getattr(dt, 'tzinfo', None) is not None:
- i8 = tslib.pydt_to_i8(dt)
- dt = tslib.tz_convert_single(i8, 'UTC', dt.tzinfo)
- dt = Timestamp(dt)
- dt = np.datetime64(dt)
- if dt.dtype.name != dtype:
- dt = dt.astype(dtype)
- return dt
-
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
- day64 = self._to_dt64(dt, 'datetime64[D]')
+ day64 = _to_dt64(dt, 'datetime64[D]')
return np.is_busday(day64, busdaycal=self.calendar)
@@ -1087,19 +1043,25 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.n = int(n)
self.normalize = normalize
super(CustomBusinessHour, self).__init__(**kwds)
+
+ calendar, holidays = _get_calendar(weekmask=weekmask,
+ holidays=holidays,
+ calendar=calendar)
+ self.kwds['weekmask'] = self.weekmask = weekmask
+ self.kwds['holidays'] = self.holidays = holidays
+ self.kwds['calendar'] = self.calendar = calendar
+
+ @cache_readonly
+ def next_bday(self):
# used for moving to next businessday
if self.n >= 0:
nb_offset = 1
else:
nb_offset = -1
- self.next_bday = CustomBusinessDay(n=nb_offset,
- weekmask=weekmask,
- holidays=holidays,
- calendar=calendar)
-
- self.kwds['weekmask'] = self.next_bday.weekmask
- self.kwds['holidays'] = self.next_bday.holidays
- self.kwds['calendar'] = self.next_bday.calendar
+ return CustomBusinessDay(n=nb_offset,
+ weekmask=self.weekmask,
+ holidays=self.holidays,
+ calendar=self.calendar)
class MonthOffset(SingleConstructorOffset):
@@ -1471,11 +1433,25 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
- self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
- weekmask=weekmask, holidays=holidays,
- calendar=calendar, **kwds)
- self.m_offset = MonthEnd(n=1, normalize=normalize, **kwds)
- self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar
+
+ calendar, holidays = _get_calendar(weekmask=weekmask,
+ holidays=holidays,
+ calendar=calendar)
+ self.kwds['weekmask'] = self.weekmask = weekmask
+ self.kwds['holidays'] = self.holidays = holidays
+ self.kwds['calendar'] = self.calendar = calendar
+
+ @cache_readonly
+ def cbday(self):
+ kwds = self.kwds
+ return CustomBusinessDay(n=self.n, normalize=self.normalize, **kwds)
+
+ @cache_readonly
+ def m_offset(self):
+ kwds = self.kwds
+ kwds = {key: kwds[key] for key in kwds
+ if key not in ['calendar', 'weekmask', 'holidays']}
+ return MonthEnd(n=1, normalize=self.normalize, **kwds)
@apply_wraps
def apply(self, other):
@@ -1531,11 +1507,27 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
- self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
- weekmask=weekmask, holidays=holidays,
- calendar=calendar, **kwds)
- self.m_offset = MonthBegin(n=1, normalize=normalize, **kwds)
- self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar
+
+ # _get_calendar does validation and possible transformation
+ # of calendar and holidays.
+ calendar, holidays = _get_calendar(weekmask=weekmask,
+ holidays=holidays,
+ calendar=calendar)
+ kwds['calendar'] = self.calendar = calendar
+ kwds['weekmask'] = self.weekmask = weekmask
+ kwds['holidays'] = self.holidays = holidays
+
+ @cache_readonly
+ def cbday(self):
+ kwds = self.kwds
+ return CustomBusinessDay(n=self.n, normalize=self.normalize, **kwds)
+
+ @cache_readonly
+ def m_offset(self):
+ kwds = self.kwds
+ kwds = {key: kwds[key] for key in kwds
+ if key not in ['calendar', 'weekmask', 'holidays']}
+ return MonthBegin(n=1, normalize=self.normalize, **kwds)
@apply_wraps
def apply(self, other):
@@ -2861,6 +2853,54 @@ class Nano(Tick):
CBMonthBegin = CustomBusinessMonthBegin
CDay = CustomBusinessDay
+# ---------------------------------------------------------------------
+# Business Calendar helpers
+
+
+def _get_calendar(weekmask, holidays, calendar):
+ """Generate busdaycalendar"""
+ if isinstance(calendar, np.busdaycalendar):
+ if not holidays:
+ holidays = tuple(calendar.holidays)
+ elif not isinstance(holidays, tuple):
+ holidays = tuple(holidays)
+ else:
+ # trust that calendar.holidays and holidays are
+ # consistent
+ pass
+ return calendar, holidays
+
+ if holidays is None:
+ holidays = []
+ try:
+ holidays = holidays + calendar.holidays().tolist()
+ except AttributeError:
+ pass
+ holidays = [_to_dt64(dt, dtype='datetime64[D]') for dt in holidays]
+ holidays = tuple(sorted(holidays))
+
+ kwargs = {'weekmask': weekmask}
+ if holidays:
+ kwargs['holidays'] = holidays
+
+ busdaycalendar = np.busdaycalendar(**kwargs)
+ return busdaycalendar, holidays
+
+
+def _to_dt64(dt, dtype='datetime64'):
+ # Currently
+ # > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]')
+ # numpy.datetime64('2013-05-01T02:00:00.000000+0200')
+ # Thus astype is needed to cast datetime to datetime64[D]
+ if getattr(dt, 'tzinfo', None) is not None:
+ i8 = tslib.pydt_to_i8(dt)
+ dt = tslib.tz_convert_single(i8, 'UTC', dt.tzinfo)
+ dt = Timestamp(dt)
+ dt = np.datetime64(dt)
+ if dt.dtype.name != dtype:
+ dt = dt.astype(dtype)
+ return dt
+
def _get_firstbday(wkday):
"""
| Add test that will currently fail
Make some attributes into cache_readonlys
Last one for now.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17452 | 2017-09-06T18:03:33Z | 2017-09-14T22:52:54Z | 2017-09-14T22:52:54Z | 2017-10-30T16:24:51Z |
Use cache_readonly attrs to minimize attrs set in __init__ | diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 7ccecaa84e6d6..36aeff35d3923 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -11,6 +11,7 @@
from dateutil.relativedelta import relativedelta, weekday
from dateutil.easter import easter
from pandas._libs import tslib, Timestamp, OutOfBoundsDatetime, Timedelta
+from pandas.util._decorators import cache_readonly
import functools
import operator
@@ -938,12 +939,14 @@ def __init__(self, n=1, normalize=False, **kwds):
self.normalize = normalize
super(BusinessHour, self).__init__(**kwds)
+ @cache_readonly
+ def next_bday(self):
# used for moving to next businessday
if self.n >= 0:
nb_offset = 1
else:
nb_offset = -1
- self.next_bday = BusinessDay(n=nb_offset)
+ return BusinessDay(n=nb_offset)
class CustomBusinessDay(BusinessDay):
@@ -1570,6 +1573,7 @@ class Week(DateOffset):
Always generate specific day of week. 0 for Monday
"""
_adjust_dst = True
+ _inc = timedelta(weeks=1)
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
@@ -1581,7 +1585,6 @@ def __init__(self, n=1, normalize=False, **kwds):
raise ValueError('Day must be 0<=day<=6, got {day}'
.format(day=self.weekday))
- self._inc = timedelta(weeks=1)
self.kwds = kwds
def isAnchored(self):
@@ -1985,13 +1988,6 @@ class QuarterEnd(QuarterOffset):
_default_startingMonth = 3
_prefix = 'Q'
- def __init__(self, n=1, normalize=False, **kwds):
- self.n = n
- self.normalize = normalize
- self.startingMonth = kwds.get('startingMonth', 3)
-
- self.kwds = kwds
-
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@@ -2324,12 +2320,28 @@ def __init__(self, n=1, normalize=False, **kwds):
raise ValueError('{variation} is not a valid variation'
.format(variation=self.variation))
+ @cache_readonly
+ def _relativedelta_forward(self):
+ if self.variation == "nearest":
+ weekday_offset = weekday(self.weekday)
+ return relativedelta(weekday=weekday_offset)
+ else:
+ return None
+
+ @cache_readonly
+ def _relativedelta_backward(self):
if self.variation == "nearest":
weekday_offset = weekday(self.weekday)
- self._rd_forward = relativedelta(weekday=weekday_offset)
- self._rd_backward = relativedelta(weekday=weekday_offset(-1))
+ return relativedelta(weekday=weekday_offset(-1))
else:
- self._offset_lwom = LastWeekOfMonth(n=1, weekday=self.weekday)
+ return None
+
+ @cache_readonly
+ def _offset_lwom(self):
+ if self.variation == "nearest":
+ return None
+ else:
+ return LastWeekOfMonth(n=1, weekday=self.weekday)
def isAnchored(self):
return self.n == 1 \
@@ -2433,8 +2445,8 @@ def _get_year_end_nearest(self, dt):
if target_date.weekday() == self.weekday:
return target_date
else:
- forward = target_date + self._rd_forward
- backward = target_date + self._rd_backward
+ forward = target_date + self._relativedelta_forward
+ backward = target_date + self._relativedelta_backward
if forward - target_date < target_date - backward:
return forward
@@ -2550,7 +2562,10 @@ def __init__(self, n=1, normalize=False, **kwds):
if self.n == 0:
raise ValueError('N cannot be 0')
- self._offset = FY5253(
+ @cache_readonly
+ def _offset(self):
+ kwds = self.kwds
+ return FY5253(
startingMonth=kwds['startingMonth'],
weekday=kwds["weekday"],
variation=kwds["variation"])
@@ -2660,9 +2675,6 @@ class Easter(DateOffset):
"""
_adjust_dst = True
- def __init__(self, n=1, **kwds):
- super(Easter, self).__init__(n, **kwds)
-
@apply_wraps
def apply(self, other):
currentEaster = easter(other.year)
| Trying to break the fixes to `offsets` into small pieces. Recall the goal is to make `DateOffset` immutable so as to fix the very slow `__eq__` calls that get made by `Period.__eq__` and the `PeriodIndex` constructor.
As a step towards immutability, this PR is trying to reduce the number of attributes set in `__init__`, as suggested by @shoyer a while back.
In two places the `__init__` methods are identical to those of the parent class, so are removed. | https://api.github.com/repos/pandas-dev/pandas/pulls/17450 | 2017-09-06T17:34:54Z | 2017-09-17T21:26:29Z | 2017-09-17T21:26:29Z | 2017-10-30T16:23:43Z |
COMPAT: handle pyarrow deprecation of timestamps_to_ms in .from_pandas with pyarrow < 0.6.0 | diff --git a/ci/requirements-3.5.sh b/ci/requirements-3.5.sh
index 33db9c28c78a9..d694ad3679ac1 100644
--- a/ci/requirements-3.5.sh
+++ b/ci/requirements-3.5.sh
@@ -8,4 +8,4 @@ echo "install 35"
conda remove -n pandas python-dateutil --force
pip install python-dateutil
-conda install -n pandas -c conda-forge feather-format pyarrow=0.4.1
+conda install -n pandas -c conda-forge feather-format pyarrow=0.5.0
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 81e52266f972e..0fa4b702327f6 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -125,7 +125,7 @@ Other Enhancements
- :func:`DataFrame.select_dtypes` now accepts scalar values for include/exclude as well as list-like. (:issue:`16855`)
- :func:`date_range` now accepts 'YS' in addition to 'AS' as an alias for start of year (:issue:`9313`)
- :func:`date_range` now accepts 'Y' in addition to 'A' as an alias for end of year (:issue:`9313`)
-- Integration with `Apache Parquet <https://parquet.apache.org/>`__, including a new top-level :func:`read_parquet` and :func:`DataFrame.to_parquet` method, see :ref:`here <io.parquet>`.
+- Integration with `Apache Parquet <https://parquet.apache.org/>`__, including a new top-level :func:`read_parquet` and :func:`DataFrame.to_parquet` method, see :ref:`here <io.parquet>`. (:issue:`15838`, :issue:`17438`)
- :func:`DataFrame.add_prefix` and :func:`DataFrame.add_suffix` now accept strings containing the '%' character. (:issue:`17151`)
- `read_*` methods can now infer compression from non-string paths, such as ``pathlib.Path`` objects (:issue:`17206`).
- :func:`pd.read_sas()` now recognizes much more of the most frequently used date (datetime) formats in SAS7BDAT files (:issue:`15871`).
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 09603fd6fdcce..4b507b7f5df6f 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -58,13 +58,23 @@ def __init__(self):
"\nor via pip\n"
"pip install -U pyarrow\n")
+ self._pyarrow_lt_050 = LooseVersion(pyarrow.__version__) < '0.5.0'
+ self._pyarrow_lt_060 = LooseVersion(pyarrow.__version__) < '0.6.0'
self.api = pyarrow
- def write(self, df, path, compression='snappy', **kwargs):
+ def write(self, df, path, compression='snappy',
+ coerce_timestamps='ms', **kwargs):
path, _, _ = get_filepath_or_buffer(path)
- table = self.api.Table.from_pandas(df, timestamps_to_ms=True)
- self.api.parquet.write_table(
- table, path, compression=compression, **kwargs)
+ if self._pyarrow_lt_060:
+ table = self.api.Table.from_pandas(df, timestamps_to_ms=True)
+ self.api.parquet.write_table(
+ table, path, compression=compression, **kwargs)
+
+ else:
+ table = self.api.Table.from_pandas(df)
+ self.api.parquet.write_table(
+ table, path, compression=compression,
+ coerce_timestamps=coerce_timestamps, **kwargs)
def read(self, path):
path, _, _ = get_filepath_or_buffer(path)
| closes #17438 | https://api.github.com/repos/pandas-dev/pandas/pulls/17447 | 2017-09-06T10:23:03Z | 2017-09-06T12:23:07Z | 2017-09-06T12:23:06Z | 2017-09-12T10:27:25Z |
Use setdefault on kwds to standardize behavior | diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 7ccecaa84e6d6..4d28917e3eb43 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -592,7 +592,7 @@ def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
- self.offset = kwds.get('offset', timedelta(0))
+ self.offset = kwds.setdefault('offset', timedelta(0))
@property
def freqstr(self):
@@ -708,9 +708,9 @@ def __init__(self, **kwds):
kwds['start'] = self._validate_time(kwds.get('start', '09:00'))
kwds['end'] = self._validate_time(kwds.get('end', '17:00'))
self.kwds = kwds
- self.offset = kwds.get('offset', timedelta(0))
- self.start = kwds.get('start', '09:00')
- self.end = kwds.get('end', '17:00')
+ self.offset = kwds.setdefault('offset', timedelta(0))
+ self.start = kwds['start']
+ self.end = kwds['end']
def _validate_time(self, t_input):
from datetime import time as dt_time
@@ -977,7 +977,7 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
- self.offset = kwds.get('offset', timedelta(0))
+ self.offset = kwds.setdefault('offset', timedelta(0))
calendar, holidays = self.get_calendar(weekmask=weekmask,
holidays=holidays,
calendar=calendar)
@@ -1470,7 +1470,7 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
- self.offset = kwds.get('offset', timedelta(0))
+ self.offset = kwds.setdefault('offset', timedelta(0))
self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
weekmask=weekmask, holidays=holidays,
calendar=calendar, **kwds)
@@ -1530,7 +1530,7 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
- self.offset = kwds.get('offset', timedelta(0))
+ self.offset = kwds.setdefault('offset', timedelta(0))
self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
weekmask=weekmask, holidays=holidays,
calendar=calendar, **kwds)
@@ -1574,7 +1574,7 @@ class Week(DateOffset):
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
- self.weekday = kwds.get('weekday', None)
+ self.weekday = kwds.setdefault('weekday', None)
if self.weekday is not None:
if self.weekday < 0 or self.weekday > 6:
@@ -1864,8 +1864,8 @@ class QuarterOffset(DateOffset):
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
- self.startingMonth = kwds.get('startingMonth',
- self._default_startingMonth)
+ self.startingMonth = kwds.setdefault('startingMonth',
+ self._default_startingMonth)
self.kwds = kwds
@@ -1988,7 +1988,8 @@ class QuarterEnd(QuarterOffset):
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
- self.startingMonth = kwds.get('startingMonth', 3)
+ self.startingMonth = kwds.setdefault('startingMonth',
+ self._default_startingMonth)
self.kwds = kwds
@@ -2063,7 +2064,7 @@ class YearOffset(DateOffset):
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
- self.month = kwds.get('month', self._default_month)
+ self.month = kwds.setdefault('month', self._default_month)
if self.month < 1 or self.month > 12:
raise ValueError('Month must go from 1 to 12')
@@ -2738,7 +2739,7 @@ def __eq__(self, other):
if isinstance(other, Tick):
return self.delta == other.delta
else:
- return DateOffset.__eq__(self, other)
+ return False
# This is identical to DateOffset.__hash__, but has to be redefined here
# for Python 3, because we've redefined __eq__.
@@ -2754,7 +2755,7 @@ def __ne__(self, other):
if isinstance(other, Tick):
return self.delta != other.delta
else:
- return DateOffset.__ne__(self, other)
+ return True
@property
def delta(self):
| `DateOffset.kwds` does not behave consistently, see brief discussion [here](https://github.com/pandas-dev/pandas/issues/17176#issuecomment-325214819).
This PR changes `kwds.get(name, default)` to `kwds.setdefault(name, default)` in each of the `__init__` methods in `tseries.offsets`. This ensures that explicitly passing the default value gives the same result as not using the kwarg.
This also edits `Tick.__ne__` and `Tick.__eq__` to skip calling `DateOffset.__ne__` and `DateOffset.__eq__`, respectively, since these will always return `True` and `False`, respectively. I make no claims as to the performance implications, but if a reader wanted to make an inference...
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17443 | 2017-09-06T01:35:14Z | 2017-09-07T04:07:49Z | null | 2017-09-07T04:07:58Z |
DOC: cleaned references to pandas v0.15 and v0.16 in docs | diff --git a/doc/source/10min.rst b/doc/source/10min.rst
index def49a641a0ff..ef6b2d6ef2c90 100644
--- a/doc/source/10min.rst
+++ b/doc/source/10min.rst
@@ -655,7 +655,7 @@ the quarter end:
Categoricals
------------
-Since version 0.15, pandas can include categorical data in a ``DataFrame``. For full docs, see the
+pandas can include categorical data in a ``DataFrame``. For full docs, see the
:ref:`categorical introduction <categorical>` and the :ref:`API documentation <api.categorical>`.
.. ipython:: python
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst
index 4af476cd5a7e1..3f145cf955664 100644
--- a/doc/source/advanced.rst
+++ b/doc/source/advanced.rst
@@ -26,12 +26,6 @@ See the :ref:`Indexing and Selecting Data <indexing>` for general indexing docum
should be avoided. See :ref:`Returning a View versus Copy
<indexing.view_versus_copy>`
-.. warning::
-
- In 0.15.0 ``Index`` has internally been refactored to no longer sub-class ``ndarray``
- but instead subclass ``PandasObject``, similarly to the rest of the pandas objects. This should be
- a transparent change with only very limited API implications (See the :ref:`Internal Refactoring <whatsnew_0150.refactoring>`)
-
See the :ref:`cookbook<cookbook.selection>` for some advanced strategies
.. _advanced.hierarchical:
@@ -638,12 +632,9 @@ In the following sub-sections we will highlite some other index types.
CategoricalIndex
~~~~~~~~~~~~~~~~
-.. versionadded:: 0.16.1
-
-We introduce a ``CategoricalIndex``, a new type of index object that is useful for supporting
-indexing with duplicates. This is a container around a ``Categorical`` (introduced in v0.15.0)
-and allows efficient indexing and storage of an index with a large number of duplicated elements. Prior to 0.16.1,
-setting the index of a ``DataFrame/Series`` with a ``category`` dtype would convert this to regular object-based ``Index``.
+``CategoricalIndex`` is a type of index that is useful for supporting
+indexing with duplicates. This is a container around a ``Categorical``
+and allows efficient indexing and storage of an index with a large number of duplicated elements.
.. ipython:: python
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 5880703b1d271..42c28df3a6030 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -719,8 +719,6 @@ on an entire ``DataFrame`` or ``Series``, row- or column-wise, or elementwise.
Tablewise Function Application
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.16.2
-
``DataFrames`` and ``Series`` can of course just be passed into functions.
However, if the function needs to be called in a chain, consider using the :meth:`~DataFrame.pipe` method.
Compare the following
@@ -1860,8 +1858,10 @@ dtypes
------
The main types stored in pandas objects are ``float``, ``int``, ``bool``,
-``datetime64[ns]`` and ``datetime64[ns, tz]`` (in >= 0.17.0), ``timedelta[ns]``, ``category`` (in >= 0.15.0), and ``object``. In addition these dtypes
-have item sizes, e.g. ``int64`` and ``int32``. See :ref:`Series with TZ <timeseries.timezone_series>` for more detail on ``datetime64[ns, tz]`` dtypes.
+``datetime64[ns]`` and ``datetime64[ns, tz]`` (in >= 0.17.0), ``timedelta[ns]``,
+``category`` and ``object``. In addition these dtypes have item sizes, e.g.
+``int64`` and ``int32``. See :ref:`Series with TZ <timeseries.timezone_series>`
+for more detail on ``datetime64[ns, tz]`` dtypes.
A convenient :attr:`~DataFrame.dtypes` attribute for DataFrames returns a Series with the data type of each column.
diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst
index 02d7920bc4a84..8835c4a1533d0 100644
--- a/doc/source/categorical.rst
+++ b/doc/source/categorical.rst
@@ -16,13 +16,6 @@
Categorical Data
****************
-.. versionadded:: 0.15
-
-.. note::
- While there was `pandas.Categorical` in earlier versions, the ability to use
- categorical data in `Series` and `DataFrame` is new.
-
-
This is an introduction to pandas categorical data type, including a short comparison
with R's ``factor``.
@@ -295,10 +288,6 @@ Sorting and Order
.. _categorical.sort:
-.. warning::
-
- The default for construction has changed in v0.16.0 to ``ordered=False``, from the prior implicit ``ordered=True``
-
If categorical data is ordered (``s.cat.ordered == True``), then the order of the categories has a
meaning and certain operations are possible. If the categorical is unordered, ``.min()/.max()`` will raise a `TypeError`.
@@ -803,13 +792,11 @@ Following table summarizes the results of ``Categoricals`` related concatenation
Getting Data In/Out
-------------------
-.. versionadded:: 0.15.2
+You can write data that contains ``category`` dtypes to a ``HDFStore``.
+See :ref:`here <io.hdf5-categorical>` for an example and caveats.
-Writing data (`Series`, `Frames`) to a HDF store that contains a ``category`` dtype was implemented
-in 0.15.2. See :ref:`here <io.hdf5-categorical>` for an example and caveats.
-
-Writing data to and reading data from *Stata* format files was implemented in
-0.15.2. See :ref:`here <io.stata-categorical>` for an example and caveats.
+It is also possible to write data to and reading data from *Stata* format files.
+See :ref:`here <io.stata-categorical>` for an example and caveats.
Writing to a CSV file will convert the data, effectively removing any information about the
categorical (categories and ordering). So if you read back the CSV file you have to convert the
@@ -928,32 +915,6 @@ an ``object`` dtype is a constant times the length of the data.
s.astype('category').nbytes
-Old style constructor usage
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In earlier versions than pandas 0.15, a `Categorical` could be constructed by passing in precomputed
-`codes` (called then `labels`) instead of values with categories. The `codes` were interpreted as
-pointers to the categories with `-1` as `NaN`. This type of constructor usage is replaced by
-the special constructor :func:`Categorical.from_codes`.
-
-Unfortunately, in some special cases, using code which assumes the old style constructor usage
-will work with the current pandas version, resulting in subtle bugs:
-
-.. code-block:: python
-
- >>> cat = pd.Categorical([1,2], [1,2,3])
- >>> # old version
- >>> cat.get_values()
- array([2, 3], dtype=int64)
- >>> # new version
- >>> cat.get_values()
- array([1, 2], dtype=int64)
-
-.. warning::
- If you used `Categoricals` with older versions of pandas, please audit your code before
- upgrading and change your code to use the :func:`~pandas.Categorical.from_codes`
- constructor.
-
`Categorical` is not a `numpy` array
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -982,8 +943,7 @@ Dtype comparisons work:
dtype == np.str_
np.str_ == dtype
-To check if a Series contains Categorical data, with pandas 0.16 or later, use
-``hasattr(s, 'cat')``:
+To check if a Series contains Categorical data, use ``hasattr(s, 'cat')``:
.. ipython:: python
@@ -1023,13 +983,13 @@ basic type) and applying along columns will also convert to object.
Categorical Index
~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.16.1
-
-A new ``CategoricalIndex`` index type is introduced in version 0.16.1. See the
-:ref:`advanced indexing docs <indexing.categoricalindex>` for a more detailed
+``CategoricalIndex`` is a type of index that is useful for supporting
+indexing with duplicates. This is a container around a ``Categorical``
+and allows efficient indexing and storage of an index with a large number of duplicated elements.
+See the :ref:`advanced indexing docs <indexing.categoricalindex>` for a more detailed
explanation.
-Setting the index, will create create a ``CategoricalIndex``
+Setting the index will create a ``CategoricalIndex``
.. ipython:: python
@@ -1041,10 +1001,6 @@ Setting the index, will create create a ``CategoricalIndex``
# This now sorts by the categories order
df.sort_index()
-In previous versions (<0.16.1) there is no index of type ``category``, so
-setting the index to categorical column will convert the categorical data to a
-"normal" dtype first and therefore remove any custom ordering of the categories.
-
Side Effects
~~~~~~~~~~~~
diff --git a/doc/source/comparison_with_r.rst b/doc/source/comparison_with_r.rst
index f895cdc25e620..eb97aeeb7e696 100644
--- a/doc/source/comparison_with_r.rst
+++ b/doc/source/comparison_with_r.rst
@@ -505,8 +505,6 @@ For more details and examples see :ref:`the reshaping documentation
|factor|_
~~~~~~~~~
-.. versionadded:: 0.15
-
pandas has a data type for categorical data.
.. code-block:: r
diff --git a/doc/source/computation.rst b/doc/source/computation.rst
index 76a030d355e33..23699393958cf 100644
--- a/doc/source/computation.rst
+++ b/doc/source/computation.rst
@@ -924,15 +924,12 @@ EWM has a ``min_periods`` argument, which has the same
meaning it does for all the ``.expanding`` and ``.rolling`` methods:
no output values will be set until at least ``min_periods`` non-null values
are encountered in the (expanding) window.
-(This is a change from versions prior to 0.15.0, in which the ``min_periods``
-argument affected only the ``min_periods`` consecutive entries starting at the
-first non-null value.)
-EWM also has an ``ignore_na`` argument, which deterines how
+EWM also has an ``ignore_na`` argument, which determines how
intermediate null values affect the calculation of the weights.
When ``ignore_na=False`` (the default), weights are calculated based on absolute
positions, so that intermediate null values affect the result.
-When ``ignore_na=True`` (which reproduces the behavior in versions prior to 0.15.0),
+When ``ignore_na=True``,
weights are calculated by ignoring intermediate null values.
For example, assuming ``adjust=True``, if ``ignore_na=False``, the weighted
average of ``3, NaN, 5`` would be calculated as
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index f51c3e679b36f..5bb3ba75fe51b 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -256,12 +256,6 @@ Panels
pf = pd.Panel({'df1':df1,'df2':df2,'df3':df3});pf
- #Assignment using Transpose (pandas < 0.15)
- pf = pf.transpose(2,0,1)
- pf['E'] = pd.DataFrame(data, rng, cols)
- pf = pf.transpose(1,2,0);pf
-
- #Direct assignment (pandas > 0.15)
pf.loc[:,:,'F'] = pd.DataFrame(data, rng, cols);pf
`Mask a panel by using np.where and then reconstructing the panel with the new masked values
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index 4652ccbf0ad34..ec0a1c7a00bf7 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -453,8 +453,6 @@ available to insert at a particular location in the columns:
Assigning New Columns in Method Chains
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.16.0
-
Inspired by `dplyr's
<http://cran.rstudio.com/web/packages/dplyr/vignettes/introduction.html#mutate>`__
``mutate`` verb, DataFrame has an :meth:`~pandas.DataFrame.assign`
diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst
index a3062b4086673..9e6f98923fca6 100644
--- a/doc/source/gotchas.rst
+++ b/doc/source/gotchas.rst
@@ -22,8 +22,8 @@ Frequently Asked Questions (FAQ)
DataFrame memory usage
----------------------
-As of pandas version 0.15.0, the memory usage of a dataframe (including
-the index) is shown when accessing the ``info`` method of a dataframe. A
+The memory usage of a dataframe (including the index)
+is shown when accessing the ``info`` method of a dataframe. A
configuration option, ``display.memory_usage`` (see :ref:`options`),
specifies if the dataframe's memory usage will be displayed when
invoking the ``df.info()`` method.
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index a6e7df57be4e5..88e62b5d301a3 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -47,12 +47,6 @@ advanced indexing.
should be avoided. See :ref:`Returning a View versus Copy
<indexing.view_versus_copy>`
-.. warning::
-
- In 0.15.0 ``Index`` has internally been refactored to no longer subclass ``ndarray``
- but instead subclass ``PandasObject``, similarly to the rest of the pandas objects. This should be
- a transparent change with only very limited API implications (See the :ref:`Internal Refactoring <whatsnew_0150.refactoring>`)
-
.. warning::
Indexing on an integer-based Index with floats has been clarified in 0.18.0, for a summary of the changes, see :ref:`here <whatsnew_0180.float_indexers>`.
@@ -660,7 +654,6 @@ For getting *multiple* indexers, using ``.get_indexer``
Selecting Random Samples
------------------------
-.. versionadded::0.16.1
A random selection of rows or columns from a Series, DataFrame, or Panel with the :meth:`~DataFrame.sample` method. The method will sample rows by default, and accepts a specific number of rows/columns to return, or a fraction of rows.
@@ -1510,8 +1503,6 @@ See :ref:`Advanced Indexing <advanced>` for usage of MultiIndexes.
ind.name = "bob"
ind
-.. versionadded:: 0.15.0
-
``set_names``, ``set_levels``, and ``set_labels`` also take an optional
`level`` argument
@@ -1527,11 +1518,6 @@ Set operations on Index objects
.. _indexing.set_ops:
-.. warning::
-
- In 0.15.0. the set operations ``+`` and ``-`` were deprecated in order to provide these for numeric type operations on certain
- index types. ``+`` can be replace by ``.union()`` or ``|``, and ``-`` by ``.difference()``.
-
The two main operations are ``union (|)``, ``intersection (&)``
These can be directly called as instance methods or used via overloaded
operators. Difference is provided via the ``.difference()`` method.
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 8dc8224ea6cb2..c805f84d0faaa 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -18,7 +18,7 @@ Instructions for installing from source,
Python version support
----------------------
-Officially Python 2.7, 3.4, 3.5, and 3.6
+Officially Python 2.7, 3.5, and 3.6.
Installing pandas
-----------------
@@ -183,21 +183,17 @@ installed), make sure you have `pytest
>>> import pandas as pd
>>> pd.test()
- Running unit tests for pandas
- pandas version 0.18.0
- numpy version 1.10.2
- pandas is installed in pandas
- Python version 2.7.11 |Continuum Analytics, Inc.|
- (default, Dec 6 2015, 18:57:58) [GCC 4.2.1 (Apple Inc. build 5577)]
- nose version 1.3.7
+ running: pytest --skip-slow --skip-network C:\Users\TP\Anaconda3\envs\py36\lib\site-packages\pandas
+ ============================= test session starts =============================
+ platform win32 -- Python 3.6.2, pytest-3.2.1, py-1.4.34, pluggy-0.4.0
+ rootdir: C:\Users\TP\Documents\Python\pandasdev\pandas, inifile: setup.cfg
+ collected 12145 items / 3 skipped
+
..................................................................S......
........S................................................................
.........................................................................
- ----------------------------------------------------------------------
- Ran 9252 tests in 368.339s
-
- OK (SKIP=117)
+ ==================== 12130 passed, 12 skipped in 368.339 seconds =====================
Dependencies
------------
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 33523ea171f3a..de3150035c446 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -592,8 +592,7 @@ Ignoring line comments and empty lines
++++++++++++++++++++++++++++++++++++++
If the ``comment`` parameter is specified, then completely commented lines will
-be ignored. By default, completely blank lines will be ignored as well. Both of
-these are API changes introduced in version 0.15.
+be ignored. By default, completely blank lines will be ignored as well.
.. ipython:: python
@@ -2701,8 +2700,6 @@ Using a list to get multiple sheets:
# Returns the 1st and 4th sheet, as a dictionary of DataFrames.
read_excel('path_to_file.xls',sheet_name=['Sheet1',3])
-.. versionadded:: 0.16
-
``read_excel`` can read more than one sheet, by setting ``sheet_name`` to either
a list of sheet names, a list of sheet positions, or ``None`` to read all sheets.
Sheets can be specified by sheet index or sheet name, using an integer or string,
@@ -3241,11 +3238,10 @@ for some advanced strategies
.. warning::
- As of version 0.15.0, pandas requires ``PyTables`` >= 3.0.0. Stores written with prior versions of pandas / ``PyTables`` >= 2.3 are fully compatible (this was the previous minimum ``PyTables`` required version).
-
-.. warning::
-
- There is a ``PyTables`` indexing bug which may appear when querying stores using an index. If you see a subset of results being returned, upgrade to ``PyTables`` >= 3.2. Stores created previously will need to be rewritten using the updated version.
+ pandas requires ``PyTables`` >= 3.0.0.
+ There is a indexing bug in ``PyTables`` < 3.2 which may appear when querying stores using an index.
+ If you see a subset of results being returned, upgrade to ``PyTables`` >= 3.2.
+ Stores created previously will need to be rewritten using the updated version.
.. warning::
@@ -4210,10 +4206,8 @@ object : ``strings`` ``np.nan``
Categorical Data
++++++++++++++++
-.. versionadded:: 0.15.2
-
-Writing data to a ``HDFStore`` that contains a ``category`` dtype was implemented
-in 0.15.2. Queries work the same as if it was an object array. However, the ``category`` dtyped data is
+You can write data that contains ``category`` dtypes to a ``HDFStore``.
+Queries work the same as if it was an object array. However, the ``category`` dtyped data is
stored in a more efficient manner.
.. ipython:: python
@@ -4228,21 +4222,6 @@ stored in a more efficient manner.
result
result.dtypes
-.. warning::
-
- The format of the ``Categorical`` is readable by prior versions of pandas (< 0.15.2), but will retrieve
- the data as an integer based column (e.g. the ``codes``). However, the ``categories`` *can* be retrieved
- but require the user to select them manually using the explicit meta path.
-
- The data is stored like so:
-
- .. ipython:: python
-
- cstore
-
- # to get the categories
- cstore.select('dfcat/meta/A/meta')
-
.. ipython:: python
:suppress:
:okexcept:
@@ -4746,8 +4725,6 @@ You can check if a table exists using :func:`~pandas.io.sql.has_table`
Schema support
''''''''''''''
-.. versionadded:: 0.15.0
-
Reading from and writing to different schema's is supported through the ``schema``
keyword in the :func:`~pandas.read_sql_table` and :func:`~pandas.DataFrame.to_sql`
functions. Note however that this depends on the database flavor (sqlite does not
@@ -4975,8 +4952,6 @@ be used to read the file incrementally.
pd.read_stata('stata.dta')
-.. versionadded:: 0.16.0
-
Specifying a ``chunksize`` yields a
:class:`~pandas.io.stata.StataReader` instance that can be used to
read ``chunksize`` lines from the file at a time. The ``StataReader``
@@ -5034,8 +5009,6 @@ values will have ``object`` data type.
Categorical Data
++++++++++++++++
-.. versionadded:: 0.15.2
-
``Categorical`` data can be exported to *Stata* data files as value labeled data.
The exported data consists of the underlying category codes as integer data values
and the categories as value labels. *Stata* does not have an explicit equivalent
diff --git a/doc/source/remote_data.rst b/doc/source/remote_data.rst
index 7980133582125..9af66058a7aaa 100644
--- a/doc/source/remote_data.rst
+++ b/doc/source/remote_data.rst
@@ -11,14 +11,13 @@ Remote Data Access
DataReader
----------
-The sub-package ``pandas.io.data`` is removed in favor of a separately
-installable `pandas-datareader package
+The sub-package ``pandas.io.data`` was deprecated in v.0.17 and removed in
+`v.0.19 <http://pandas-docs.github.io/pandas-docs-travis/whatsnew.html#v0-19-0-october-2-2016>`__.
+ Instead there has been created a separately installable `pandas-datareader package
<https://github.com/pydata/pandas-datareader>`_. This will allow the data
-modules to be independently updated to your pandas installation. The API for
-``pandas-datareader v0.1.1`` is the same as in ``pandas v0.16.1``.
-(:issue:`8961`)
+modules to be independently updated on your pandas installation.
- You should replace the imports of the following:
+ For code older than < 0.19 you should replace the imports of the following:
.. code-block:: python
diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst
index 3dce73b302c7c..fab83222b313f 100644
--- a/doc/source/reshaping.rst
+++ b/doc/source/reshaping.rst
@@ -569,8 +569,6 @@ This function is often used along with discretization functions like ``cut``:
See also :func:`Series.str.get_dummies <pandas.Series.str.get_dummies>`.
-.. versionadded:: 0.15.0
-
:func:`get_dummies` also accepts a DataFrame. By default all categorical
variables (categorical in the statistical sense,
those with `object` or `categorical` dtype) are encoded as dummy variables.
@@ -675,4 +673,4 @@ handling of NaN:
you can use ``df["cat_col"] = pd.Categorical(df["col"])`` or
``df["cat_col"] = df["col"].astype("category")``. For full docs on :class:`~pandas.Categorical`,
see the :ref:`Categorical introduction <categorical>` and the
- :ref:`API documentation <api.categorical>`. This feature was introduced in version 0.15.
+ :ref:`API documentation <api.categorical>`.
diff --git a/doc/source/sparse.rst b/doc/source/sparse.rst
index b4884cf1c4141..cf16cee501a3e 100644
--- a/doc/source/sparse.rst
+++ b/doc/source/sparse.rst
@@ -216,8 +216,6 @@ To convert a ``SparseDataFrame`` back to sparse SciPy matrix in COO format, you
SparseSeries
~~~~~~~~~~~~
-.. versionadded:: 0.16.0
-
A :meth:`SparseSeries.to_coo` method is implemented for transforming a ``SparseSeries`` indexed by a ``MultiIndex`` to a ``scipy.sparse.coo_matrix``.
The method requires a ``MultiIndex`` with two or more levels.
diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst
index daa2c262c8c86..d055c49dc4721 100644
--- a/doc/source/timedeltas.rst
+++ b/doc/source/timedeltas.rst
@@ -23,13 +23,12 @@
Time Deltas
***********
-.. note::
-
- Starting in v0.15.0, we introduce a new scalar type ``Timedelta``, which is a subclass of ``datetime.timedelta``, and behaves in a similar manner,
- but allows compatibility with ``np.timedelta64`` types as well as a host of custom representation, parsing, and attributes.
+Timedeltas are differences in times, expressed in difference units, e.g. days, hours, minutes,
+seconds. They can be both positive and negative.
-Timedeltas are differences in times, expressed in difference units, e.g. days, hours, minutes, seconds.
-They can be both positive and negative.
+``Timedelta`` is a subclass of ``datetime.timedelta``, and behaves in a similar manner,
+but allows compatibility with ``np.timedelta64`` types as well as a host of custom representation,
+parsing, and attributes.
Parsing
-------
@@ -78,15 +77,10 @@ Further, operations among the scalars yield another scalar ``Timedelta``.
to_timedelta
~~~~~~~~~~~~
-.. warning::
-
- Prior to 0.15.0 ``pd.to_timedelta`` would return a ``Series`` for list-like/Series input, and a ``np.timedelta64`` for scalar input.
- It will now return a ``TimedeltaIndex`` for list-like input, ``Series`` for Series input, and ``Timedelta`` for scalar input.
-
- The arguments to ``pd.to_timedelta`` are now ``(arg, unit='ns', box=True)``, previously were ``(arg, box=True, unit='ns')`` as these are more logical.
-
-Using the top-level ``pd.to_timedelta``, you can convert a scalar, array, list, or Series from a recognized timedelta format / value into a ``Timedelta`` type.
-It will construct Series if the input is a Series, a scalar if the input is scalar-like, otherwise will output a ``TimedeltaIndex``.
+Using the top-level ``pd.to_timedelta``, you can convert a scalar, array, list,
+or Series from a recognized timedelta format / value into a ``Timedelta`` type.
+It will construct Series if the input is a Series, a scalar if the input is
+scalar-like, otherwise it will output a ``TimedeltaIndex``.
You can parse a single string to a Timedelta:
@@ -328,8 +322,6 @@ You can convert a ``Timedelta`` to an `ISO 8601 Duration`_ string with the
TimedeltaIndex
--------------
-.. versionadded:: 0.15.0
-
To generate an index with time delta, you can use either the ``TimedeltaIndex`` or
the ``timedelta_range`` constructor.
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index 839390c8778aa..b5a261e3acac5 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -229,8 +229,6 @@ To get horizontal bar plots, use the ``barh`` method:
Histograms
~~~~~~~~~~
-.. versionadded:: 0.15.0
-
Histogram can be drawn by using the :meth:`DataFrame.plot.hist` and :meth:`Series.plot.hist` methods.
.. ipython:: python
@@ -328,8 +326,6 @@ The ``by`` keyword can be specified to plot grouped histograms:
Box Plots
~~~~~~~~~
-.. versionadded:: 0.15.0
-
Boxplot can be drawn calling :meth:`Series.plot.box` and :meth:`DataFrame.plot.box`,
or :meth:`DataFrame.boxplot` to visualize the distribution of values within each column.
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 81e52266f972e..3fc476b309a73 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -483,3 +483,4 @@ Other
^^^^^
- Bug in :func:`eval` where the ``inplace`` parameter was being incorrectly handled (:issue:`16732`)
- Several ``NaT`` method docstrings (e.g. :func:`NaT.ctime`) were incorrect (:issue:`17327`)
+- The documentation has had references to versions < v0.16 removed and cleaned up (:issue:`17442`, :issue:`17442` & :issue:`#17404`)
| Last round of cleanup of references to older pandas versions (here v0.15 and v0.16).
I'm not intending to go further up: v0.16.2 was released in june 2015, and allowing up to 2 years old references to be seen in the docs themselves is reasonable IMO. (but other may have a different view, so if there's a "demand" I could change the cut-off point, its not so much work).
See also #17420 and #17404.
I also removed a reference to python 3.4 in the ``install.rst``. In the same document, I also "updated" the test section to show python3.6 (just a (very) minor signal that pandas is python 3 positive). | https://api.github.com/repos/pandas-dev/pandas/pulls/17442 | 2017-09-05T19:07:58Z | 2017-09-07T01:00:50Z | 2017-09-07T01:00:50Z | 2017-09-07T02:14:43Z |
Rebase | rebase with pandas master
| https://api.github.com/repos/pandas-dev/pandas/pulls/17439 | 2017-09-05T11:24:32Z | 2017-09-05T11:24:51Z | null | 2017-09-05T11:24:51Z | |
Remove unused imports from period.pyx | diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx
index 816b7ebfff86d..818b0ba323d14 100644
--- a/pandas/_libs/period.pyx
+++ b/pandas/_libs/period.pyx
@@ -6,20 +6,18 @@ from cpython cimport (
PyObject_RichCompareBool,
Py_EQ, Py_NE)
-from numpy cimport (int8_t, int32_t, int64_t, import_array, ndarray,
- NPY_INT64, NPY_DATETIME, NPY_TIMEDELTA)
+from numpy cimport int64_t, ndarray
import numpy as np
from libc.stdlib cimport free
-from pandas import compat
from pandas.compat import PY2
cimport cython
from datetime cimport (
+ PyDateTime_Check, PyDate_Check,
is_leapyear,
- PyDateTime_IMPORT,
pandas_datetimestruct,
pandas_datetimestruct_to_datetime,
pandas_datetime_to_datetimestruct,
@@ -27,11 +25,16 @@ from datetime cimport (
INT32_MIN)
-cimport util, lib
+cimport util
+from util cimport (
+ is_integer_object,
+ is_period_object,
+ is_string_object,
+ is_datetime64_object)
-from lib cimport is_null_datetimelike, is_period
-from pandas._libs import tslib, lib
-from pandas._libs.tslib import (Timedelta, Timestamp, iNaT,
+from lib cimport is_null_datetimelike
+from pandas._libs import tslib
+from pandas._libs.tslib import (Timestamp, iNaT,
NaT, _get_utcoffset)
from tslib cimport (
maybe_get_tz,
@@ -328,7 +331,6 @@ cdef list str_extra_fmts = ["^`AB`^", "^`CD`^", "^`EF`^",
"^`GH`^", "^`IJ`^", "^`KL`^"]
cdef object _period_strftime(int64_t value, int freq, object fmt):
- import sys
cdef:
Py_ssize_t i
@@ -485,7 +487,7 @@ def extract_freq(ndarray[object] values):
try:
# now Timestamp / NaT has freq attr
- if is_period(p):
+ if is_period_object(p):
return p.freq
except AttributeError:
pass
@@ -706,7 +708,7 @@ cdef class _Period(object):
return self
def __richcmp__(self, other, op):
- if isinstance(other, Period):
+ if is_period_object(other):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
@@ -728,8 +730,7 @@ cdef class _Period(object):
return hash((self.ordinal, self.freqstr))
def _add_delta(self, other):
- if isinstance(other, (timedelta, np.timedelta64,
- offsets.Tick, Timedelta)):
+ if isinstance(other, (timedelta, np.timedelta64, offsets.Tick)):
offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(offset, offsets.Tick):
nanos = tslib._delta_to_nanoseconds(other)
@@ -752,34 +753,32 @@ cdef class _Period(object):
return NotImplemented
def __add__(self, other):
- if isinstance(self, Period):
+ if is_period_object(self):
if isinstance(other, (timedelta, np.timedelta64,
- offsets.DateOffset,
- Timedelta)):
+ offsets.DateOffset)):
return self._add_delta(other)
elif other is NaT:
return NaT
- elif lib.is_integer(other):
+ elif is_integer_object(other):
ordinal = self.ordinal + other * self.freq.n
return Period(ordinal=ordinal, freq=self.freq)
else: # pragma: no cover
return NotImplemented
- elif isinstance(other, Period):
+ elif is_period_object(other):
return other + self
else:
return NotImplemented
def __sub__(self, other):
- if isinstance(self, Period):
+ if is_period_object(self):
if isinstance(other, (timedelta, np.timedelta64,
- offsets.DateOffset,
- Timedelta)):
+ offsets.DateOffset)):
neg_other = -other
return self + neg_other
- elif lib.is_integer(other):
+ elif is_integer_object(other):
ordinal = self.ordinal - other * self.freq.n
return Period(ordinal=ordinal, freq=self.freq)
- elif isinstance(other, Period):
+ elif is_period_object(other):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
@@ -788,7 +787,7 @@ cdef class _Period(object):
return -other.__sub__(self)
else: # pragma: no cover
return NotImplemented
- elif isinstance(other, Period):
+ elif is_period_object(other):
if self is NaT:
return NaT
return NotImplemented
@@ -1136,7 +1135,7 @@ class Period(_Period):
raise ValueError(("Only value or ordinal but not both should be "
"given but not both"))
elif ordinal is not None:
- if not lib.is_integer(ordinal):
+ if not is_integer_object(ordinal):
raise ValueError("Ordinal must be an integer")
if freq is None:
raise ValueError('Must supply freq for ordinal value')
@@ -1160,7 +1159,7 @@ class Period(_Period):
ordinal = _ordinal_from_fields(year, month, quarter, day,
hour, minute, second, freq)
- elif isinstance(value, Period):
+ elif is_period_object(value):
other = value
if freq is None or frequencies.get_freq_code(
freq) == frequencies.get_freq_code(other.freq):
@@ -1173,8 +1172,8 @@ class Period(_Period):
elif is_null_datetimelike(value) or value in tslib._nat_strings:
ordinal = iNaT
- elif isinstance(value, compat.string_types) or lib.is_integer(value):
- if lib.is_integer(value):
+ elif is_string_object(value) or is_integer_object(value):
+ if is_integer_object(value):
value = str(value)
value = value.upper()
dt, _, reso = parse_time_string(value, freq)
@@ -1186,15 +1185,15 @@ class Period(_Period):
raise ValueError(
"Invalid frequency or could not infer: %s" % reso)
- elif isinstance(value, datetime):
+ elif PyDateTime_Check(value):
dt = value
if freq is None:
raise ValueError('Must supply freq for datetime value')
- elif isinstance(value, np.datetime64):
+ elif is_datetime64_object(value):
dt = Timestamp(value)
if freq is None:
raise ValueError('Must supply freq for datetime value')
- elif isinstance(value, date):
+ elif PyDate_Check(value):
dt = datetime(year=value.year, month=value.month, day=value.day)
if freq is None:
raise ValueError('Must supply freq for datetime value')
| Replace isinstance checks with more efficient versions from util;
Remove redundant checks for isinstance(..., (timedelta, Timedelta)) since
Timedelta subclasses timedelta
The main goal is to reduce the inter-dependencies between lib/tslib/period(/frequencies/offsets). | https://api.github.com/repos/pandas-dev/pandas/pulls/17434 | 2017-09-05T03:35:33Z | 2017-09-05T14:14:35Z | null | 2017-10-30T16:23:39Z |
BUG: TimedeltaIndex.intersection | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 008828cf4f309..510baa6639d60 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1200,6 +1200,12 @@ def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self.is_monotonic_increasing
+ @property
+ def _is_strictly_monotonic(self):
+ """ Checks if the index is sorted """
+ return (self._is_strictly_monotonic_increasing or
+ self._is_strictly_monotonic_decreasing)
+
@property
def is_monotonic_increasing(self):
"""
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index c3232627fce74..000cfb7a6df5f 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -28,6 +28,7 @@
from pandas._libs.period import Period
from pandas.core.indexes.base import Index, _index_shared_docs
+from pandas.tseries.offsets import index_offsets_equal
from pandas.util._decorators import Appender, cache_readonly
import pandas.core.dtypes.concat as _concat
import pandas.tseries.frequencies as frequencies
@@ -854,6 +855,94 @@ def _concat_same_dtype(self, to_concat, name):
new_data = np.concatenate([c.asi8 for c in to_concat])
return self._simple_new(new_data, **attribs)
+ def _intersect_ascending(self, other):
+ # to make our life easier, "sort" the two ranges
+ if self[0] <= other[0]:
+ left, right = self, other
+ else:
+ left, right = other, self
+
+ end = min(left[-1], right[-1])
+ start = right[0]
+
+ if end < start:
+ return []
+ return left.values[slice(*left.slice_locs(start, end))]
+
+ def _intersect_descending(self, other):
+ # this is essentially a flip of _intersect_ascending
+ if self[0] >= other[0]:
+ left, right = self, other
+ else:
+ left, right = other, self
+
+ start = min(left[0], right[0])
+ end = right[-1]
+
+ if end > start:
+ return Index()
+ return left.values[slice(*left.slice_locs(start, end))]
+
+ def intersection(self, other):
+ """
+ Specialized intersection for DateTimeIndexOpsMixin objects.
+ May be much faster than Index.intersection.
+
+ Parameters
+ ----------
+ other : Index or array-like
+
+ Returns
+ -------
+ Index
+ A shallow copied intersection between the two things passed in
+ """
+ self._assert_can_do_setop(other)
+
+ if self.equals(other):
+ return self._get_consensus_name(other)
+
+ lengths = len(self), len(other)
+ if lengths[0] == 0:
+ return self
+ if lengths[1] == 0:
+ return other
+
+ if not isinstance(other, Index):
+ result = Index.intersection(self, other)
+ return result
+ elif (index_offsets_equal(self, other) or
+ (not self._is_strictly_monotonic or
+ not other._is_strictly_monotonic)):
+ result = Index.intersection(self, other)
+ result = self._shallow_copy(result._values, name=result.name,
+ tz=getattr(self, 'tz', None),
+ freq=None
+ )
+ if result.freq is None:
+ result.offset = frequencies.to_offset(result.inferred_freq)
+ return result
+
+ # handle intersecting things like this
+ # idx1 = pd.to_timedelta((1, 2, 3, 4, 5, 6, 7, 8), unit='s')
+ # idx2 = pd.to_timedelta((2, 3, 4, 8), unit='s')
+ if lengths[0] != lengths[1] and (
+ max(self) != max(other) or min(self) != min(other)):
+ return Index.intersection(self, other)
+
+ # coerce into same order
+ self_ascending = self.is_monotonic_increasing
+ if self_ascending != other.is_monotonic_increasing:
+ other = other.sort_values(ascending=self_ascending)
+
+ if self_ascending:
+ intersected_slice = self._intersect_ascending(other)
+ else:
+ intersected_slice = self._intersect_descending(other)
+
+ intersected = self._shallow_copy(intersected_slice)
+ return intersected._get_consensus_name(other)
+
def _ensure_datetimelike_to_i8(other):
""" helper for coercing an input scalar or array to i8 """
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 4cfb7547e7d0a..359dccf253f9b 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -1189,62 +1189,6 @@ def _wrap_union_result(self, other, result):
raise ValueError('Passed item and index have different timezone')
return self._simple_new(result, name=name, freq=None, tz=self.tz)
- def intersection(self, other):
- """
- Specialized intersection for DatetimeIndex objects. May be much faster
- than Index.intersection
-
- Parameters
- ----------
- other : DatetimeIndex or array-like
-
- Returns
- -------
- y : Index or DatetimeIndex
- """
- self._assert_can_do_setop(other)
- if not isinstance(other, DatetimeIndex):
- try:
- other = DatetimeIndex(other)
- except (TypeError, ValueError):
- pass
- result = Index.intersection(self, other)
- if isinstance(result, DatetimeIndex):
- if result.freq is None:
- result.offset = to_offset(result.inferred_freq)
- return result
-
- elif (other.offset is None or self.offset is None or
- other.offset != self.offset or
- not other.offset.isAnchored() or
- (not self.is_monotonic or not other.is_monotonic)):
- result = Index.intersection(self, other)
- result = self._shallow_copy(result._values, name=result.name,
- tz=result.tz, freq=None)
- if result.freq is None:
- result.offset = to_offset(result.inferred_freq)
- return result
-
- if len(self) == 0:
- return self
- if len(other) == 0:
- return other
- # to make our life easier, "sort" the two ranges
- if self[0] <= other[0]:
- left, right = self, other
- else:
- left, right = other, self
-
- end = min(left[-1], right[-1])
- start = right[0]
-
- if end < start:
- return type(self)(data=[])
- else:
- lslice = slice(*left.slice_locs(start, end))
- left_chunk = left.values[lslice]
- return self._shallow_copy(left_chunk)
-
def _parsed_string_to_bounds(self, reso, parsed):
"""
Calculate datetime bounds for parsed time string and its resolution.
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 2823951c0f348..eccbcc13ef4c5 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -596,48 +596,6 @@ def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self._simple_new(result, name=name, freq=None)
- def intersection(self, other):
- """
- Specialized intersection for TimedeltaIndex objects. May be much faster
- than Index.intersection
-
- Parameters
- ----------
- other : TimedeltaIndex or array-like
-
- Returns
- -------
- y : Index or TimedeltaIndex
- """
- self._assert_can_do_setop(other)
- if not isinstance(other, TimedeltaIndex):
- try:
- other = TimedeltaIndex(other)
- except (TypeError, ValueError):
- pass
- result = Index.intersection(self, other)
- return result
-
- if len(self) == 0:
- return self
- if len(other) == 0:
- return other
- # to make our life easier, "sort" the two ranges
- if self[0] <= other[0]:
- left, right = self, other
- else:
- left, right = other, self
-
- end = min(left[-1], right[-1])
- start = right[0]
-
- if end < start:
- return type(self)(data=[])
- else:
- lslice = slice(*left.slice_locs(start, end))
- left_chunk = left.values[lslice]
- return self._shallow_copy(left_chunk)
-
def _maybe_promote(self, other):
if other.inferred_type == 'timedelta':
other = TimedeltaIndex(other)
diff --git a/pandas/tests/indexes/timedeltas/test_setops.py b/pandas/tests/indexes/timedeltas/test_setops.py
index 22546d25273a7..366ae2c3cbee1 100644
--- a/pandas/tests/indexes/timedeltas/test_setops.py
+++ b/pandas/tests/indexes/timedeltas/test_setops.py
@@ -1,4 +1,5 @@
import numpy as np
+import pytest
import pandas as pd
import pandas.util.testing as tm
@@ -74,3 +75,97 @@ def test_intersection_bug_1708(self):
result = index_1 & index_2
expected = timedelta_range('1 day 01:00:00', periods=3, freq='h')
tm.assert_index_equal(result, expected)
+
+
+@pytest.mark.parametrize('idx1,idx2,expected', [
+ (pd.to_timedelta(range(2, 6), unit='s'),
+ pd.to_timedelta(range(3), unit='s'),
+ TimedeltaIndex(['00:00:002'])),
+ (pd.to_timedelta(range(3), unit='s'),
+ pd.to_timedelta(range(2, 6), unit='s'),
+ TimedeltaIndex(['00:00:002'])),
+])
+def test_intersection_intersects_ascending(idx1, idx2, expected):
+ result = idx1.intersection(idx2)
+ assert result.equals(expected)
+
+
+@pytest.mark.parametrize('idx1,idx2,expected', [
+ (pd.to_timedelta(range(6, 3, -1), unit='s'),
+ pd.to_timedelta(range(5, 1, -1), unit='s'),
+ TimedeltaIndex(['00:00:05', '00:00:04'])),
+ (pd.to_timedelta(range(5, 1, -1), unit='s'),
+ pd.to_timedelta(range(6, 3, -1), unit='s'),
+ TimedeltaIndex(['00:00:05', '00:00:04'])),
+])
+def test_intersection_intersects_descending(idx1, idx2, expected):
+ # GH 17391
+ result = idx1.intersection(idx2)
+ assert result.equals(expected)
+
+
+def test_intersection_intersects_descending_no_intersect():
+ idx1 = pd.to_timedelta(range(6, 4, -1), unit='s')
+ idx2 = pd.to_timedelta(range(4, 1, -1), unit='s')
+ result = idx1.intersection(idx2)
+ assert len(result) == 0
+
+
+def test_intersection_intersects_len_1():
+ idx1 = pd.to_timedelta(range(1, 2), unit='s')
+ idx2 = pd.to_timedelta(range(1, 0, -1), unit='s')
+ intersection = idx1.intersection(idx2)
+ expected = TimedeltaIndex(['00:00:01'],
+ dtype='timedelta64[ns]')
+ tm.assert_index_equal(intersection, expected)
+
+
+def test_intersection_can_intersect_self():
+ idx = pd.to_timedelta(range(1, 2), unit='s')
+ result = idx.intersection(idx)
+ tm.assert_index_equal(idx, result)
+
+
+def test_intersection_not_sorted():
+ idx1 = pd.to_timedelta((1, 3, 2, 5, 4), unit='s')
+ idx2 = pd.to_timedelta((1, 2, 3, 5, 4), unit='s')
+ result = idx1.intersection(idx2)
+ expected = idx1
+ tm.assert_index_equal(result, expected)
+
+
+def test_intersection_not_unique():
+ idx1 = pd.to_timedelta((1, 2, 2, 3, 3, 5), unit='s')
+ idx2 = pd.to_timedelta((1, 2, 3, 4), unit='s')
+ result = idx1.intersection(idx2)
+ expected = pd.to_timedelta((1, 2, 2, 3, 3), unit='s')
+ tm.assert_index_equal(result, expected)
+
+ result = idx2.intersection(idx1)
+ expected = pd.to_timedelta((1, 2, 2, 3, 3), unit='s')
+ tm.assert_index_equal(result, expected)
+
+
+@pytest.mark.parametrize("index1, index2, expected", [
+ (pd.to_timedelta((1, 2, 3, 4, 5, 6, 7, 8), unit='s'),
+ pd.to_timedelta((2, 3, 4, 8), unit='s'),
+ pd.to_timedelta((2, 3, 4, 8), unit='s')),
+ (pd.to_timedelta((1, 2, 3, 4, 5), unit='s'),
+ pd.to_timedelta((2, 3, 4), unit='s'),
+ pd.to_timedelta((2, 3, 4), unit='s')),
+ (pd.to_timedelta((2, 4, 5, 6), unit='s'),
+ pd.to_timedelta((2, 3, 4), unit='s'),
+ pd.to_timedelta((2, 4), unit='s')),
+])
+def test_intersection_different_lengths(index1, index2, expected):
+ def intersect(idx1, idx2, expected):
+ result = idx1.intersection(idx2)
+ tm.assert_index_equal(result, expected)
+ result = idx2.intersection(idx1)
+ tm.assert_index_equal(result, expected)
+
+ intersect(index1, index2, expected)
+ intersect(index1.sort_values(ascending=False),
+ index2.sort_values(ascending=False),
+ expected.sort_values(ascending=False)
+ )
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 7ccecaa84e6d6..1a41ba9dba2a4 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -119,6 +119,26 @@ def _is_normalized(dt):
return False
return True
+
+def index_offsets_equal(first, second):
+ """
+ Checks if the two indexes have an offset, and if they equal each other
+ Parameters
+ ----------
+ first: Index
+ second: Index
+
+ Returns
+ -------
+ bool
+ """
+ first = getattr(first, 'offset', None)
+ second = getattr(second, 'offset', None)
+ are_offsets_equal = True
+ if first is None or second is None or first != second:
+ are_offsets_equal = False
+ return are_offsets_equal
+
# ---------------------------------------------------------------------
# DateOffset
| - [ ] closes #17391
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17433 | 2017-09-05T02:53:32Z | 2017-12-10T23:32:17Z | null | 2018-07-24T13:27:12Z |
Dont re-pin total_seconds as it is already implemented | diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 50e0b77c6d3a0..bd0d0fe5559d3 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -858,6 +858,9 @@ class NaTType(_NaT):
return (__nat_unpickle, (None, ))
def total_seconds(self):
+ """
+ Total duration of timedelta in seconds (to ns precision)
+ """
# GH 10939
return np.nan
@@ -3892,8 +3895,9 @@ for field in fields:
_nat_methods = ['date', 'now', 'replace', 'to_pydatetime',
'today', 'round', 'floor', 'ceil', 'tz_convert',
'tz_localize']
-_nan_methods = ['weekday', 'isoweekday', 'total_seconds']
-_implemented_methods = ['to_datetime', 'to_datetime64', 'isoformat']
+_nan_methods = ['weekday', 'isoweekday']
+_implemented_methods = [
+ 'to_datetime', 'to_datetime64', 'isoformat', 'total_seconds']
_implemented_methods.extend(_nat_methods)
_implemented_methods.extend(_nan_methods)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17432 | 2017-09-04T19:34:51Z | 2017-09-07T00:14:06Z | 2017-09-07T00:14:06Z | 2017-10-30T16:23:41Z |
Remove unused _day and _month attrs | diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 50e0b77c6d3a0..8fbc606ccdfe2 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -829,8 +829,6 @@ class NaTType(_NaT):
cdef _NaT base
base = _NaT.__new__(cls, 1, 1, 1)
- base._day = -1
- base._month = -1
base.value = NPY_NAT
return base
| closes #17429
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17431 | 2017-09-04T19:32:03Z | 2017-09-04T23:32:35Z | 2017-09-04T23:32:35Z | 2017-10-30T16:23:50Z |
BUG: Plotting Timedelta on y-axis #16953 | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 273cbd8357f85..44438d57dc0ad 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -400,7 +400,7 @@ I/O
Plotting
^^^^^^^^
- Bug in plotting methods using ``secondary_y`` and ``fontsize`` not setting secondary axis font size (:issue:`12565`)
-
+- Bug when plotting ``timedelta`` and ``datetime`` dtypes on y-axis (:issue:`16953`)
Groupby/Resample/Rolling
^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index e5b9497993172..a0b7e93efd05c 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -342,7 +342,13 @@ def _compute_plot_data(self):
label = 'None'
data = data.to_frame(name=label)
- numeric_data = data._convert(datetime=True)._get_numeric_data()
+ # GH16953, _convert is needed as fallback, for ``Series``
+ # with ``dtype == object``
+ data = data._convert(datetime=True, timedelta=True)
+ numeric_data = data.select_dtypes(include=[np.number,
+ "datetime",
+ "datetimetz",
+ "timedelta"])
try:
is_empty = numeric_data.empty
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 67098529a0111..f3b287a8889c3 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -380,6 +380,82 @@ def test_subplots_timeseries(self):
self._check_ticks_props(ax, xlabelsize=7, xrot=45,
ylabelsize=7)
+ def test_subplots_timeseries_y_axis(self):
+ # GH16953
+ data = {"numeric": np.array([1, 2, 5]),
+ "timedelta": [pd.Timedelta(-10, unit="s"),
+ pd.Timedelta(10, unit="m"),
+ pd.Timedelta(10, unit="h")],
+ "datetime_no_tz": [pd.to_datetime("2017-08-01 00:00:00"),
+ pd.to_datetime("2017-08-01 02:00:00"),
+ pd.to_datetime("2017-08-02 00:00:00")],
+ "datetime_all_tz": [pd.to_datetime("2017-08-01 00:00:00",
+ utc=True),
+ pd.to_datetime("2017-08-01 02:00:00",
+ utc=True),
+ pd.to_datetime("2017-08-02 00:00:00",
+ utc=True)],
+ "text": ["This", "should", "fail"]}
+ testdata = DataFrame(data)
+
+ ax_numeric = testdata.plot(y="numeric")
+ assert (ax_numeric.get_lines()[0].get_data()[1] ==
+ testdata["numeric"].values).all()
+ ax_timedelta = testdata.plot(y="timedelta")
+ assert (ax_timedelta.get_lines()[0].get_data()[1] ==
+ testdata["timedelta"].values).all()
+ ax_datetime_no_tz = testdata.plot(y="datetime_no_tz")
+ assert (ax_datetime_no_tz.get_lines()[0].get_data()[1] ==
+ testdata["datetime_no_tz"].values).all()
+ ax_datetime_all_tz = testdata.plot(y="datetime_all_tz")
+ assert (ax_datetime_all_tz.get_lines()[0].get_data()[1] ==
+ testdata["datetime_all_tz"].values).all()
+ with pytest.raises(TypeError):
+ testdata.plot(y="text")
+
+ @pytest.mark.xfail(reason='not support for period, categorical, '
+ 'datetime_mixed_tz')
+ def test_subplots_timeseries_y_axis_not_supported(self):
+ """
+ This test will fail for:
+ period:
+ since period isn't yet implemented in ``select_dtypes``
+ and because it will need a custom value converter +
+ tick formater (as was done for x-axis plots)
+
+ categorical:
+ because it will need a custom value converter +
+ tick formater (also doesn't work for x-axis, as of now)
+
+ datetime_mixed_tz:
+ because of the way how pandas handels ``Series`` of
+ ``datetime`` objects with different timezone,
+ generally converting ``datetime`` objects in a tz-aware
+ form could help with this problem
+ """
+ data = {"numeric": np.array([1, 2, 5]),
+ "period": [pd.Period('2017-08-01 00:00:00', freq='H'),
+ pd.Period('2017-08-01 02:00', freq='H'),
+ pd.Period('2017-08-02 00:00:00', freq='H')],
+ "categorical": pd.Categorical(["c", "b", "a"],
+ categories=["a", "b", "c"],
+ ordered=False),
+ "datetime_mixed_tz": [pd.to_datetime("2017-08-01 00:00:00",
+ utc=True),
+ pd.to_datetime("2017-08-01 02:00:00"),
+ pd.to_datetime("2017-08-02 00:00:00")]}
+ testdata = pd.DataFrame(data)
+ ax_period = testdata.plot(x="numeric", y="period")
+ assert (ax_period.get_lines()[0].get_data()[1] ==
+ testdata["period"].values).all()
+ ax_categorical = testdata.plot(x="numeric", y="categorical")
+ assert (ax_categorical.get_lines()[0].get_data()[1] ==
+ testdata["categorical"].values).all()
+ ax_datetime_mixed_tz = testdata.plot(x="numeric",
+ y="datetime_mixed_tz")
+ assert (ax_datetime_mixed_tz.get_lines()[0].get_data()[1] ==
+ testdata["datetime_mixed_tz"].values).all()
+
@pytest.mark.slow
def test_subplots_layout(self):
# GH 6667
| - [x] closes #16953
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
This fixes the issue with a TypeError being raised if the y-data were of type Timedelta or Datetime.
The plot of Timedelta doesn't look pretty since it is converted to ns representation, but matplotlib has already an open issue for that.
PS: Greetings to the people from the EuroSciPy 2017 sprints ^^
| https://api.github.com/repos/pandas-dev/pandas/pulls/17430 | 2017-09-04T19:07:43Z | 2017-09-06T12:03:40Z | 2017-09-06T12:03:40Z | 2017-10-15T19:44:37Z |
Added line decoding before sniffing at PythonParser#_make_reader | diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 8b1a921536a1d..605a4ec8505e4 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2125,6 +2125,8 @@ class MyDialect(csv.Dialect):
self.pos += 1
self.line_pos += 1
+ if self.encoding is not None:
+ line = line.decode(self.encoding)
sniffed = csv.Sniffer().sniff(line)
dia.delimiter = sniffed.delimiter
if self.encoding is not None:
| Fixes issue when reading CSV from unencoded IO stream (i.e. WSGI request body). CSV sniffer uses regexp internally and crashes with `TypeError: cannot use a string pattern on a bytes-like object`.
This change fixes the issue. Better way, however would be to add proper encoding support to `csv.Sniffer()`.
- [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17427 | 2017-09-04T10:17:41Z | 2017-10-28T15:44:01Z | null | 2023-05-11T01:16:20Z |
DOC: Add Timestamp, Period, Timedelta, and Interval to api.rst | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 12e6c7ad7f630..d34cec86638fb 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -1599,6 +1599,201 @@ Conversion
TimedeltaIndex.floor
TimedeltaIndex.ceil
+.. currentmodule:: pandas
+
+Scalars
+-------
+
+Period
+~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ Period
+
+Attributes
+~~~~~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ Period.day
+ Period.dayofweek
+ Period.dayofyear
+ Period.days_in_month
+ Period.daysinmonth
+ Period.end_time
+ Period.freq
+ Period.freqstr
+ Period.hour
+ Period.is_leap_year
+ Period.minute
+ Period.month
+ Period.now
+ Period.ordinal
+ Period.quarter
+ Period.qyear
+ Period.second
+ Period.start_time
+ Period.strftime
+ Period.week
+ Period.weekday
+ Period.weekofyear
+ Period.year
+
+Methods
+~~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ Period.asfreq
+ Period.strftime
+ Period.to_timestamp
+
+Timestamp
+~~~~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ Timestamp
+
+Properties
+~~~~~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ Timestamp.asm8
+ Timestamp.day
+ Timestamp.dayofweek
+ Timestamp.dayofyear
+ Timestamp.days_in_month
+ Timestamp.daysinmonth
+ Timestamp.hour
+ Timestamp.is_leap_year
+ Timestamp.is_month_end
+ Timestamp.is_month_start
+ Timestamp.is_quarter_end
+ Timestamp.is_quarter_start
+ Timestamp.is_year_end
+ Timestamp.is_year_start
+ Timestamp.max
+ Timestamp.microsecond
+ Timestamp.min
+ Timestamp.month
+ Timestamp.nanosecond
+ Timestamp.quarter
+ Timestamp.resolution
+ Timestamp.second
+ Timestamp.tz
+ Timestamp.tzinfo
+ Timestamp.value
+ Timestamp.weekday_name
+ Timestamp.weekofyear
+ Timestamp.year
+
+Methods
+~~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ Timestamp.astimezone
+ Timestamp.ceil
+ Timestamp.combine
+ Timestamp.ctime
+ Timestamp.date
+ Timestamp.dst
+ Timestamp.floor
+ Timestamp.freq
+ Timestamp.freqstr
+ Timestamp.from_ordinal
+ Timestamp.fromtimestamp
+ Timestamp.isocalendar
+ Timestamp.isoformat
+ Timestamp.isoweekday
+ Timestamp.normalize
+ Timestamp.now
+ Timestamp.replace
+ Timestamp.round
+ Timestamp.strftime
+ Timestamp.strptime
+ Timestamp.time
+ Timestamp.timetuple
+ Timestamp.timetz
+ Timestamp.to_datetime64
+ Timestamp.to_julian_date
+ Timestamp.to_period
+ Timestamp.to_pydatetime
+ Timestamp.today
+ Timestamp.toordinal
+ Timestamp.tz_convert
+ Timestamp.tz_localize
+ Timestamp.tzname
+ Timestamp.utcfromtimestamp
+ Timestamp.utcnow
+ Timestamp.utcoffset
+ Timestamp.utctimetuple
+ Timestamp.weekday
+
+Interval
+~~~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ Interval
+
+Properties
+~~~~~~~~~~
+.. autosummary::
+ :toctree generated/
+
+ Interval.closed
+ Interval.closed_left
+ Interval.closed_right
+ Interval.left
+ Interval.mid
+ Interval.open_left
+ Interval.open_right
+ Interval.right
+
+Timedelta
+~~~~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ Timedelta
+
+Properties
+~~~~~~~~~~
+.. autosummary::
+ :toctree generated/
+
+ Timedelta.asm8
+ Timedelta.components
+ Timedelta.days
+ Timedelta.delta
+ Timedelta.freq
+ Timedelta.is_populated
+ Timedelta.max
+ Timedelta.microseconds
+ Timedelta.min
+ Timedelta.nanoseconds
+ Timedelta.resolution
+ Timedelta.seconds
+ Timedelta.value
+
+Methods
+~~~~~~~
+.. autosummary::
+ :toctree generated/
+
+ Timedelta.ceil
+ Timedelta.floor
+ Timedelta.isoformat
+ Timedelta.round
+ Timdelta.to_pytimedelta
+ Timedelta.to_timedelta64
+ Timedelta.total_seconds
+ Timedelta.view
+
Window
------
.. currentmodule:: pandas.core.window
diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx
index 816b7ebfff86d..8a88bc33e431f 100644
--- a/pandas/_libs/period.pyx
+++ b/pandas/_libs/period.pyx
@@ -1101,7 +1101,7 @@ cdef class _Period(object):
class Period(_Period):
"""
- Represents an period of time
+ Represents a period of time
Parameters
----------
| - [x] closes #17369
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17424 | 2017-09-03T01:08:33Z | 2017-09-07T11:35:41Z | 2017-09-07T11:35:41Z | 2017-09-11T04:05:00Z |
PERF: Implement get_freq_code in cython frequencies | diff --git a/asv_bench/benchmarks/period.py b/asv_bench/benchmarks/period.py
index f9837191a7bae..78d66295f28cc 100644
--- a/asv_bench/benchmarks/period.py
+++ b/asv_bench/benchmarks/period.py
@@ -2,6 +2,35 @@
from pandas import Series, Period, PeriodIndex, date_range
+class PeriodProperties(object):
+ def setup(self):
+ self.per = Period('2012-06-01', freq='M')
+
+ def time_year(self):
+ self.per.year
+
+ def time_month(self):
+ self.per.month
+
+ def time_quarter(self):
+ self.per.quarter
+
+ def time_day(self):
+ self.per.day
+
+ def time_hour(self):
+ self.per.hour
+
+ def time_minute(self):
+ self.per.second
+
+ def time_second(self):
+ self.per.second
+
+ def time_leap_year(self):
+ self.per.is_leapyear
+
+
class Constructor(object):
goal_time = 0.2
diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/_libs/tslibs/frequencies.pyx b/pandas/_libs/tslibs/frequencies.pyx
new file mode 100644
index 0000000000000..35429e8ae87f0
--- /dev/null
+++ b/pandas/_libs/tslibs/frequencies.pyx
@@ -0,0 +1,201 @@
+# -*- coding: utf-8 -*-
+# cython: profile=False
+import re
+
+cimport cython
+
+import numpy as np
+cimport numpy as np
+np.import_array()
+
+from util cimport is_integer_object
+
+
+cpdef get_freq_code(freqstr):
+ """
+ Return freq str or tuple to freq code and stride (mult)
+
+ Parameters
+ ----------
+ freqstr : str or tuple
+
+ Returns
+ -------
+ return : tuple of base frequency code and stride (mult)
+
+ Example
+ -------
+ >>> get_freq_code('3D')
+ (6000, 3)
+
+ >>> get_freq_code('D')
+ (6000, 1)
+
+ >>> get_freq_code(('D', 3))
+ (6000, 3)
+ """
+ if getattr(freqstr, '_typ', None) == 'dateoffset':
+ freqstr = (freqstr.rule_code, freqstr.n)
+
+ if isinstance(freqstr, tuple):
+ if (is_integer_object(freqstr[0]) and
+ is_integer_object(freqstr[1])):
+ # e.g., freqstr = (2000, 1)
+ return freqstr
+ else:
+ # e.g., freqstr = ('T', 5)
+ try:
+ code = _period_str_to_code(freqstr[0])
+ stride = freqstr[1]
+ except:
+ if is_integer_object(freqstr[1]):
+ raise
+ code = _period_str_to_code(freqstr[1])
+ stride = freqstr[0]
+ return code, stride
+
+ if is_integer_object(freqstr):
+ return (freqstr, 1)
+
+ base, stride = _base_and_stride(freqstr)
+ code = _period_str_to_code(base)
+
+ return code, stride
+
+
+# hack to handle WOM-1MON
+opattern = re.compile(
+ r'([\-]?\d*|[\-]?\d*\.\d*)\s*([A-Za-z]+([\-][\dA-Za-z\-]+)?)'
+)
+
+
+cpdef _base_and_stride(freqstr):
+ """
+ Return base freq and stride info from string representation
+
+ Examples
+ --------
+ _freq_and_stride('5Min') -> 'Min', 5
+ """
+ groups = opattern.match(freqstr)
+
+ if not groups:
+ raise ValueError("Could not evaluate {freq}".format(freq=freqstr))
+
+ stride = groups.group(1)
+
+ if len(stride):
+ stride = int(stride)
+ else:
+ stride = 1
+
+ base = groups.group(2)
+
+ return (base, stride)
+
+
+# ---------------------------------------------------------------------
+# Period codes
+
+# period frequency constants corresponding to scikits timeseries
+# originals
+_period_code_map = {
+ # Annual freqs with various fiscal year ends.
+ # eg, 2005 for A-FEB runs Mar 1, 2004 to Feb 28, 2005
+ "A-DEC": 1000, # Annual - December year end
+ "A-JAN": 1001, # Annual - January year end
+ "A-FEB": 1002, # Annual - February year end
+ "A-MAR": 1003, # Annual - March year end
+ "A-APR": 1004, # Annual - April year end
+ "A-MAY": 1005, # Annual - May year end
+ "A-JUN": 1006, # Annual - June year end
+ "A-JUL": 1007, # Annual - July year end
+ "A-AUG": 1008, # Annual - August year end
+ "A-SEP": 1009, # Annual - September year end
+ "A-OCT": 1010, # Annual - October year end
+ "A-NOV": 1011, # Annual - November year end
+
+ # Quarterly frequencies with various fiscal year ends.
+ # eg, Q42005 for Q-OCT runs Aug 1, 2005 to Oct 31, 2005
+ "Q-DEC": 2000, # Quarterly - December year end
+ "Q-JAN": 2001, # Quarterly - January year end
+ "Q-FEB": 2002, # Quarterly - February year end
+ "Q-MAR": 2003, # Quarterly - March year end
+ "Q-APR": 2004, # Quarterly - April year end
+ "Q-MAY": 2005, # Quarterly - May year end
+ "Q-JUN": 2006, # Quarterly - June year end
+ "Q-JUL": 2007, # Quarterly - July year end
+ "Q-AUG": 2008, # Quarterly - August year end
+ "Q-SEP": 2009, # Quarterly - September year end
+ "Q-OCT": 2010, # Quarterly - October year end
+ "Q-NOV": 2011, # Quarterly - November year end
+
+ "M": 3000, # Monthly
+
+ "W-SUN": 4000, # Weekly - Sunday end of week
+ "W-MON": 4001, # Weekly - Monday end of week
+ "W-TUE": 4002, # Weekly - Tuesday end of week
+ "W-WED": 4003, # Weekly - Wednesday end of week
+ "W-THU": 4004, # Weekly - Thursday end of week
+ "W-FRI": 4005, # Weekly - Friday end of week
+ "W-SAT": 4006, # Weekly - Saturday end of week
+
+ "B": 5000, # Business days
+ "D": 6000, # Daily
+ "H": 7000, # Hourly
+ "T": 8000, # Minutely
+ "S": 9000, # Secondly
+ "L": 10000, # Millisecondly
+ "U": 11000, # Microsecondly
+ "N": 12000, # Nanosecondly
+}
+
+# Yearly aliases; careful not to put these in _reverse_period_code_map
+_period_code_map.update({'Y' + key[1:]: _period_code_map[key]
+ for key in _period_code_map
+ if key.startswith('A-')})
+
+_period_code_map.update({
+ "Q": 2000, # Quarterly - December year end (default quarterly)
+ "A": 1000, # Annual
+ "W": 4000, # Weekly
+ "C": 5000, # Custom Business Day
+ })
+
+_dont_uppercase = set(('MS', 'ms'))
+
+_lite_rule_alias = {
+ 'W': 'W-SUN',
+ 'Q': 'Q-DEC',
+
+ 'A': 'A-DEC', # YearEnd(month=12),
+ 'Y': 'A-DEC',
+ 'AS': 'AS-JAN', # YearBegin(month=1),
+ 'YS': 'AS-JAN',
+ 'BA': 'BA-DEC', # BYearEnd(month=12),
+ 'BY': 'BA-DEC',
+ 'BAS': 'BAS-JAN', # BYearBegin(month=1),
+ 'BYS': 'BAS-JAN',
+
+ 'Min': 'T',
+ 'min': 'T',
+ 'ms': 'L',
+ 'us': 'U',
+ 'ns': 'N'}
+
+_INVALID_FREQ_ERROR = "Invalid frequency: {0}"
+
+
+cpdef _period_str_to_code(freqstr):
+ freqstr = _lite_rule_alias.get(freqstr, freqstr)
+
+ if freqstr not in _dont_uppercase:
+ lower = freqstr.lower()
+ freqstr = _lite_rule_alias.get(lower, freqstr)
+
+ if freqstr not in _dont_uppercase:
+ freqstr = freqstr.upper()
+ try:
+ return _period_code_map[freqstr]
+ except KeyError:
+ raise ValueError(_INVALID_FREQ_ERROR.format(freqstr))
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 7f34bcaf52926..6644a33245a84 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -8,7 +8,6 @@
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.common import (
- is_integer,
is_period_arraylike,
is_timedelta64_dtype,
is_datetime64_dtype)
@@ -21,6 +20,7 @@
from pandas._libs import lib, tslib
from pandas._libs.tslib import Timedelta
+from pandas._libs.tslibs.frequencies import get_freq_code, _base_and_stride
from pytz import AmbiguousTimeError
@@ -298,58 +298,6 @@ def get_freq(freq):
return freq
-def get_freq_code(freqstr):
- """
- Return freq str or tuple to freq code and stride (mult)
-
- Parameters
- ----------
- freqstr : str or tuple
-
- Returns
- -------
- return : tuple of base frequency code and stride (mult)
-
- Example
- -------
- >>> get_freq_code('3D')
- (6000, 3)
-
- >>> get_freq_code('D')
- (6000, 1)
-
- >>> get_freq_code(('D', 3))
- (6000, 3)
- """
- if isinstance(freqstr, DateOffset):
- freqstr = (freqstr.rule_code, freqstr.n)
-
- if isinstance(freqstr, tuple):
- if (is_integer(freqstr[0]) and
- is_integer(freqstr[1])):
- # e.g., freqstr = (2000, 1)
- return freqstr
- else:
- # e.g., freqstr = ('T', 5)
- try:
- code = _period_str_to_code(freqstr[0])
- stride = freqstr[1]
- except:
- if is_integer(freqstr[1]):
- raise
- code = _period_str_to_code(freqstr[1])
- stride = freqstr[0]
- return code, stride
-
- if is_integer(freqstr):
- return (freqstr, 1)
-
- base, stride = _base_and_stride(freqstr)
- code = _period_str_to_code(base)
-
- return code, stride
-
-
def _get_freq_str(base, mult=1):
code = _reverse_period_code_map.get(base)
if mult == 1:
@@ -577,31 +525,6 @@ def to_offset(freq):
)
-def _base_and_stride(freqstr):
- """
- Return base freq and stride info from string representation
-
- Examples
- --------
- _freq_and_stride('5Min') -> 'Min', 5
- """
- groups = opattern.match(freqstr)
-
- if not groups:
- raise ValueError("Could not evaluate {freq}".format(freq=freqstr))
-
- stride = groups.group(1)
-
- if len(stride):
- stride = int(stride)
- else:
- stride = 1
-
- base = groups.group(2)
-
- return (base, stride)
-
-
def get_base_alias(freqstr):
"""
Returns the base frequency alias, e.g., '5D' -> 'D'
diff --git a/setup.py b/setup.py
index 444db5bc4d275..4e326beefa908 100755
--- a/setup.py
+++ b/setup.py
@@ -341,6 +341,7 @@ class CheckSDist(sdist_class):
'pandas/_libs/window.pyx',
'pandas/_libs/sparse.pyx',
'pandas/_libs/parsers.pyx',
+ 'panads/_libs/tslibs/frequencies.pyx',
'pandas/io/sas/sas.pyx']
def initialize_options(self):
@@ -492,6 +493,8 @@ def pxd(name):
'sources': ['pandas/_libs/src/datetime/np_datetime.c',
'pandas/_libs/src/datetime/np_datetime_strings.c',
'pandas/_libs/src/period_helper.c']},
+ '_libs.tslibs.frequencies': {'pyxfile': '_libs/tslibs/frequencies',
+ 'pxdfiles': ['_libs/src/util']},
'_libs.index': {'pyxfile': '_libs/index',
'sources': ['pandas/_libs/src/datetime/np_datetime.c',
'pandas/_libs/src/datetime/np_datetime_strings.c'],
@@ -653,6 +656,7 @@ def pxd(name):
'pandas.io.formats',
'pandas.io.clipboard',
'pandas._libs',
+ 'pandas._libs.tslibs',
'pandas.plotting',
'pandas.stats',
'pandas.types',
| There's a whole bunch of `tseries.frequencies` and `tseries.offsets` that should be moved into cython to improve `Period` (and `PeriodIndex, and to a lesser extend `Timestamp`) performance. This PR just starts with the function `get_freq_code`.
To keep the PR small, there are a bunch of things it _doesnt_ do that can further improve things in follow-ups:
- Nothing is cdef'd or declared in a pxd file
- `_libs.period` still gets `get_freq_code` from `tseries.frequencies` instead of directly from `tslibs.frequencies`
- A handful of now-duplicate constants in `tseries.frequencies` are not removed.
```
In [6]: per = pd.Period.now('min')
In [7]: %timeit per.month
```
Before:
```
The slowest run took 15.03 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 1.4 µs per loop
```
After:
```
The slowest run took 25.64 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 744 ns per loop
```
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17422 | 2017-09-02T17:50:06Z | 2017-09-08T01:00:05Z | 2017-09-08T01:00:05Z | 2017-10-30T16:25:04Z |
Remove unnecessary iNaT checks from _Period properties | diff --git a/asv_bench/benchmarks/period.py b/asv_bench/benchmarks/period.py
index 78d66295f28cc..df3c2bf3e4b46 100644
--- a/asv_bench/benchmarks/period.py
+++ b/asv_bench/benchmarks/period.py
@@ -78,6 +78,65 @@ def time_value_counts_pindex(self):
self.i.value_counts()
+class Properties(object):
+ def setup(self):
+ self.per = Period('2017-09-06 08:28', freq='min')
+
+ def time_year(self):
+ self.per.year
+
+ def time_month(self):
+ self.per.month
+
+ def time_day(self):
+ self.per.day
+
+ def time_hour(self):
+ self.per.hour
+
+ def time_minute(self):
+ self.per.minute
+
+ def time_second(self):
+ self.per.second
+
+ def time_is_leap_year(self):
+ self.per.is_leap_year
+
+ def time_quarter(self):
+ self.per.quarter
+
+ def time_qyear(self):
+ self.per.qyear
+
+ def time_week(self):
+ self.per.week
+
+ def time_daysinmonth(self):
+ self.per.daysinmonth
+
+ def time_dayofweek(self):
+ self.per.dayofweek
+
+ def time_dayofyear(self):
+ self.per.dayofyear
+
+ def time_start_time(self):
+ self.per.start_time
+
+ def time_end_time(self):
+ self.per.end_time
+
+ def time_to_timestamp():
+ self.per.to_timestamp()
+
+ def time_now():
+ self.per.now()
+
+ def time_asfreq():
+ self.per.asfreq('A')
+
+
class period_standard_indexing(object):
goal_time = 0.2
diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx
index 9e473a7f362b4..babe0f7c6834d 100644
--- a/pandas/_libs/period.pyx
+++ b/pandas/_libs/period.pyx
@@ -107,6 +107,8 @@ cdef extern from "period_helper.h":
int pday(int64_t ordinal, int freq) except INT32_MIN
int pweekday(int64_t ordinal, int freq) except INT32_MIN
int pday_of_week(int64_t ordinal, int freq) except INT32_MIN
+ # TODO: pday_of_week and pweekday are identical. Make one an alias instead
+ # of importing them separately.
int pday_of_year(int64_t ordinal, int freq) except INT32_MIN
int pweek(int64_t ordinal, int freq) except INT32_MIN
int phour(int64_t ordinal, int freq) except INT32_MIN
@@ -868,58 +870,81 @@ cdef class _Period(object):
dt64 = period_ordinal_to_dt64(val.ordinal, base)
return Timestamp(dt64, tz=tz)
- cdef _field(self, alias):
+ @property
+ def year(self):
+ base, mult = get_freq_code(self.freq)
+ return pyear(self.ordinal, base)
+
+ @property
+ def month(self):
+ base, mult = get_freq_code(self.freq)
+ return pmonth(self.ordinal, base)
+
+ @property
+ def day(self):
+ base, mult = get_freq_code(self.freq)
+ return pday(self.ordinal, base)
+
+ @property
+ def hour(self):
+ base, mult = get_freq_code(self.freq)
+ return phour(self.ordinal, base)
+
+ @property
+ def minute(self):
+ base, mult = get_freq_code(self.freq)
+ return pminute(self.ordinal, base)
+
+ @property
+ def second(self):
+ base, mult = get_freq_code(self.freq)
+ return psecond(self.ordinal, base)
+
+ @property
+ def weekofyear(self):
+ base, mult = get_freq_code(self.freq)
+ return pweek(self.ordinal, base)
+
+ @property
+ def week(self):
+ return self.weekofyear
+
+ @property
+ def dayofweek(self):
+ base, mult = get_freq_code(self.freq)
+ return pweekday(self.ordinal, base)
+
+ @property
+ def weekday(self):
+ return self.dayofweek
+
+ @property
+ def dayofyear(self):
+ base, mult = get_freq_code(self.freq)
+ return pday_of_year(self.ordinal, base)
+
+ @property
+ def quarter(self):
base, mult = get_freq_code(self.freq)
- return get_period_field(alias, self.ordinal, base)
-
- property year:
- def __get__(self):
- return self._field(0)
- property month:
- def __get__(self):
- return self._field(3)
- property day:
- def __get__(self):
- return self._field(4)
- property hour:
- def __get__(self):
- return self._field(5)
- property minute:
- def __get__(self):
- return self._field(6)
- property second:
- def __get__(self):
- return self._field(7)
- property weekofyear:
- def __get__(self):
- return self._field(8)
- property week:
- def __get__(self):
- return self.weekofyear
- property dayofweek:
- def __get__(self):
- return self._field(10)
- property weekday:
- def __get__(self):
- return self.dayofweek
- property dayofyear:
- def __get__(self):
- return self._field(9)
- property quarter:
- def __get__(self):
- return self._field(2)
- property qyear:
- def __get__(self):
- return self._field(1)
- property days_in_month:
- def __get__(self):
- return self._field(11)
- property daysinmonth:
- def __get__(self):
- return self.days_in_month
- property is_leap_year:
- def __get__(self):
- return bool(is_leapyear(self._field(0)))
+ return pquarter(self.ordinal, base)
+
+ @property
+ def qyear(self):
+ base, mult = get_freq_code(self.freq)
+ return pqyear(self.ordinal, base)
+
+ @property
+ def days_in_month(self):
+ base, mult = get_freq_code(self.freq)
+ return pdays_in_month(self.ordinal, base)
+
+ @property
+ def daysinmonth(self):
+ return self.days_in_month
+
+ @property
+ def is_leap_year(self):
+ return bool(is_leapyear(self.year))
@classmethod
def now(cls, freq=None):
| In the status quo, `pd.Period.year` goes through several layers of redirection, one of which checks for `self.ordinal == iNaT`.
```
cdef class _Period(object):
[...]
cdef _field(self, alias):
base, mult = frequencies.get_freq_code(self.freq)
return get_period_field(alias, self.ordinal, base)
property year:
def __get__(self):
return self._field(0)
[...]
def get_period_field(int code, int64_t value, int freq):
cdef accessor f = _get_accessor_func(code)
if f is NULL:
raise ValueError('Unrecognized period code: %d' % code)
if value == iNaT:
return np.nan
return f(value, freq)
```
But `Period.ordinal` will never be `iNaT`, because `Period.__new__` returns `NaT` in that case instead of a `Period` object. So we can skip the `value == iNaT` check in `get_period_field`. With that out of the way, we can skip `_get_accessor_func` and `_field` and just write in `pyear(ordinal, freq)`.
This PR changes this property lookup for `year`, `month`, `day`, ...
Speedups ~10% across the board. Before:
```
In [2]: per = pd.Timestamp.now().to_period('M')
In [4]: %timeit per.month
The slowest run took 12.73 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 1.4 µs per loop
In [5]: %timeit per.second
The slowest run took 15.01 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 1.4 µs per loop
In [6]: %timeit per.year
The slowest run took 14.73 times longer than the fastest. This could mean that an intermediate result is being cached.
100000 loops, best of 3: 1.36 µs per loop
In [7]: %timeit per.is_leap_year
The slowest run took 16.94 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 1.37 µs per loop
In [8]: per = pd.Timestamp.now().to_period('ms')
In [9]: %timeit per.is_leap_year
The slowest run took 16.74 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 1.5 µs per loop
In [10]: %timeit per.minute
The slowest run took 13.71 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 1.37 µs per loop
```
After:
```
In [2]: per = pd.Timestamp.now().to_period('M')
In [3]: %timeit per.month
The slowest run took 19.96 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 1.21 µs per loop
In [4]: %timeit per.second
The slowest run took 15.26 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 1.25 µs per loop
In [5]: %timeit per.year
The slowest run took 16.55 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 1.21 µs per loop
In [6]: %timeit per.is_leap_year
The slowest run took 15.96 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 1.24 µs per loop
In [7]: per = pd.Timestamp.now().to_period('ms')
In [8]: %timeit per.is_leap_year
The slowest run took 15.72 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 1.33 µs per loop
In [9]: %timeit per.minute
The slowest run took 17.03 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 1.23 µs per loop
```
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17421 | 2017-09-02T17:08:09Z | 2017-09-15T01:33:04Z | 2017-09-15T01:33:04Z | 2017-10-30T16:24:50Z |
DOC: Clean-up references to v12 to v14 (both included) | diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst
index 711c3e9a95d05..4af476cd5a7e1 100644
--- a/doc/source/advanced.rst
+++ b/doc/source/advanced.rst
@@ -270,9 +270,6 @@ Passing a list of labels or tuples works similar to reindexing:
Using slicers
~~~~~~~~~~~~~
-.. versionadded:: 0.14.0
-
-In 0.14.0 we added a new way to slice multi-indexed objects.
You can slice a multi-index by providing multiple indexers.
You can provide any of the selectors as if you are indexing by label, see :ref:`Selection by Label <indexing.label>`,
@@ -384,7 +381,7 @@ selecting data at a particular level of a MultiIndex easier.
.. ipython:: python
- # using the slicers (new in 0.14.0)
+ # using the slicers
df.loc[(slice(None),'one'),:]
You can also select on the columns with :meth:`~pandas.MultiIndex.xs`, by
@@ -397,7 +394,7 @@ providing the axis argument
.. ipython:: python
- # using the slicers (new in 0.14.0)
+ # using the slicers
df.loc[:,(slice(None),'one')]
:meth:`~pandas.MultiIndex.xs` also allows selection with multiple keys
@@ -408,11 +405,9 @@ providing the axis argument
.. ipython:: python
- # using the slicers (new in 0.14.0)
+ # using the slicers
df.loc[:,('bar','one')]
-.. versionadded:: 0.13.0
-
You can pass ``drop_level=False`` to :meth:`~pandas.MultiIndex.xs` to retain
the level that was selected
@@ -743,16 +738,6 @@ Prior to 0.18.0, the ``Int64Index`` would provide the default index for all ``ND
Float64Index
~~~~~~~~~~~~
-.. note::
-
- As of 0.14.0, ``Float64Index`` is backed by a native ``float64`` dtype
- array. Prior to 0.14.0, ``Float64Index`` was backed by an ``object`` dtype
- array. Using a ``float64`` dtype in the backend speeds up arithmetic
- operations by about 30x and boolean indexing operations on the
- ``Float64Index`` itself are about 2x as fast.
-
-.. versionadded:: 0.13.0
-
By default a ``Float64Index`` will be automatically created when passing floating, or mixed-integer-floating values in index creation.
This enables a pure label-based slicing paradigm that makes ``[],ix,loc`` for scalar indexing and slicing work exactly the
same.
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 35eb14eda238f..5880703b1d271 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -347,7 +347,7 @@ That is because NaNs do not compare as equals:
np.nan == np.nan
-So, as of v0.13.1, NDFrames (such as Series, DataFrames, and Panels)
+So, NDFrames (such as Series, DataFrames, and Panels)
have an :meth:`~DataFrame.equals` method for testing equality, with NaNs in
corresponding locations treated as equal.
@@ -1104,10 +1104,6 @@ Applying with a ``Panel`` will pass a ``Series`` to the applied function. If the
function returns a ``Series``, the result of the application will be a ``Panel``. If the applied function
reduces to a scalar, the result of the application will be a ``DataFrame``.
-.. note::
-
- Prior to 0.13.1 ``apply`` on a ``Panel`` would only work on ``ufuncs`` (e.g. ``np.sum/np.max``).
-
.. ipython:: python
import pandas.util.testing as tm
@@ -1800,8 +1796,6 @@ Series has the :meth:`~Series.searchsorted` method, which works similar to
smallest / largest values
~~~~~~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.14.0
-
``Series`` has the :meth:`~Series.nsmallest` and :meth:`~Series.nlargest` methods which return the
smallest or largest :math:`n` values. For a large ``Series`` this can be much
faster than sorting the entire Series and calling ``head(n)`` on the result.
@@ -2168,8 +2162,6 @@ Selecting columns based on ``dtype``
.. _basics.selectdtypes:
-.. versionadded:: 0.14.1
-
The :meth:`~DataFrame.select_dtypes` method implements subsetting of columns
based on their ``dtype``.
diff --git a/doc/source/comparison_with_r.rst b/doc/source/comparison_with_r.rst
index 194e022e34c7c..f895cdc25e620 100644
--- a/doc/source/comparison_with_r.rst
+++ b/doc/source/comparison_with_r.rst
@@ -247,8 +247,6 @@ For more details and examples see :ref:`the reshaping documentation
|subset|_
~~~~~~~~~~
-.. versionadded:: 0.13
-
The :meth:`~pandas.DataFrame.query` method is similar to the base R ``subset``
function. In R you might want to get the rows of a ``data.frame`` where one
column's values are less than another column's values:
@@ -277,8 +275,6 @@ For more details and examples see :ref:`the query documentation
|with|_
~~~~~~~~
-.. versionadded:: 0.13
-
An expression using a data.frame called ``df`` in R with the columns ``a`` and
``b`` would be evaluated using ``with`` like so:
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 32e7a616fe856..f51c3e679b36f 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -818,7 +818,7 @@ The :ref:`Concat <merging.concatenation>` docs. The :ref:`Join <merging.join>` d
df1 = pd.DataFrame(np.random.randn(6, 3), index=rng, columns=['A', 'B', 'C'])
df2 = df1.copy()
-ignore_index is needed in pandas < v0.13, and depending on df construction
+Depending on df construction, ``ignore_index`` may be needed
.. ipython:: python
diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst
index 685a8690a53d5..264bd1de1fc77 100644
--- a/doc/source/enhancingperf.rst
+++ b/doc/source/enhancingperf.rst
@@ -213,17 +213,18 @@ the rows, applying our ``integrate_f_typed``, and putting this in the zeros arra
.. warning::
- In 0.13.0 since ``Series`` has internaly been refactored to no longer sub-class ``ndarray``
- but instead subclass ``NDFrame``, you can **not pass** a ``Series`` directly as a ``ndarray`` typed parameter
- to a cython function. Instead pass the actual ``ndarray`` using the ``.values`` attribute of the Series.
+ You can **not pass** a ``Series`` directly as a ``ndarray`` typed parameter
+ to a cython function. Instead pass the actual ``ndarray`` using the
+ ``.values`` attribute of the Series. The reason is that the cython
+ definition is specific to an ndarray and not the passed Series.
- Prior to 0.13.0
+ So, do not do this:
.. code-block:: python
apply_integrate_f(df['a'], df['b'], df['N'])
- Use ``.values`` to get the underlying ``ndarray``
+ But rather, use ``.values`` to get the underlying ``ndarray``
.. code-block:: python
@@ -399,10 +400,8 @@ Read more in the `numba docs <http://numba.pydata.org/>`__.
.. _enhancingperf.eval:
-Expression Evaluation via :func:`~pandas.eval` (Experimental)
--------------------------------------------------------------
-
-.. versionadded:: 0.13
+Expression Evaluation via :func:`~pandas.eval`
+-----------------------------------------------
The top-level function :func:`pandas.eval` implements expression evaluation of
:class:`~pandas.Series` and :class:`~pandas.DataFrame` objects.
@@ -539,10 +538,8 @@ Now let's do the same thing but with comparisons:
of type ``bool`` or ``np.bool_``. Again, you should perform these kinds of
operations in plain Python.
-The ``DataFrame.eval`` method (Experimental)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. versionadded:: 0.13
+The ``DataFrame.eval`` method
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In addition to the top level :func:`pandas.eval` function you can also
evaluate an expression in the "context" of a :class:`~pandas.DataFrame`.
@@ -646,19 +643,6 @@ whether the query modifies the original frame.
Local Variables
~~~~~~~~~~~~~~~
-In pandas version 0.14 the local variable API has changed. In pandas 0.13.x,
-you could refer to local variables the same way you would in standard Python.
-For example,
-
-.. code-block:: python
-
- df = pd.DataFrame(np.random.randn(5, 2), columns=['a', 'b'])
- newcol = np.random.randn(len(df))
- df.eval('b + newcol')
-
- UndefinedVariableError: name 'newcol' is not defined
-
-As you can see from the exception generated, this syntax is no longer allowed.
You must *explicitly reference* any local variable that you want to use in an
expression by placing the ``@`` character in front of the name. For example,
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index 53c0b771555f8..e1231b9a4a200 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -766,8 +766,6 @@ missing values with the ``ffill()`` method.
Filtration
----------
-.. versionadded:: 0.12
-
The ``filter`` method returns a subset of the original object. Suppose we
want to take only elements that belong to groups with a group sum greater
than 2.
@@ -858,8 +856,6 @@ In this example, we chopped the collection of time series into yearly chunks
then independently called :ref:`fillna <missing_data.fillna>` on the
groups.
-.. versionadded:: 0.14.1
-
The ``nlargest`` and ``nsmallest`` methods work on ``Series`` style groupbys:
.. ipython:: python
@@ -1048,19 +1044,6 @@ Just like for a DataFrame or Series you can call head and tail on a groupby:
This shows the first or last n rows from each group.
-.. warning::
-
- Before 0.14.0 this was implemented with a fall-through apply,
- so the result would incorrectly respect the as_index flag:
-
- .. code-block:: python
-
- >>> g.head(1): # was equivalent to g.apply(lambda x: x.head(1))
- A B
- A
- 1 0 1 2
- 5 2 5 6
-
.. _groupby.nth:
Taking the nth row of each group
@@ -1113,8 +1096,6 @@ You can also select multiple rows from each group by specifying multiple nth val
Enumerate group items
~~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.13.0
-
To see the order in which each row appears within its group, use the
``cumcount`` method:
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 4687e46490562..a6e7df57be4e5 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -248,8 +248,6 @@ as an attribute:
- In any of these cases, standard indexing will still work, e.g. ``s['1']``, ``s['min']``, and ``s['index']`` will
access the corresponding element or column.
- - The ``Series/Panel`` accesses are available starting in 0.13.0.
-
If you are using the IPython environment, you may also use tab-completion to
see these accessible attributes.
@@ -529,7 +527,6 @@ Out of range slice indexes are handled gracefully just as in Python/Numpy.
.. ipython:: python
# these are allowed in python/numpy.
- # Only works in Pandas starting from v0.14.0.
x = list('abcdef')
x
x[4:10]
@@ -539,14 +536,8 @@ Out of range slice indexes are handled gracefully just as in Python/Numpy.
s.iloc[4:10]
s.iloc[8:10]
-.. note::
-
- Prior to v0.14.0, ``iloc`` would not accept out of bounds indexers for
- slices, e.g. a value that exceeds the length of the object being indexed.
-
-
-Note that this could result in an empty axis (e.g. an empty DataFrame being
-returned)
+Note that using slices that go out of bounds can result in
+an empty axis (e.g. an empty DataFrame being returned)
.. ipython:: python
@@ -745,8 +736,6 @@ Finally, one can also set a seed for ``sample``'s random number generator using
Setting With Enlargement
------------------------
-.. versionadded:: 0.13
-
The ``.loc/[]`` operations can perform enlargement when setting a non-existant key for that axis.
In the ``Series`` case this is effectively an appending operation
@@ -1020,8 +1009,6 @@ partial setting via ``.loc`` (but on the contents rather than the axis labels)
df2[ df2[1:4] > 0 ] = 3
df2
-.. versionadded:: 0.13
-
Where can also accept ``axis`` and ``level`` parameters to align the input when
performing the ``where``.
@@ -1064,8 +1051,6 @@ as condition and ``other`` argument.
The :meth:`~pandas.DataFrame.query` Method (Experimental)
---------------------------------------------------------
-.. versionadded:: 0.13
-
:class:`~pandas.DataFrame` objects have a :meth:`~pandas.DataFrame.query`
method that allows selection using an expression.
@@ -1506,8 +1491,6 @@ The name, if set, will be shown in the console display:
Setting metadata
~~~~~~~~~~~~~~~~
-.. versionadded:: 0.13.0
-
Indexes are "mostly immutable", but it is possible to set and change their
metadata, like the index ``name`` (or, for ``MultiIndex``, ``levels`` and
``labels``).
@@ -1790,7 +1773,7 @@ Evaluation order matters
Furthermore, in chained expressions, the order may determine whether a copy is returned or not.
If an expression will set values on a copy of a slice, then a ``SettingWithCopy``
-exception will be raised (this raise/warn behavior is new starting in 0.13.0)
+warning will be issued.
You can control the action of a chained assignment via the option ``mode.chained_assignment``,
which can take the values ``['raise','warn',None]``, where showing a warning is the default.
diff --git a/doc/source/install.rst b/doc/source/install.rst
index f92c43839ee31..8dc8224ea6cb2 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -107,7 +107,7 @@ following command::
To install a specific pandas version::
- conda install pandas=0.13.1
+ conda install pandas=0.20.3
To install other packages, IPython for example::
diff --git a/doc/source/io.rst b/doc/source/io.rst
index f68358764a40e..33523ea171f3a 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1310,8 +1310,6 @@ column widths for contiguous columns:
The parser will take care of extra white spaces around the columns
so it's ok to have extra separation between the columns in the file.
-.. versionadded:: 0.13.0
-
By default, ``read_fwf`` will try to infer the file's ``colspecs`` by using the
first 100 rows of the file. It can do it only in cases when the columns are
aligned and correctly separated by the provided ``delimiter`` (default delimiter
@@ -1407,8 +1405,7 @@ Reading columns with a ``MultiIndex``
By specifying list of row locations for the ``header`` argument, you
can read in a ``MultiIndex`` for the columns. Specifying non-consecutive
-rows will skip the intervening rows. In order to have the pre-0.13 behavior
-of tupleizing columns, specify ``tupleize_cols=True``.
+rows will skip the intervening rows.
.. ipython:: python
@@ -1418,7 +1415,7 @@ of tupleizing columns, specify ``tupleize_cols=True``.
print(open('mi.csv').read())
pd.read_csv('mi.csv',header=[0,1,2,3],index_col=[0,1])
-Starting in 0.13.0, ``read_csv`` will be able to interpret a more common format
+``read_csv`` is also able to interpret a more common format
of multi-columns indices.
.. ipython:: python
@@ -2012,8 +2009,6 @@ The speedup is less noticeable for smaller datasets:
Normalization
'''''''''''''
-.. versionadded:: 0.13.0
-
pandas provides a utility function to take a dict or list of dicts and *normalize* this semi-structured data
into a flat table.
@@ -2198,8 +2193,6 @@ Reading HTML Content
We **highly encourage** you to read the :ref:`HTML Table Parsing gotchas <io.html.gotchas>`
below regarding the issues surrounding the BeautifulSoup4/html5lib/lxml parsers.
-.. versionadded:: 0.12.0
-
The top-level :func:`~pandas.io.html.read_html` function can accept an HTML
string/file/URL and will parse HTML tables into list of pandas DataFrames.
Let's look at a few examples.
@@ -2653,10 +2646,6 @@ of sheet names can simply be passed to ``read_excel`` with no loss in performanc
# equivalent using the read_excel function
data = read_excel('path_to_file.xls', ['Sheet1', 'Sheet2'], index_col=None, na_values=['NA'])
-.. versionadded:: 0.12
-
-``ExcelFile`` has been moved to the top level namespace.
-
.. versionadded:: 0.17
``read_excel`` can take an ``ExcelFile`` object as input
@@ -2716,9 +2705,6 @@ Using a list to get multiple sheets:
``read_excel`` can read more than one sheet, by setting ``sheet_name`` to either
a list of sheet names, a list of sheet positions, or ``None`` to read all sheets.
-
-.. versionadded:: 0.13
-
Sheets can be specified by sheet index or sheet name, using an integer or string,
respectively.
@@ -2866,9 +2852,9 @@ Files with a ``.xls`` extension will be written using ``xlwt`` and those with a
``.xlsx`` extension will be written using ``xlsxwriter`` (if available) or
``openpyxl``.
-The DataFrame will be written in a way that tries to mimic the REPL output. One
-difference from 0.12.0 is that the ``index_label`` will be placed in the second
-row instead of the first. You can get the previous behaviour by setting the
+The DataFrame will be written in a way that tries to mimic the REPL output.
+The ``index_label`` will be placed in the second
+row instead of the first. You can place it in the first row by setting the
``merge_cells`` option in ``to_excel()`` to ``False``:
.. code-block:: python
@@ -2945,8 +2931,6 @@ Added support for Openpyxl >= 2.2
Excel writer engines
''''''''''''''''''''
-.. versionadded:: 0.13
-
``pandas`` chooses an Excel writer via two methods:
1. the ``engine`` keyword argument
@@ -3074,14 +3058,19 @@ any pickled pandas object (or any other pickled object) from file:
Loading pickled data received from untrusted sources can be unsafe.
- See: http://docs.python.org/2.7/library/pickle.html
+ See: https://docs.python.org/3.6/library/pickle.html
.. warning::
- Several internal refactorings, 0.13 (:ref:`Series Refactoring <whatsnew_0130.refactoring>`), and 0.15 (:ref:`Index Refactoring <whatsnew_0150.refactoring>`),
- preserve compatibility with pickles created prior to these versions. However, these must
- be read with ``pd.read_pickle``, rather than the default python ``pickle.load``.
- See `this question <http://stackoverflow.com/questions/20444593/pandas-compiled-from-source-default-pickle-behavior-changed>`__
+ Several internal refactorings have been done while still preserving
+ compatibility with pickles created with older versions of pandas. However,
+ for such cases, pickled dataframes, series etc, must be read with
+ ``pd.read_pickle``, rather than ``pickle.load``.
+
+ See `here <http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#whatsnew-0130-refactoring>`__
+ and `here <http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#whatsnew-0150-refactoring>`__
+ for some examples of compatibility-breaking changes. See
+ `this question <http://stackoverflow.com/questions/20444593/pandas-compiled-from-source-default-pickle-behavior-changed>`__
for a detailed explanation.
.. _io.pickle.compression:
@@ -3150,9 +3139,7 @@ The default is to 'infer
msgpack
-------
-.. versionadded:: 0.13.0
-
-Starting in 0.13.0, pandas is supporting the ``msgpack`` format for
+pandas supports the ``msgpack`` format for
object serialization. This is a lightweight portable binary format, similar
to binary JSON, that is highly space efficient, and provides good performance
both on the writing (serialization), and reading (deserialization).
@@ -3424,10 +3411,6 @@ This is also true for the major axis of a ``Panel``:
Fixed Format
''''''''''''
-.. note::
-
- This was prior to 0.13.0 the ``Storer`` format.
-
The examples above show storing using ``put``, which write the HDF5 to ``PyTables`` in a fixed array format, called
the ``fixed`` format. These types of stores are **not** appendable once written (though you can simply
remove them and rewrite). Nor are they **queryable**; they must be
@@ -3460,8 +3443,6 @@ other sessions. In addition, delete & query type operations are
supported. This format is specified by ``format='table'`` or ``format='t'``
to ``append`` or ``put`` or ``to_hdf``
-.. versionadded:: 0.13
-
This format can be set as an option as well ``pd.set_option('io.hdf.default_format','table')`` to
enable ``put/append/to_hdf`` to by default store in the ``table`` format.
@@ -3765,9 +3746,7 @@ space. These are in terms of the total number of rows in a table.
Using timedelta64[ns]
+++++++++++++++++++++
-.. versionadded:: 0.13
-
-Beginning in 0.13.0, you can store and query using the ``timedelta64[ns]`` type. Terms can be
+You can store and query using the ``timedelta64[ns]`` type. Terms can be
specified in the format: ``<float>(<unit>)``, where float may be signed (and fractional), and unit can be
``D,s,ms,us,ns`` for the timedelta. Here's an example:
@@ -3889,8 +3868,6 @@ The default is 50,000 rows returned in a chunk.
.. note::
- .. versionadded:: 0.12.0
-
You can also use the iterator with ``read_hdf`` which will open, then
automatically close the store when finished iterating.
@@ -4603,8 +4580,6 @@ included in Python's standard library by default.
You can find an overview of supported drivers for each SQL dialect in the
`SQLAlchemy docs <http://docs.sqlalchemy.org/en/latest/dialects/index.html>`__.
-.. versionadded:: 0.14.0
-
If SQLAlchemy is not installed, a fallback is only provided for sqlite (and
for mysql for backwards compatibility, but this is deprecated and will be
removed in a future version).
@@ -4937,8 +4912,6 @@ Full documentation can be found `here <https://pandas-gbq.readthedocs.io/>`__
Stata Format
------------
-.. versionadded:: 0.12.0
-
.. _io.stata_writer:
Writing to Stata format
diff --git a/doc/source/merging.rst b/doc/source/merging.rst
index d956f1ca54e6b..a5ee1b1a9384c 100644
--- a/doc/source/merging.rst
+++ b/doc/source/merging.rst
@@ -1053,8 +1053,6 @@ As you can see, this drops any rows where there was no match.
Joining a single Index to a Multi-index
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.14.0
-
You can join a singly-indexed ``DataFrame`` with a level of a multi-indexed ``DataFrame``.
The level will match on the name of the index of the singly-indexed frame against
a level name of the multi-indexed frame.
diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index 64a321d67a825..65b411ccd4af2 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -263,8 +263,6 @@ and ``bfill()`` is equivalent to ``fillna(method='bfill')``
Filling with a PandasObject
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.12
-
You can also fillna using a dict or Series that is alignable. The labels of the dict or index of the Series
must match the columns of the frame you wish to fill. The
use case of this is to fill a DataFrame with the mean of that column.
@@ -280,8 +278,6 @@ use case of this is to fill a DataFrame with the mean of that column.
dff.fillna(dff.mean())
dff.fillna(dff.mean()['B':'C'])
-.. versionadded:: 0.13
-
Same result as above, but is aligning the 'fill' value which is
a Series in this case.
@@ -320,11 +316,6 @@ examined :ref:`in the API <api.dataframe.missing>`.
Interpolation
~~~~~~~~~~~~~
-.. versionadded:: 0.13.0
-
- :meth:`~pandas.DataFrame.interpolate`, and :meth:`~pandas.Series.interpolate` have
- revamped interpolation methods and functionality.
-
.. versionadded:: 0.17.0
The ``limit_direction`` keyword argument was added.
diff --git a/doc/source/options.rst b/doc/source/options.rst
index 51d02bc89692a..1592caf90546c 100644
--- a/doc/source/options.rst
+++ b/doc/source/options.rst
@@ -306,7 +306,7 @@ display.float_format None The callable should accept a fl
See core.format.EngFormatter for an example.
display.large_repr truncate For DataFrames exceeding max_rows/max_cols,
the repr (and HTML repr) can show
- a truncated table (the default from 0.13),
+ a truncated table (the default),
or switch to the view from df.info()
(the behaviour in earlier versions of pandas).
allowable settings, ['truncate', 'info']
diff --git a/doc/source/text.rst b/doc/source/text.rst
index e3e4b24d17f44..85b8aa6aa1857 100644
--- a/doc/source/text.rst
+++ b/doc/source/text.rst
@@ -211,8 +211,6 @@ Extracting Substrings
Extract first match in each subject (extract)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. versionadded:: 0.13.0
-
.. warning::
In version 0.18.0, ``extract`` gained the ``expand`` argument. When
diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst
index 07effcfdff33b..daa2c262c8c86 100644
--- a/doc/source/timedeltas.rst
+++ b/doc/source/timedeltas.rst
@@ -242,8 +242,6 @@ Numeric reduction operation for ``timedelta64[ns]`` will return ``Timedelta`` ob
Frequency Conversion
--------------------
-.. versionadded:: 0.13
-
Timedelta Series, ``TimedeltaIndex``, and ``Timedelta`` scalars can be converted to other 'frequencies' by dividing by another timedelta,
or by astyping to a specific timedelta type. These operations yield Series and propagate ``NaT`` -> ``nan``.
Note that division by the numpy scalar is true division, while astyping is equivalent of floor division.
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index aded5e4402df2..c86c58c3183f6 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -177,7 +177,7 @@ you can pass the ``dayfirst`` flag:
.. note::
Specifying a ``format`` argument will potentially speed up the conversion
- considerably and on versions later then 0.13.0 explicitly specifying
+ considerably and explicitly specifying
a format string of '%Y%m%d' takes a faster path still.
If you pass a single string to ``to_datetime``, it returns single ``Timestamp``.
@@ -1946,9 +1946,11 @@ These can easily be converted to a ``PeriodIndex``
Time Zone Handling
------------------
-Pandas provides rich support for working with timestamps in different time zones using ``pytz`` and ``dateutil`` libraries.
-``dateutil`` support is new in 0.14.1 and currently only supported for fixed offset and tzfile zones. The default library is ``pytz``.
-Support for ``dateutil`` is provided for compatibility with other applications e.g. if you use ``dateutil`` in other python packages.
+Pandas provides rich support for working with timestamps in different time
+zones using ``pytz`` and ``dateutil`` libraries. ``dateutil`` currently is only
+supported for fixed offset and tzfile zones. The default library is ``pytz``.
+Support for ``dateutil`` is provided for compatibility with other
+applications e.g. if you use ``dateutil`` in other python packages.
Working with Time Zones
~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index c637246537ca1..839390c8778aa 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -512,8 +512,6 @@ Compare to:
Area Plot
~~~~~~~~~
-.. versionadded:: 0.14
-
You can create area plots with :meth:`Series.plot.area` and :meth:`DataFrame.plot.area`.
Area plots are stacked by default. To produce stacked area plot, each column must be either all positive or all negative values.
@@ -550,8 +548,6 @@ To produce an unstacked plot, pass ``stacked=False``. Alpha value is set to 0.5
Scatter Plot
~~~~~~~~~~~~
-.. versionadded:: 0.13
-
Scatter plot can be drawn by using the :meth:`DataFrame.plot.scatter` method.
Scatter plot requires numeric columns for x and y axis.
These can be specified by ``x`` and ``y`` keywords each.
@@ -619,8 +615,6 @@ See the :meth:`scatter <matplotlib.axes.Axes.scatter>` method and the
Hexagonal Bin Plot
~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.14
-
You can create hexagonal bin plots with :meth:`DataFrame.plot.hexbin`.
Hexbin plots can be a useful alternative to scatter plots if your data are
too dense to plot each point individually.
@@ -682,8 +676,6 @@ See the :meth:`hexbin <matplotlib.axes.Axes.hexbin>` method and the
Pie plot
~~~~~~~~
-.. versionadded:: 0.14
-
You can create a pie plot with :meth:`DataFrame.plot.pie` or :meth:`Series.plot.pie`.
If your data includes any ``NaN``, they will be automatically filled with 0.
A ``ValueError`` will be raised if there are any negative values in your data.
@@ -1365,8 +1357,6 @@ Another option is passing an ``ax`` argument to :meth:`Series.plot` to plot on a
Plotting With Error Bars
~~~~~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.14
-
Plotting with error bars is now supported in the :meth:`DataFrame.plot` and :meth:`Series.plot`
Horizontal and vertical errorbars can be supplied to the ``xerr`` and ``yerr`` keyword arguments to :meth:`~DataFrame.plot()`. The error values can be specified using a variety of formats.
@@ -1407,8 +1397,6 @@ Here is an example of one way to easily plot group means with standard deviation
Plotting Tables
~~~~~~~~~~~~~~~
-.. versionadded:: 0.14
-
Plotting with matplotlib table is now supported in :meth:`DataFrame.plot` and :meth:`Series.plot` with a ``table`` keyword. The ``table`` keyword can accept ``bool``, :class:`DataFrame` or :class:`Series`. The simple way to draw a table is to specify ``table=True``. Data will be transposed to meet matplotlib's default layout.
.. ipython:: python
@@ -1585,10 +1573,6 @@ available in matplotlib. Although this formatting does not provide the same
level of refinement you would get when plotting via pandas, it can be faster
when plotting a large number of points.
-.. note::
-
- The speed up for large data sets only applies to pandas 0.14.0 and later.
-
.. ipython:: python
:suppress:
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This is a continution of #17375 and cleans up references to old versions of pandas in the documentation.
Somme issues, I'd appreciate input on:
* In ``enhancingperf.rst`` there is under "Expression Evaluation via :func:`~pandas.eval` (Experimental)" and "The ``DataFrame.eval`` method (Experimental)" a line ``..versionadded:: 0.13``.
In general I think it's a bit weird that something introduced back in 0.13 still is marked as experimental. I've let the versionadded stay for now, to somehow mark that ``eval`` is quite old even though it is experimental. Are there thoughts whether ``eval`` should still be marked experimental? | https://api.github.com/repos/pandas-dev/pandas/pulls/17420 | 2017-09-02T13:33:38Z | 2017-09-05T10:30:32Z | 2017-09-05T10:30:32Z | 2017-09-11T21:10:52Z |
Implement _is_utc in timezones | diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 42ba0c1cadaec..bf4d53683c9b7 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -17,6 +17,7 @@ cimport tslib
from hashtable cimport HashTable
+from tslibs.timezones cimport _is_utc
from pandas._libs import tslib, algos, hashtable as _hash
from pandas._libs.tslib import Timestamp, Timedelta
from datetime import datetime, timedelta
@@ -32,9 +33,6 @@ cdef extern from "datetime.h":
cdef int64_t iNaT = util.get_nat()
-from dateutil.tz import tzutc as _du_utc
-import pytz
-UTC = pytz.utc
PyDateTime_IMPORT
@@ -559,9 +557,6 @@ cdef inline _to_i8(object val):
return ival
return val
-cdef inline bint _is_utc(object tz):
- return tz is UTC or isinstance(tz, _du_utc)
-
cdef class MultiIndexObjectEngine(ObjectEngine):
"""
diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx
index e2a3baa8d6e8b..08962bca824ca 100644
--- a/pandas/_libs/period.pyx
+++ b/pandas/_libs/period.pyx
@@ -34,9 +34,9 @@ from lib cimport is_null_datetimelike, is_period
from pandas._libs import tslib, lib
from pandas._libs.tslib import (Timedelta, Timestamp, iNaT,
NaT, _get_utcoffset)
+from tslibs.timezones cimport _is_utc
from tslib cimport (
maybe_get_tz,
- _is_utc,
_is_tzlocal,
_get_dst_info,
_nat_scalar_rules)
diff --git a/pandas/_libs/tslib.pxd b/pandas/_libs/tslib.pxd
index aa8cbcb2cedc7..1d81c3cc15cd8 100644
--- a/pandas/_libs/tslib.pxd
+++ b/pandas/_libs/tslib.pxd
@@ -3,7 +3,6 @@ from numpy cimport ndarray, int64_t
cdef convert_to_tsobject(object, object, object, bint, bint)
cpdef convert_to_timedelta64(object, object)
cpdef object maybe_get_tz(object)
-cdef bint _is_utc(object)
cdef bint _is_tzlocal(object)
cdef object _get_dst_info(object)
cdef bint _nat_scalar_rules[6]
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 7e009652f7f0c..b1f794a0030d1 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -107,6 +107,8 @@ cdef int64_t NPY_NAT = util.get_nat()
iNaT = NPY_NAT
+from tslibs.timezones cimport _is_utc
+
cdef inline object create_timestamp_from_ts(
int64_t value, pandas_datetimestruct dts,
object tz, object freq):
@@ -1713,8 +1715,6 @@ def _localize_pydatetime(object dt, object tz):
def get_timezone(tz):
return _get_zone(tz)
-cdef inline bint _is_utc(object tz):
- return tz is UTC or isinstance(tz, _dateutil_tzutc)
cdef inline object _get_zone(object tz):
"""
diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py
index e69de29bb2d1d..f3aa0424f0376 100644
--- a/pandas/_libs/tslibs/__init__.py
+++ b/pandas/_libs/tslibs/__init__.py
@@ -0,0 +1,2 @@
+# -*- coding: utf-8 -*-
+# cython: profile=False
diff --git a/pandas/_libs/tslibs/timezones.pxd b/pandas/_libs/tslibs/timezones.pxd
new file mode 100644
index 0000000000000..0708282abe1d0
--- /dev/null
+++ b/pandas/_libs/tslibs/timezones.pxd
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 -*-
+# cython: profile=False
+
+cdef bint _is_utc(object tz)
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx
new file mode 100644
index 0000000000000..43709e77b70d5
--- /dev/null
+++ b/pandas/_libs/tslibs/timezones.pyx
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+# cython: profile=False
+
+# dateutil compat
+from dateutil.tz import tzutc as _dateutil_tzutc
+
+import pytz
+UTC = pytz.utc
+
+
+cdef inline bint _is_utc(object tz):
+ return tz is UTC or isinstance(tz, _dateutil_tzutc)
diff --git a/setup.py b/setup.py
index d64a78db7500a..434ca64473916 100755
--- a/setup.py
+++ b/setup.py
@@ -341,6 +341,7 @@ class CheckSDist(sdist_class):
'pandas/_libs/window.pyx',
'pandas/_libs/sparse.pyx',
'pandas/_libs/parsers.pyx',
+ 'pandas/_libs/tslibs/timezones.pyx',
'pandas/_libs/tslibs/frequencies.pyx',
'pandas/io/sas/sas.pyx']
@@ -479,6 +480,7 @@ def pxd(name):
'sources': ['pandas/_libs/src/datetime/np_datetime.c',
'pandas/_libs/src/datetime/np_datetime_strings.c',
'pandas/_libs/src/period_helper.c']},
+ '_libs.tslibs.timezones': {'pyxfile': '_libs/tslibs/timezones'},
'_libs.period': {'pyxfile': '_libs/period',
'depends': tseries_depends,
'sources': ['pandas/_libs/src/datetime/np_datetime.c',
| This is the first of a bunch of PRs to take the place of #17274.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17419 | 2017-09-01T22:59:08Z | 2017-09-11T11:22:57Z | 2017-09-11T11:22:57Z | 2017-10-30T16:24:54Z |
DOC/TST: Add examples to MultiIndex.get_level_values + related changes | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 6a30eaefaaae7..a9098126a38e3 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2529,15 +2529,23 @@ def set_value(self, arr, key, value):
def _get_level_values(self, level):
"""
Return an Index of values for requested level, equal to the length
- of the index
+ of the index.
Parameters
----------
- level : int
+ level : int or str
+ ``level`` is either the integer position of the level in the
+ MultiIndex, or the name of the level.
Returns
-------
values : Index
+ ``self``, as there is only one level in the Index.
+
+ See also
+ ---------
+ pandas.MultiIndex.get_level_values : get values for a level of a
+ MultiIndex
"""
self._validate_index_level(level)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index d7d5b6d128a2c..8b2cf0e7c0b40 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -882,15 +882,34 @@ def _get_level_values(self, level):
def get_level_values(self, level):
"""
Return vector of label values for requested level,
- equal to the length of the index
+ equal to the length of the index.
Parameters
----------
- level : int or level name
+ level : int or str
+ ``level`` is either the integer position of the level in the
+ MultiIndex, or the name of the level.
Returns
-------
values : Index
+ ``values`` is a level of this MultiIndex converted to
+ a single :class:`Index` (or subclass thereof).
+
+ Examples
+ ---------
+
+ Create a MultiIndex:
+
+ >>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
+ >>> mi.names = ['level_1', 'level_2']
+
+ Get level values by supplying level as either integer or name:
+
+ >>> mi.get_level_values(0)
+ Index(['a', 'b', 'c'], dtype='object', name='level_1')
+ >>> mi.get_level_values('level_2')
+ Index(['d', 'e', 'f'], dtype='object', name='level_2')
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index aa32e75ba0d58..f96dbdcfb8acf 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1438,6 +1438,12 @@ def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
tm.assert_index_equal(result, self.strIndex)
+ # test for name (GH 17414)
+ index_with_name = self.strIndex.copy()
+ index_with_name.name = 'a'
+ result = index_with_name.get_level_values('a')
+ tm.assert_index_equal(result, index_with_name)
+
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
assert idx.name == idx[1:].name
| - [ ] closes #xxxx
- [x ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
I've added examples to ``MultiIndex.get_level_values``. Also I've done some related changes:
* made return value of ``Index._get_level_values``
* Added test for when supplying name to ``Index._get_level_values``. The method could always take level name, but the doc said only integer could be supplied. | https://api.github.com/repos/pandas-dev/pandas/pulls/17414 | 2017-09-01T17:55:32Z | 2017-09-06T14:55:13Z | 2017-09-06T14:55:13Z | 2017-09-11T21:17:58Z |
DEPS: Added tz keyword to to_datetime function, deprecates utc | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 273cbd8357f85..96e21852e9189 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -132,7 +132,6 @@ Other Enhancements
- :func:`DataFrame.items` and :func:`Series.items` is now present in both Python 2 and 3 and is lazy in all cases (:issue:`13918`, :issue:`17213`)
-
.. _whatsnew_0210.api_breaking:
Backwards incompatible API changes
@@ -328,6 +327,10 @@ Deprecations
- ``pd.options.html.border`` has been deprecated in favor of ``pd.options.display.html.border`` (:issue:`15793`).
+
+- :func: `to_datetime` now takes ``tz`` keyword argument, ``utc`` argument is deprecated (:issue:`13712`)
+
+
.. _whatsnew_0210.prior_deprecations:
Removal of prior version deprecations/changes
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index c0f234a36803d..dc388fe4e1a55 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -20,6 +20,7 @@
ABCDataFrame, ABCDateOffset)
from pandas.core.dtypes.missing import notna
from pandas.core import algorithms
+from pandas.util._decorators import deprecate_kwarg
import pandas.compat as compat
@@ -182,9 +183,12 @@ def _guess_datetime_format_for_array(arr, **kwargs):
return _guess_datetime_format(arr[non_nan_elements[0]], **kwargs)
+@deprecate_kwarg(old_arg_name='utc', new_arg_name='tz',
+ mapping={True: 'UTC'})
def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
- utc=None, box=True, format=None, exact=True,
- unit=None, infer_datetime_format=False, origin='unix'):
+ box=True, format=None, exact=True,
+ unit=None, infer_datetime_format=False, origin='unix',
+ tz=None):
"""
Convert argument to datetime.
@@ -223,6 +227,14 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
utc : boolean, default None
Return UTC DatetimeIndex if True (converting any tz-aware
datetime.datetime objects as well).
+
+ .. deprecated: 0.21.0
+
+ tz : pytz.timezone or dateutil.tz.tzfile, default None
+ Define the timezone.
+
+ .. versionadded: 0.21.0
+
box : boolean, default True
- If True returns a DatetimeIndex
@@ -343,8 +355,6 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
"""
from pandas.core.indexes.datetimes import DatetimeIndex
- tz = 'utc' if utc else None
-
def _convert_listlike(arg, box, format, name=None, tz=tz):
if isinstance(arg, (list, tuple)):
@@ -354,8 +364,8 @@ def _convert_listlike(arg, box, format, name=None, tz=tz):
if is_datetime64tz_dtype(arg):
if not isinstance(arg, DatetimeIndex):
return DatetimeIndex(arg, tz=tz, name=name)
- if utc:
- arg = arg.tz_convert(None).tz_localize('UTC')
+ if tz:
+ arg = arg.tz_convert(None).tz_localize(tz)
return arg
elif is_datetime64_ns_dtype(arg):
@@ -431,6 +441,10 @@ def _convert_listlike(arg, box, format, name=None, tz=tz):
result = arg
if result is None and (format is None or infer_datetime_format):
+ if tz == 'utc' or tz == 'UTC':
+ utc = True
+ else:
+ utc = False
result = tslib.array_to_datetime(
arg,
errors=errors,
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index 50669ee357bbd..98ce7e32cc4b9 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -270,6 +270,23 @@ def test_to_datetime_utc_is_true(self):
expected = pd.DatetimeIndex(data=date_range)
tm.assert_index_equal(result, expected)
+ def test_to_datetime_tz_kw(self):
+ # See gh-13712
+ for tz in [None, 'US/Eastern', 'Asia/Tokyo', 'UTC']:
+ data = ['20140101 000000', '20140102 000000', '20140103 000000']
+ start = pd.Timestamp(data[0], tz=tz)
+ end = pd.Timestamp(data[-1], tz=tz)
+ date_range = pd.bdate_range(start, end)
+
+ result = pd.to_datetime(data, format='%Y%m%d %H%M%S', tz=tz)
+ expected = pd.DatetimeIndex(data=date_range)
+ tm.assert_numpy_array_equal(result.values, expected.values)
+
+ if result.tz is None:
+ assert expected.tz is None
+ else:
+ assert result.tz.zone == expected.tz.zone
+
def test_to_datetime_tz_psycopg2(self):
# xref 8260
| - [x] closes #13712
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/17413 | 2017-09-01T11:59:53Z | 2017-12-10T23:33:10Z | null | 2017-12-10T23:33:10Z |
DOC: to_datetime format argument examples | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 3b8f105bb1b47..7399deb1319d8 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -175,12 +175,8 @@ you can pass the ``dayfirst`` flag:
can't be parsed with the day being first it will be parsed as if
``dayfirst`` were False.
-.. note::
- Specifying a ``format`` argument will potentially speed up the conversion
- considerably and explicitly specifying
- a format string of '%Y%m%d' takes a faster path still.
-
If you pass a single string to ``to_datetime``, it returns single ``Timestamp``.
+
Also, ``Timestamp`` can accept the string input.
Note that ``Timestamp`` doesn't accept string parsing option like ``dayfirst``
or ``format``, use ``to_datetime`` if these are required.
@@ -191,6 +187,25 @@ or ``format``, use ``to_datetime`` if these are required.
pd.Timestamp('2010/11/12')
+Providing a Format Argument
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In addition to the required datetime string, a ``format`` argument can be passed to ensure specific parsing.
+It will potentially speed up the conversion considerably.
+
+For example:
+
+.. ipython:: python
+
+ pd.to_datetime('2010/11/12', format='%Y/%m/%d')
+
+ pd.to_datetime('12-11-2010 00:00', format='%d-%m-%Y %H:%M')
+
+For more information on how to specify the ``format`` options, see https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior.
+
+Assembling datetime from multiple DataFrame columns
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
.. versionadded:: 0.18.1
You can also pass a ``DataFrame`` of integer or string columns to assemble into a ``Series`` of ``Timestamps``.
| - [ ] closes #16669
| https://api.github.com/repos/pandas-dev/pandas/pulls/17412 | 2017-09-01T11:41:47Z | 2017-09-18T13:45:09Z | 2017-09-18T13:45:09Z | 2017-09-18T18:21:22Z |
BUG: fillna ignoring axis=1 parameter. #17399 | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 37247ab133948..16ee642e5dd63 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4758,8 +4758,8 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
return result if not inplace else None
elif not is_list_like(value):
- new_data = self._data.fillna(value=value, limit=limit,
- inplace=inplace,
+ new_data = self._data.fillna(value=value, axis=axis,
+ limit=limit, inplace=inplace,
downcast=downcast)
elif isinstance(value, DataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index ba90503e3bf40..68d86702f2a3d 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -375,17 +375,14 @@ def apply(self, func, mgr=None, **kwargs):
return result
def fillna(self, value, limit=None, inplace=False, downcast=None,
- mgr=None):
+ axis=0, mgr=None):
""" fillna on the block with the value. If we fail, then convert to
ObjectBlock and try again
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not self._can_hold_na:
- if inplace:
- return self
- else:
- return self.copy()
+ return self if inplace else self.copy()
mask = isna(self.values)
if limit is not None:
@@ -396,8 +393,7 @@ def fillna(self, value, limit=None, inplace=False, downcast=None,
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
- mask[mask.cumsum(self.ndim - 1) > limit] = False
-
+ mask[mask.cumsum(int(axis == 0 and self.ndim > 1)) > limit] = False
# fillna, but if we cannot coerce, then try again as an ObjectBlock
try:
values, _, _, _ = self._try_coerce_args(self.values, value)
@@ -2381,8 +2377,8 @@ def _try_coerce_result(self, result):
return result
- def fillna(self, value, limit=None, inplace=False, downcast=None,
- mgr=None):
+ def fillna(self, value, axis=0, limit=None, inplace=False,
+ downcast=None, mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
@@ -2859,7 +2855,7 @@ def interpolate(self, method='pad', axis=0, inplace=False, limit=None,
placement=self.mgr_locs)
def fillna(self, value, limit=None, inplace=False, downcast=None,
- mgr=None):
+ axis=0, mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 2e4e8b9582cf6..fedac9d72412a 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -465,6 +465,36 @@ def test_frame_fillna_limit(self):
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
+ def test_frame_fillna_axis(self):
+ # GH 17399
+ # with limit
+ df = DataFrame(np.random.randn(10, 4))
+ df.iloc[1:4, 1:4] = nan
+ expected = df.copy()
+ expected.iloc[1:4, 1:3] = 0
+ result = df.fillna(value=0, limit=2, axis=1)
+ tm.assert_frame_equal(result, expected)
+
+ # with no limit
+ expected = df.copy()
+ expected.iloc[1:4, 1:4] = 0
+ result = df.fillna(value=0, axis=1)
+ tm.assert_frame_equal(result, expected)
+
+ # with method, limit
+ expected = df.copy()
+ for c in lrange(1, 3):
+ expected.iloc[1:4, c] = expected.iloc[1:4, 0]
+ result = df.fillna(method='ffill', limit=2, axis=1)
+ tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(result, df.ffill(limit=2, axis=1))
+
+ # with inplace
+ expected = df.copy()
+ expected.fillna(value=0, limit=2, axis=1, inplace=True)
+ result = df.fillna(value=0, limit=2, axis=1)
+ tm.assert_frame_equal(result, expected)
+
def test_fillna_skip_certain_blocks(self):
# don't try to fill boolean, int blocks
| - [x] closes #17399
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17409 | 2017-09-01T10:08:59Z | 2018-02-24T17:09:43Z | null | 2019-09-11T14:46:40Z |
TST: Enable tests that aren't currently running in indexes/datetimes/test_tools.py | diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index c0f234a36803d..37f13b365ca20 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -605,7 +605,7 @@ def f(value):
if len(excess):
raise ValueError("extra keys have been passed "
"to the datetime assemblage: "
- "[{excess}]".format(','.join(excess=excess)))
+ "[{excess}]".format(excess=','.join(excess)))
def coerce(values):
# we allow coercion to if errors allows
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index 50669ee357bbd..d60727d33a209 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -25,7 +25,7 @@
compat)
-class TimeConversionFormats(object):
+class TestTimeConversionFormats(object):
def test_to_datetime_format(self):
values = ['1/1/2000', '1/2/2000', '1/3/2000']
@@ -334,7 +334,7 @@ def test_datetime_invalid_datatype(self):
pd.to_datetime(pd.to_datetime)
-class ToDatetimeUnit(object):
+class TestToDatetimeUnit(object):
def test_unit(self):
# GH 11758
@@ -528,7 +528,10 @@ def test_dataframe(self):
df2 = DataFrame({'year': [2015, 2016],
'month': [2, 20],
'day': [4, 5]})
- with pytest.raises(ValueError):
+
+ msg = ("cannot assemble the datetimes: time data .+ does not "
+ "match format '%Y%m%d' \(match\)")
+ with tm.assert_raises_regex(ValueError, msg):
to_datetime(df2)
result = to_datetime(df2, errors='coerce')
expected = Series([Timestamp('20150204 00:00:00'),
@@ -536,26 +539,31 @@ def test_dataframe(self):
assert_series_equal(result, expected)
# extra columns
- with pytest.raises(ValueError):
+ msg = ("extra keys have been passed to the datetime assemblage: "
+ "\[foo\]")
+ with tm.assert_raises_regex(ValueError, msg):
df2 = df.copy()
df2['foo'] = 1
to_datetime(df2)
# not enough
+ msg = ('to assemble mappings requires at least that \[year, month, '
+ 'day\] be specified: \[.+\] is missing')
for c in [['year'],
['year', 'month'],
['year', 'month', 'second'],
['month', 'day'],
['year', 'day', 'second']]:
- with pytest.raises(ValueError):
+ with tm.assert_raises_regex(ValueError, msg):
to_datetime(df[c])
# duplicates
+ msg = 'cannot assemble with duplicate keys'
df2 = DataFrame({'year': [2015, 2016],
'month': [2, 20],
'day': [4, 5]})
df2.columns = ['year', 'year', 'day']
- with pytest.raises(ValueError):
+ with tm.assert_raises_regex(ValueError, msg):
to_datetime(df2)
df2 = DataFrame({'year': [2015, 2016],
@@ -563,7 +571,7 @@ def test_dataframe(self):
'day': [4, 5],
'hour': [4, 5]})
df2.columns = ['year', 'month', 'day', 'day']
- with pytest.raises(ValueError):
+ with tm.assert_raises_regex(ValueError, msg):
to_datetime(df2)
def test_dataframe_dtypes(self):
@@ -594,7 +602,7 @@ def test_dataframe_dtypes(self):
to_datetime(df)
-class ToDatetimeMisc(object):
+class TestToDatetimeMisc(object):
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
| - [X] closes #17403
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Enabled tests that aren't currently running. Made a small modification to a typo in `datetimes.py` to make sure all tests pass. Small modifications to checks in `TestToDatetimeUnit::test_frame` to verify that the raised messages match expectations.
| https://api.github.com/repos/pandas-dev/pandas/pulls/17405 | 2017-08-31T20:53:38Z | 2017-09-01T14:52:45Z | 2017-09-01T14:52:44Z | 2017-09-01T16:38:14Z |
TST: remove tests and docs for legacy (pre 0.12) hdf5 support | diff --git a/doc/source/io.rst b/doc/source/io.rst
index e338407361705..f55c72bae5a20 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -4419,44 +4419,6 @@ Now you can import the ``DataFrame`` into R:
starting point if you have stored multiple ``DataFrame`` objects to a
single HDF5 file.
-Backwards Compatibility
-'''''''''''''''''''''''
-
-0.10.1 of ``HDFStore`` can read tables created in a prior version of pandas,
-however query terms using the
-prior (undocumented) methodology are unsupported. ``HDFStore`` will
-issue a warning if you try to use a legacy-format file. You must
-read in the entire file and write it out using the new format, using the
-method ``copy`` to take advantage of the updates. The group attribute
-``pandas_version`` contains the version information. ``copy`` takes a
-number of options, please see the docstring.
-
-
-.. ipython:: python
- :suppress:
-
- import os
- legacy_file_path = os.path.abspath('source/_static/legacy_0.10.h5')
-
-.. ipython:: python
- :okwarning:
-
- # a legacy store
- legacy_store = pd.HDFStore(legacy_file_path,'r')
- legacy_store
-
- # copy (and return the new handle)
- new_store = legacy_store.copy('store_new.h5')
- new_store
- new_store.close()
-
-.. ipython:: python
- :suppress:
-
- legacy_store.close()
- import os
- os.remove('store_new.h5')
-
Performance
'''''''''''
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 014f251ffb90a..ec48753bee889 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -317,6 +317,8 @@ Removal of prior version deprecations/changes
- ``Categorical`` has dropped the ``.order()`` and ``.sort()`` methods in favor of ``.sort_values()`` (:issue:`12882`)
- :func:`eval` and :func:`DataFrame.eval` have changed the default of ``inplace`` from ``None`` to ``False`` (:issue:`11149`)
- The function ``get_offset_name`` has been dropped in favor of the ``.freqstr`` attribute for an offset (:issue:`11834`)
+- pandas no longer tests for compatibility with hdf5-files created with pandas < 0.11 (:issue:`17404`).
+
.. _whatsnew_0210.performance:
diff --git a/pandas/tests/io/data/legacy_hdf/legacy_0.10.h5 b/pandas/tests/io/data/legacy_hdf/legacy_0.10.h5
deleted file mode 100644
index b1439ef16361a..0000000000000
Binary files a/pandas/tests/io/data/legacy_hdf/legacy_0.10.h5 and /dev/null differ
diff --git a/pandas/tests/io/data/legacy_hdf/legacy_table_0.11.h5 b/pandas/tests/io/data/legacy_hdf/legacy_table_0.11.h5
deleted file mode 100644
index 958effc2ce6f8..0000000000000
Binary files a/pandas/tests/io/data/legacy_hdf/legacy_table_0.11.h5 and /dev/null differ
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index f33ba7627101e..b5ecc4d34cd08 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -4599,41 +4599,13 @@ def test_legacy_table_read(self):
expected = df2[df2.index > df2.index[2]]
assert_frame_equal(expected, result)
- def test_legacy_0_10_read(self):
- # legacy from 0.10
- with catch_warnings(record=True):
- path = tm.get_data_path('legacy_hdf/legacy_0.10.h5')
- with ensure_clean_store(path, mode='r') as store:
- str(store)
- for k in store.keys():
- store.select(k)
-
- def test_legacy_0_11_read(self):
- # legacy from 0.11
- path = os.path.join('legacy_hdf', 'legacy_table_0.11.h5')
- with ensure_clean_store(tm.get_data_path(path), mode='r') as store:
- str(store)
- assert 'df' in store
- assert 'df1' in store
- assert 'mi' in store
- df = store.select('df')
- df1 = store.select('df1')
- mi = store.select('mi')
- assert isinstance(df, DataFrame)
- assert isinstance(df1, DataFrame)
- assert isinstance(mi, DataFrame)
-
def test_copy(self):
with catch_warnings(record=True):
- def do_copy(f=None, new_f=None, keys=None,
+ def do_copy(f, new_f=None, keys=None,
propindexes=True, **kwargs):
try:
- if f is None:
- f = tm.get_data_path(os.path.join('legacy_hdf',
- 'legacy_0.10.h5'))
-
store = HDFStore(f, 'r')
if new_f is None:
@@ -4671,10 +4643,6 @@ def do_copy(f=None, new_f=None, keys=None,
pass
safe_remove(new_f)
- do_copy()
- do_copy(keys=['/a', '/b', '/df1_mixed'])
- do_copy(propindexes=False)
-
# new table
df = tm.makeDataFrame()
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Removing tests for old compatability because this issue is sufficiently old now, that people should be expected to have solved this themselves by now.
The first commit removes the legacy test for pandas v0.10. The second commit removes the legacy test for pandas v0.11.
This pull request replaces #17398.
See also discussion in #17375. | https://api.github.com/repos/pandas-dev/pandas/pulls/17404 | 2017-08-31T20:05:15Z | 2017-09-01T16:36:00Z | 2017-09-01T16:36:00Z | 2017-09-11T21:12:08Z |
Tslib unused | diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 5dd30072fb7aa..50e0b77c6d3a0 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -2622,8 +2622,6 @@ cdef class _Timedelta(timedelta):
int ndim
if isinstance(other, _Timedelta):
- if isinstance(other, _NaT):
- return _cmp_nat_dt(other, self, _reverse_ops[op])
ots = other
elif isinstance(other, timedelta):
ots = Timedelta(other)
@@ -3882,7 +3880,7 @@ fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'millisecond', 'microsecond', 'nanosecond',
'week', 'dayofyear', 'weekofyear', 'days_in_month', 'daysinmonth',
'dayofweek', 'weekday_name', 'days', 'seconds', 'microseconds',
- 'nanoseconds', 'qyear', 'quarter']
+ 'nanoseconds', 'qyear']
for field in fields:
prop = property(fget=lambda self: np.nan)
setattr(NaTType, field, prop)
@@ -4620,7 +4618,6 @@ def build_field_sarray(ndarray[int64_t] dtindex):
"""
cdef:
Py_ssize_t i, count = 0
- int isleap
pandas_datetimestruct dts
ndarray[int32_t] years, months, days, hours, minutes, seconds, mus
@@ -5270,7 +5267,6 @@ cpdef _isleapyear_arr(ndarray years):
def monthrange(int64_t year, int64_t month):
cdef:
int64_t days
- int64_t day_of_week
if month < 1 or month > 12:
raise ValueError("bad month number 0; must be 1-12")
| along with one repeated string and a case in `_Timedelta.__richcmp__` that is not reachable.
- [ ] closes #17379
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17402 | 2017-08-31T18:53:39Z | 2017-09-01T17:11:41Z | 2017-09-01T17:11:41Z | 2017-09-01T17:20:47Z |
TST: Remove tests for hdf files created with pandas <= v0.11 | diff --git a/doc/source/io.rst b/doc/source/io.rst
index e338407361705..f55c72bae5a20 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -4419,44 +4419,6 @@ Now you can import the ``DataFrame`` into R:
starting point if you have stored multiple ``DataFrame`` objects to a
single HDF5 file.
-Backwards Compatibility
-'''''''''''''''''''''''
-
-0.10.1 of ``HDFStore`` can read tables created in a prior version of pandas,
-however query terms using the
-prior (undocumented) methodology are unsupported. ``HDFStore`` will
-issue a warning if you try to use a legacy-format file. You must
-read in the entire file and write it out using the new format, using the
-method ``copy`` to take advantage of the updates. The group attribute
-``pandas_version`` contains the version information. ``copy`` takes a
-number of options, please see the docstring.
-
-
-.. ipython:: python
- :suppress:
-
- import os
- legacy_file_path = os.path.abspath('source/_static/legacy_0.10.h5')
-
-.. ipython:: python
- :okwarning:
-
- # a legacy store
- legacy_store = pd.HDFStore(legacy_file_path,'r')
- legacy_store
-
- # copy (and return the new handle)
- new_store = legacy_store.copy('store_new.h5')
- new_store
- new_store.close()
-
-.. ipython:: python
- :suppress:
-
- legacy_store.close()
- import os
- os.remove('store_new.h5')
-
Performance
'''''''''''
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 014f251ffb90a..09ab7034dc633 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -317,7 +317,7 @@ Removal of prior version deprecations/changes
- ``Categorical`` has dropped the ``.order()`` and ``.sort()`` methods in favor of ``.sort_values()`` (:issue:`12882`)
- :func:`eval` and :func:`DataFrame.eval` have changed the default of ``inplace`` from ``None`` to ``False`` (:issue:`11149`)
- The function ``get_offset_name`` has been dropped in favor of the ``.freqstr`` attribute for an offset (:issue:`11834`)
-
+- pandas no longer tests for compatibility with hdf5-files created with pandas <=0.11 (:issue:`17398`).
.. _whatsnew_0210.performance:
diff --git a/pandas/tests/io/data/legacy_hdf/legacy_0.10.h5 b/pandas/tests/io/data/legacy_hdf/legacy_0.10.h5
deleted file mode 100644
index b1439ef16361a..0000000000000
Binary files a/pandas/tests/io/data/legacy_hdf/legacy_0.10.h5 and /dev/null differ
diff --git a/pandas/tests/io/data/legacy_hdf/legacy_table.h5 b/pandas/tests/io/data/legacy_hdf/legacy_table.h5
deleted file mode 100644
index 1c90382d9125c..0000000000000
Binary files a/pandas/tests/io/data/legacy_hdf/legacy_table.h5 and /dev/null differ
diff --git a/pandas/tests/io/data/legacy_hdf/legacy_table_0.11.h5 b/pandas/tests/io/data/legacy_hdf/legacy_table_0.11.h5
deleted file mode 100644
index 958effc2ce6f8..0000000000000
Binary files a/pandas/tests/io/data/legacy_hdf/legacy_table_0.11.h5 and /dev/null differ
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index f33ba7627101e..080eeb9e6747e 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -4576,53 +4576,6 @@ def test_pytables_native2_read(self):
d1 = store['detector']
assert isinstance(d1, DataFrame)
- def test_legacy_table_read(self):
- # legacy table types
- with ensure_clean_store(
- tm.get_data_path('legacy_hdf/legacy_table.h5'),
- mode='r') as store:
-
- with catch_warnings(record=True):
- store.select('df1')
- store.select('df2')
- store.select('wp1')
-
- # force the frame
- store.select('df2', typ='legacy_frame')
-
- # old version warning
- pytest.raises(
- Exception, store.select, 'wp1', 'minor_axis=B')
-
- df2 = store.select('df2')
- result = store.select('df2', 'index>df2.index[2]')
- expected = df2[df2.index > df2.index[2]]
- assert_frame_equal(expected, result)
-
- def test_legacy_0_10_read(self):
- # legacy from 0.10
- with catch_warnings(record=True):
- path = tm.get_data_path('legacy_hdf/legacy_0.10.h5')
- with ensure_clean_store(path, mode='r') as store:
- str(store)
- for k in store.keys():
- store.select(k)
-
- def test_legacy_0_11_read(self):
- # legacy from 0.11
- path = os.path.join('legacy_hdf', 'legacy_table_0.11.h5')
- with ensure_clean_store(tm.get_data_path(path), mode='r') as store:
- str(store)
- assert 'df' in store
- assert 'df1' in store
- assert 'mi' in store
- df = store.select('df')
- df1 = store.select('df1')
- mi = store.select('mi')
- assert isinstance(df, DataFrame)
- assert isinstance(df1, DataFrame)
- assert isinstance(mi, DataFrame)
-
def test_copy(self):
with catch_warnings(record=True):
@@ -4688,31 +4641,6 @@ def do_copy(f=None, new_f=None, keys=None,
finally:
safe_remove(path)
- def test_legacy_table_write(self):
- pytest.skip("cannot write legacy tables")
-
- store = HDFStore(tm.get_data_path(
- 'legacy_hdf/legacy_table_%s.h5' % pandas.__version__), 'a')
-
- df = tm.makeDataFrame()
- with catch_warnings(record=True):
- wp = tm.makePanel()
-
- index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
- ['one', 'two', 'three']],
- labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
- [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
- names=['foo', 'bar'])
- df = DataFrame(np.random.randn(10, 3), index=index,
- columns=['A', 'B', 'C'])
- store.append('mi', df)
-
- df = DataFrame(dict(A='foo', B='bar'), index=lrange(10))
- store.append('df', df, data_columns=['B'], min_itemsize={'A': 200})
- store.append('wp', wp)
-
- store.close()
-
def test_store_datetime_fractional_secs(self):
with ensure_clean_store(self.path) as store:
| - [ x] closes #17375
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x ] whatsnew entry
Tests for old compatability removed because this issue is sufficiently old now, that people should be expected to have solved this by now.
@jreback , the methods ``test_legacy_table_read`` and ``test_legacy_table_write`` don't say if they concern pandas <=0.11 or it's newer compatability issues. It it ok to remove those?
See also discussion in #17375. | https://api.github.com/repos/pandas-dev/pandas/pulls/17398 | 2017-08-31T14:11:05Z | 2017-09-01T00:30:34Z | null | 2017-09-01T00:30:43Z |
TST: Made s3 related tests mock boto | diff --git a/appveyor.yml b/appveyor.yml
index 65e62f887554e..a1f8886f6d068 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -74,12 +74,18 @@ install:
# create our env
- cmd: conda create -n pandas python=%PYTHON_VERSION% cython pytest>=3.1.0 pytest-xdist
- cmd: activate pandas
+ - cmd: pip install moto
- SET REQ=ci\requirements-%PYTHON_VERSION%_WIN.run
- cmd: echo "installing requirements from %REQ%"
- cmd: conda install -n pandas --file=%REQ%
- cmd: conda list -n pandas
- cmd: echo "installing requirements from %REQ% - done"
+ # add some pip only reqs to the env
+ - SET REQ=ci\requirements-%PYTHON_VERSION%_WIN.pip
+ - cmd: echo "installing requirements from %REQ%"
+ - cmd: pip install -Ur %REQ%
+
# build em using the local source checkout in the correct windows env
- cmd: '%CMD_IN_ENV% python setup.py build_ext --inplace'
diff --git a/ci/install_circle.sh b/ci/install_circle.sh
index 29ca69970104b..fd79f907625e9 100755
--- a/ci/install_circle.sh
+++ b/ci/install_circle.sh
@@ -67,6 +67,7 @@ time conda create -n pandas -q --file=${REQ_BUILD} || exit 1
time conda install -n pandas pytest>=3.1.0 || exit 1
source activate pandas
+time pip install moto || exit 1
# build but don't install
echo "[build em]"
diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index d26689f2e6b4b..b85263daa1eac 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -104,7 +104,7 @@ if [ -e ${REQ} ]; then
fi
time conda install -n pandas pytest>=3.1.0
-time pip install pytest-xdist
+time pip install pytest-xdist moto
if [ "$LINT" ]; then
conda install flake8
diff --git a/ci/requirements-2.7_WIN.pip b/ci/requirements-2.7_WIN.pip
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/ci/requirements-3.6_NUMPY_DEV.pip b/ci/requirements-3.6_NUMPY_DEV.pip
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/ci/requirements-3.6_WIN.pip b/ci/requirements-3.6_WIN.pip
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt
index c7190c506ba18..dbc4f6cbd6509 100644
--- a/ci/requirements_dev.txt
+++ b/ci/requirements_dev.txt
@@ -5,3 +5,4 @@ cython
pytest>=3.1.0
pytest-cov
flake8
+moto
diff --git a/pandas/tests/io/parser/data/tips.csv.bz2 b/pandas/tests/io/parser/data/tips.csv.bz2
new file mode 100644
index 0000000000000..1452896b05e9d
Binary files /dev/null and b/pandas/tests/io/parser/data/tips.csv.bz2 differ
diff --git a/pandas/tests/io/parser/data/tips.csv.gz b/pandas/tests/io/parser/data/tips.csv.gz
new file mode 100644
index 0000000000000..3a131068b2a38
Binary files /dev/null and b/pandas/tests/io/parser/data/tips.csv.gz differ
diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py
index 3344243f8137a..27cc708889fa2 100644
--- a/pandas/tests/io/parser/test_network.py
+++ b/pandas/tests/io/parser/test_network.py
@@ -4,13 +4,20 @@
Tests parsers ability to read and parse non-local files
and hence require a network connection to be read.
"""
-
import os
+
import pytest
+import moto
import pandas.util.testing as tm
from pandas import DataFrame
from pandas.io.parsers import read_csv, read_table
+from pandas.compat import BytesIO
+
+
+@pytest.fixture(scope='module')
+def tips_file():
+ return os.path.join(tm.get_data_path(), 'tips.csv')
@pytest.fixture(scope='module')
@@ -19,6 +26,40 @@ def salaries_table():
return read_table(path)
+@pytest.fixture(scope='module')
+def s3_resource(tips_file):
+ pytest.importorskip('s3fs')
+ moto.mock_s3().start()
+
+ test_s3_files = [
+ ('tips.csv', tips_file),
+ ('tips.csv.gz', tips_file + '.gz'),
+ ('tips.csv.bz2', tips_file + '.bz2'),
+ ]
+
+ def add_tips_files(bucket_name):
+ for s3_key, file_name in test_s3_files:
+ with open(file_name, 'rb') as f:
+ conn.Bucket(bucket_name).put_object(
+ Key=s3_key,
+ Body=f)
+
+ boto3 = pytest.importorskip('boto3')
+ # see gh-16135
+ bucket = 'pandas-test'
+
+ conn = boto3.resource("s3", region_name="us-east-1")
+ conn.create_bucket(Bucket=bucket)
+ add_tips_files(bucket)
+
+ conn.create_bucket(Bucket='cant_get_it', ACL='private')
+ add_tips_files('cant_get_it')
+
+ yield conn
+
+ moto.mock_s3().stop()
+
+
@pytest.mark.network
@pytest.mark.parametrize(
"compression,extension",
@@ -51,15 +92,11 @@ def check_compressed_urls(salaries_table, compression, extension, mode,
class TestS3(object):
-
- def setup_method(self, method):
- try:
- import s3fs # noqa
- except ImportError:
- pytest.skip("s3fs not installed")
-
@tm.network
def test_parse_public_s3_bucket(self):
+ pytest.importorskip('s3fs')
+ # more of an integration test due to the not-public contents portion
+ # can probably mock this though.
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' +
ext, compression=comp)
@@ -74,8 +111,8 @@ def test_parse_public_s3_bucket(self):
assert not df.empty
tm.assert_frame_equal(read_csv(tm.get_data_path('tips.csv')), df)
- @tm.network
- def test_parse_public_s3n_bucket(self):
+ def test_parse_public_s3n_bucket(self, s3_resource):
+
# Read from AWS s3 as "s3n" URL
df = read_csv('s3n://pandas-test/tips.csv', nrows=10)
assert isinstance(df, DataFrame)
@@ -83,8 +120,7 @@ def test_parse_public_s3n_bucket(self):
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
- @tm.network
- def test_parse_public_s3a_bucket(self):
+ def test_parse_public_s3a_bucket(self, s3_resource):
# Read from AWS s3 as "s3a" URL
df = read_csv('s3a://pandas-test/tips.csv', nrows=10)
assert isinstance(df, DataFrame)
@@ -92,8 +128,7 @@ def test_parse_public_s3a_bucket(self):
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
- @tm.network
- def test_parse_public_s3_bucket_nrows(self):
+ def test_parse_public_s3_bucket_nrows(self, s3_resource):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' +
ext, nrows=10, compression=comp)
@@ -102,8 +137,7 @@ def test_parse_public_s3_bucket_nrows(self):
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
- @tm.network
- def test_parse_public_s3_bucket_chunked(self):
+ def test_parse_public_s3_bucket_chunked(self, s3_resource):
# Read with a chunksize
chunksize = 5
local_tips = read_csv(tm.get_data_path('tips.csv'))
@@ -121,8 +155,7 @@ def test_parse_public_s3_bucket_chunked(self):
chunksize * i_chunk: chunksize * (i_chunk + 1)]
tm.assert_frame_equal(true_df, df)
- @tm.network
- def test_parse_public_s3_bucket_chunked_python(self):
+ def test_parse_public_s3_bucket_chunked_python(self, s3_resource):
# Read with a chunksize using the Python parser
chunksize = 5
local_tips = read_csv(tm.get_data_path('tips.csv'))
@@ -140,8 +173,7 @@ def test_parse_public_s3_bucket_chunked_python(self):
chunksize * i_chunk: chunksize * (i_chunk + 1)]
tm.assert_frame_equal(true_df, df)
- @tm.network
- def test_parse_public_s3_bucket_python(self):
+ def test_parse_public_s3_bucket_python(self, s3_resource):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
compression=comp)
@@ -150,8 +182,7 @@ def test_parse_public_s3_bucket_python(self):
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')), df)
- @tm.network
- def test_infer_s3_compression(self):
+ def test_infer_s3_compression(self, s3_resource):
for ext in ['', '.gz', '.bz2']:
df = read_csv('s3://pandas-test/tips.csv' + ext,
engine='python', compression='infer')
@@ -160,8 +191,7 @@ def test_infer_s3_compression(self):
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')), df)
- @tm.network
- def test_parse_public_s3_bucket_nrows_python(self):
+ def test_parse_public_s3_bucket_nrows_python(self, s3_resource):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
nrows=10, compression=comp)
@@ -170,8 +200,7 @@ def test_parse_public_s3_bucket_nrows_python(self):
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
- @tm.network
- def test_s3_fails(self):
+ def test_s3_fails(self, s3_resource):
with pytest.raises(IOError):
read_csv('s3://nyqpug/asdf.csv')
@@ -180,21 +209,18 @@ def test_s3_fails(self):
with pytest.raises(IOError):
read_csv('s3://cant_get_it/')
- @tm.network
- def boto3_client_s3(self):
+ def test_read_csv_handles_boto_s3_object(self,
+ s3_resource,
+ tips_file):
# see gh-16135
- # boto3 is a dependency of s3fs
- import boto3
- client = boto3.client("s3")
-
- key = "/tips.csv"
- bucket = "pandas-test"
- s3_object = client.get_object(Bucket=bucket, Key=key)
+ s3_object = s3_resource.meta.client.get_object(
+ Bucket='pandas-test',
+ Key='tips.csv')
- result = read_csv(s3_object["Body"])
+ result = read_csv(BytesIO(s3_object["Body"].read()), encoding='utf8')
assert isinstance(result, DataFrame)
assert not result.empty
- expected = read_csv(tm.get_data_path('tips.csv'))
+ expected = read_csv(tips_file)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index 92147b46097b8..6a399f41975e5 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -1,33 +1,32 @@
# pylint: disable=E1101
-
-from pandas.compat import u, range, map, openpyxl_compat, BytesIO, iteritems
-from datetime import datetime, date, time
-import sys
+import functools
+import operator
import os
+import sys
+import warnings
+from datetime import datetime, date, time
from distutils.version import LooseVersion
from functools import partial
-
-import warnings
from warnings import catch_warnings
-import operator
-import functools
-import pytest
-from numpy import nan
import numpy as np
+import pytest
+from numpy import nan
+import moto
import pandas as pd
+import pandas.util.testing as tm
from pandas import DataFrame, Index, MultiIndex
-from pandas.io.formats.excel import ExcelFormatter
-from pandas.io.parsers import read_csv
+from pandas.compat import u, range, map, openpyxl_compat, BytesIO, iteritems
+from pandas.core.config import set_option, get_option
+from pandas.io.common import URLError
from pandas.io.excel import (
ExcelFile, ExcelWriter, read_excel, _XlwtWriter, _Openpyxl1Writer,
_Openpyxl20Writer, _Openpyxl22Writer, register_writer, _XlsxWriter
)
-from pandas.io.common import URLError
+from pandas.io.formats.excel import ExcelFormatter
+from pandas.io.parsers import read_csv
from pandas.util.testing import ensure_clean, makeCustomDataframe as mkdf
-from pandas.core.config import set_option, get_option
-import pandas.util.testing as tm
def _skip_if_no_xlrd():
@@ -67,13 +66,6 @@ def _skip_if_no_excelsuite():
_skip_if_no_openpyxl()
-def _skip_if_no_s3fs():
- try:
- import s3fs # noqa
- except ImportError:
- pytest.skip('s3fs not installed, skipping')
-
-
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)[:10]
@@ -605,14 +597,22 @@ def test_read_from_http_url(self):
local_table = self.get_exceldf('test1')
tm.assert_frame_equal(url_table, local_table)
- @tm.network(check_before_test=True)
def test_read_from_s3_url(self):
- _skip_if_no_s3fs()
-
- url = ('s3://pandas-test/test1' + self.ext)
- url_table = read_excel(url)
- local_table = self.get_exceldf('test1')
- tm.assert_frame_equal(url_table, local_table)
+ boto3 = pytest.importorskip('boto3')
+ pytest.importorskip('s3fs')
+
+ with moto.mock_s3():
+ conn = boto3.resource("s3", region_name="us-east-1")
+ conn.create_bucket(Bucket="pandas-test")
+ file_name = os.path.join(self.dirpath, 'test1' + self.ext)
+ with open(file_name, 'rb') as f:
+ conn.Bucket("pandas-test").put_object(Key="test1" + self.ext,
+ Body=f)
+
+ url = ('s3://pandas-test/test1' + self.ext)
+ url_table = read_excel(url)
+ local_table = self.get_exceldf('test1')
+ tm.assert_frame_equal(url_table, local_table)
@pytest.mark.slow
def test_read_from_file_url(self):
diff --git a/tox.ini b/tox.ini
index 45ad7fc451e76..f055251581a93 100644
--- a/tox.ini
+++ b/tox.ini
@@ -19,6 +19,7 @@ deps =
xlrd
six
sqlalchemy
+ moto
# cd to anything but the default {toxinidir} which
# contains the pandas subdirectory and confuses
| Kept a couple tests un-mocked for testing things like accessing a private bucket as that's hard to mock.
- [ ] closes #17325
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17388 | 2017-08-31T02:50:06Z | 2017-09-14T10:14:44Z | 2017-09-14T10:14:44Z | 2017-09-15T13:31:32Z |
CLN: replace %s syntax with .format in io/formats | diff --git a/pandas/io/formats/css.py b/pandas/io/formats/css.py
index d12d2373e1190..429c98b579ca0 100644
--- a/pandas/io/formats/css.py
+++ b/pandas/io/formats/css.py
@@ -94,12 +94,13 @@ def __call__(self, declarations_str, inherited=None):
# 3. TODO: resolve other font-relative units
for side in self.SIDES:
- prop = 'border-%s-width' % side
+ prop = 'border-{side}-width'.format(side=side)
if prop in props:
props[prop] = self.size_to_pt(
props[prop], em_pt=font_size,
conversions=self.BORDER_WIDTH_RATIOS)
- for prop in ['margin-%s' % side, 'padding-%s' % side]:
+ for prop in ['margin-{side}'.format(side=side),
+ 'padding-{side}'.format(side=side)]:
if prop in props:
# TODO: support %
props[prop] = self.size_to_pt(
@@ -152,7 +153,8 @@ def __call__(self, declarations_str, inherited=None):
def size_to_pt(self, in_val, em_pt=None, conversions=UNIT_RATIOS):
def _error():
- warnings.warn('Unhandled size: %r' % in_val, CSSWarning)
+ warnings.warn('Unhandled size: {val!r}'.format(val=in_val),
+ CSSWarning)
return self.size_to_pt('1!!default', conversions=conversions)
try:
@@ -185,10 +187,10 @@ def _error():
val = round(val, 5)
if int(val) == val:
- size_fmt = '%d'
+ size_fmt = '{fmt:d}pt'.format(fmt=int(val))
else:
- size_fmt = '%f'
- return (size_fmt + 'pt') % val
+ size_fmt = '{fmt:f}pt'.format(fmt=val)
+ return size_fmt
def atomize(self, declarations):
for prop, value in declarations:
@@ -215,19 +217,19 @@ def expand(self, prop, value):
try:
mapping = self.SIDE_SHORTHANDS[len(tokens)]
except KeyError:
- warnings.warn('Could not expand "%s: %s"' % (prop, value),
- CSSWarning)
+ warnings.warn('Could not expand "{prop}: {val}"'
+ .format(prop=prop, val=value), CSSWarning)
return
for key, idx in zip(self.SIDES, mapping):
- yield prop_fmt % key, tokens[idx]
+ yield prop_fmt.format(key), tokens[idx]
return expand
- expand_border_color = _side_expander('border-%s-color')
- expand_border_style = _side_expander('border-%s-style')
- expand_border_width = _side_expander('border-%s-width')
- expand_margin = _side_expander('margin-%s')
- expand_padding = _side_expander('padding-%s')
+ expand_border_color = _side_expander('border-{:s}-color')
+ expand_border_style = _side_expander('border-{:s}-style')
+ expand_border_width = _side_expander('border-{:s}-width')
+ expand_margin = _side_expander('margin-{:s}')
+ expand_padding = _side_expander('padding-{:s}')
def parse(self, declarations_str):
"""Generates (prop, value) pairs from declarations
@@ -245,4 +247,4 @@ def parse(self, declarations_str):
yield prop, val
else:
warnings.warn('Ill-formatted attribute: expected a colon '
- 'in %r' % decl, CSSWarning)
+ 'in {decl!r}'.format(decl=decl), CSSWarning)
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 80c3880d39dfd..ab689d196f4b6 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -132,10 +132,12 @@ def build_alignment(self, props):
def build_border(self, props):
return {side: {
- 'style': self._border_style(props.get('border-%s-style' % side),
- props.get('border-%s-width' % side)),
+ 'style': self._border_style(props.get('border-{side}-style'
+ .format(side=side)),
+ props.get('border-{side}-width'
+ .format(side=side))),
'color': self.color_to_excel(
- props.get('border-%s-color' % side)),
+ props.get('border-{side}-color'.format(side=side))),
} for side in ['top', 'right', 'bottom', 'left']}
def _border_style(self, style, width):
@@ -302,7 +304,8 @@ def color_to_excel(self, val):
try:
return self.NAMED_COLORS[val]
except KeyError:
- warnings.warn('Unhandled colour format: %r' % val, CSSWarning)
+ warnings.warn('Unhandled colour format: {val!r}'.format(val=val),
+ CSSWarning)
class ExcelFormatter(object):
@@ -369,7 +372,7 @@ def _format_value(self, val):
if lib.isposinf_scalar(val):
val = self.inf_rep
elif lib.isneginf_scalar(val):
- val = '-%s' % self.inf_rep
+ val = '-{inf}'.format(inf=self.inf_rep)
elif self.float_format is not None:
val = float(self.float_format % val)
return val
@@ -434,8 +437,9 @@ def _format_header_regular(self):
colnames = self.columns
if has_aliases:
if len(self.header) != len(self.columns):
- raise ValueError('Writing %d cols but got %d aliases' %
- (len(self.columns), len(self.header)))
+ raise ValueError('Writing {cols} cols but got {alias} '
+ 'aliases'.format(cols=len(self.columns),
+ alias=len(self.header)))
else:
colnames = self.header
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py
index cbad603630bd3..e0f53f671017a 100644
--- a/pandas/io/formats/printing.py
+++ b/pandas/io/formats/printing.py
@@ -102,9 +102,9 @@ def _pprint_seq(seq, _nest_lvl=0, max_seq_items=None, **kwds):
bounds length of printed sequence, depending on options
"""
if isinstance(seq, set):
- fmt = u("{%s}")
+ fmt = u("{{{body}}}")
else:
- fmt = u("[%s]") if hasattr(seq, '__setitem__') else u("(%s)")
+ fmt = u("[{body}]") if hasattr(seq, '__setitem__') else u("({body})")
if max_seq_items is False:
nitems = len(seq)
@@ -123,7 +123,7 @@ def _pprint_seq(seq, _nest_lvl=0, max_seq_items=None, **kwds):
elif isinstance(seq, tuple) and len(seq) == 1:
body += ','
- return fmt % body
+ return fmt.format(body=body)
def _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds):
@@ -131,10 +131,10 @@ def _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds):
internal. pprinter for iterables. you should probably use pprint_thing()
rather then calling this directly.
"""
- fmt = u("{%s}")
+ fmt = u("{{{things}}}")
pairs = []
- pfmt = u("%s: %s")
+ pfmt = u("{key}: {val}")
if max_seq_items is False:
nitems = len(seq)
@@ -142,16 +142,17 @@ def _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds):
nitems = max_seq_items or get_option("max_seq_items") or len(seq)
for k, v in list(seq.items())[:nitems]:
- pairs.append(pfmt %
- (pprint_thing(k, _nest_lvl + 1,
- max_seq_items=max_seq_items, **kwds),
- pprint_thing(v, _nest_lvl + 1,
- max_seq_items=max_seq_items, **kwds)))
+ pairs.append(
+ pfmt.format(
+ key=pprint_thing(k, _nest_lvl + 1,
+ max_seq_items=max_seq_items, **kwds),
+ val=pprint_thing(v, _nest_lvl + 1,
+ max_seq_items=max_seq_items, **kwds)))
if nitems < len(seq):
- return fmt % (", ".join(pairs) + ", ...")
+ return fmt.format(things=", ".join(pairs) + ", ...")
else:
- return fmt % ", ".join(pairs)
+ return fmt.format(things=", ".join(pairs))
def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False,
@@ -221,10 +222,10 @@ def as_escaped_unicode(thing, escape_chars=escape_chars):
max_seq_items=max_seq_items)
elif isinstance(thing, compat.string_types) and quote_strings:
if compat.PY3:
- fmt = "'%s'"
+ fmt = u("'{thing}'")
else:
- fmt = "u'%s'"
- result = fmt % as_escaped_unicode(thing)
+ fmt = u("u'{thing}'")
+ result = fmt.format(thing=as_escaped_unicode(thing))
else:
result = as_escaped_unicode(thing)
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 445fceb4b8146..87d672197be30 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -230,7 +230,7 @@ def format_attr(pair):
# ... except maybe the last for columns.names
name = self.data.columns.names[r]
cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
- "level%s" % r]
+ "level{lvl}".format(lvl=r)]
name = BLANK_VALUE if name is None else name
row_es.append({"type": "th",
"value": name,
@@ -240,7 +240,8 @@ def format_attr(pair):
if clabels:
for c, value in enumerate(clabels[r]):
- cs = [COL_HEADING_CLASS, "level%s" % r, "col%s" % c]
+ cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
+ "col{col}".format(col=c)]
cs.extend(cell_context.get(
"col_headings", {}).get(r, {}).get(c, []))
es = {
@@ -264,7 +265,7 @@ def format_attr(pair):
for c, name in enumerate(self.data.index.names):
cs = [INDEX_NAME_CLASS,
- "level%s" % c]
+ "level{lvl}".format(lvl=c)]
name = '' if name is None else name
index_header_row.append({"type": "th", "value": name,
"class": " ".join(cs)})
@@ -281,7 +282,8 @@ def format_attr(pair):
for r, idx in enumerate(self.data.index):
row_es = []
for c, value in enumerate(rlabels[r]):
- rid = [ROW_HEADING_CLASS, "level%s" % c, "row%s" % r]
+ rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
+ "row{row}".format(row=r)]
es = {
"type": "th",
"is_visible": _is_visible(r, c, idx_lengths),
@@ -298,7 +300,8 @@ def format_attr(pair):
row_es.append(es)
for c, col in enumerate(self.data.columns):
- cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
+ cs = [DATA_CLASS, "row{row}".format(row=r),
+ "col{col}".format(col=c)]
cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
formatter = self._display_funcs[(r, c)]
value = self.data.iloc[r, c]
@@ -317,7 +320,8 @@ def format_attr(pair):
else:
props.append(['', ''])
cellstyle.append({'props': props,
- 'selector': "row%s_col%s" % (r, c)})
+ 'selector': "row{row}_col{col}"
+ .format(row=r, col=c)})
body.append(row_es)
return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
@@ -512,22 +516,23 @@ def _apply(self, func, axis=0, subset=None, **kwargs):
result = func(data, **kwargs)
if not isinstance(result, pd.DataFrame):
raise TypeError(
- "Function {!r} must return a DataFrame when "
- "passed to `Styler.apply` with axis=None".format(func))
+ "Function {func!r} must return a DataFrame when "
+ "passed to `Styler.apply` with axis=None"
+ .format(func=func))
if not (result.index.equals(data.index) and
result.columns.equals(data.columns)):
- msg = ('Result of {!r} must have identical index and columns '
- 'as the input'.format(func))
+ msg = ('Result of {func!r} must have identical index and '
+ 'columns as the input'.format(func=func))
raise ValueError(msg)
result_shape = result.shape
expected_shape = self.data.loc[subset].shape
if result_shape != expected_shape:
- msg = ("Function {!r} returned the wrong shape.\n"
- "Result has shape: {}\n"
- "Expected shape: {}".format(func,
- result.shape,
- expected_shape))
+ msg = ("Function {func!r} returned the wrong shape.\n"
+ "Result has shape: {res}\n"
+ "Expected shape: {expect}".format(func=func,
+ res=result.shape,
+ expect=expected_shape))
raise ValueError(msg)
self._update_ctx(result)
return self
@@ -771,7 +776,8 @@ def set_table_styles(self, table_styles):
@staticmethod
def _highlight_null(v, null_color):
- return 'background-color: %s' % null_color if pd.isna(v) else ''
+ return ('background-color: {color}'.format(color=null_color)
+ if pd.isna(v) else '')
def highlight_null(self, null_color='red'):
"""
@@ -839,7 +845,8 @@ def _background_gradient(s, cmap='PuBu', low=0, high=0):
# https://github.com/matplotlib/matplotlib/issues/5427
normed = norm(s.values)
c = [colors.rgb2hex(x) for x in plt.cm.get_cmap(cmap)(normed)]
- return ['background-color: %s' % color for color in c]
+ return ['background-color: {color}'.format(color=color)
+ for color in c]
def set_properties(self, subset=None, **kwargs):
"""
@@ -1182,6 +1189,6 @@ def _maybe_wrap_formatter(formatter):
elif callable(formatter):
return formatter
else:
- msg = "Expected a template string or callable, got {} instead".format(
- formatter)
+ msg = ("Expected a template string or callable, got {formatter} "
+ "instead".format(formatter=formatter))
raise TypeError(msg)
diff --git a/pandas/io/formats/terminal.py b/pandas/io/formats/terminal.py
index 30bd1d16b538a..4bcb28fa59b86 100644
--- a/pandas/io/formats/terminal.py
+++ b/pandas/io/formats/terminal.py
@@ -124,4 +124,4 @@ def ioctl_GWINSZ(fd):
if __name__ == "__main__":
sizex, sizey = get_terminal_size()
- print('width = %s height = %s' % (sizex, sizey))
+ print('width = {w} height = {h}'.format(w=sizex, h=sizey))
| Progress toward issue #16130. Converted old string formatting to new string formatting in io/formats/css.py, excel.py, printing.py, style.py, and terminal.py.
| https://api.github.com/repos/pandas-dev/pandas/pulls/17387 | 2017-08-31T02:49:10Z | 2017-08-31T10:24:24Z | 2017-08-31T10:24:24Z | 2017-09-25T11:08:24Z |
BUG: Fix wrong SparseBlock initialization | diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 045580d393b26..7bae661ba93dd 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -29,6 +29,7 @@
is_bool_dtype,
is_object_dtype,
is_datetimelike_v_numeric,
+ is_complex_dtype,
is_float_dtype, is_numeric_dtype,
is_numeric_v_string_like, is_extension_type,
is_list_like,
@@ -454,8 +455,11 @@ def make_a_block(nv, ref_loc):
nv = _block_shape(nv, ndim=self.ndim)
except (AttributeError, NotImplementedError):
pass
+
block = self.make_block(values=nv,
- placement=ref_loc, fastpath=True)
+ placement=ref_loc,
+ fastpath=True)
+
return block
# ndim == 1
@@ -1020,7 +1024,7 @@ def f(m, v, i):
return [self.make_block(new_values, fastpath=True)]
- def coerce_to_target_dtype(self, other):
+ def coerce_to_target_dtype(self, other, copy=False):
"""
coerce the current block to a dtype compat for other
we will return a block, possibly object, and not raise
@@ -1037,7 +1041,7 @@ def coerce_to_target_dtype(self, other):
if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype):
# we don't upcast to bool
- return self.astype(object)
+ return self.astype(object, copy=copy)
elif ((self.is_float or self.is_complex) and
(is_integer_dtype(dtype) or is_float_dtype(dtype))):
@@ -1051,14 +1055,14 @@ def coerce_to_target_dtype(self, other):
# not a datetime
if not ((is_datetime64_dtype(dtype) or
is_datetime64tz_dtype(dtype)) and self.is_datetime):
- return self.astype(object)
+ return self.astype(object, copy=copy)
# don't upcast timezone with different timezone or no timezone
mytz = getattr(self.dtype, 'tz', None)
othertz = getattr(dtype, 'tz', None)
if str(mytz) != str(othertz):
- return self.astype(object)
+ return self.astype(object, copy=copy)
raise AssertionError("possible recursion in "
"coerce_to_target_dtype: {} {}".format(
@@ -1068,18 +1072,18 @@ def coerce_to_target_dtype(self, other):
# not a timedelta
if not (is_timedelta64_dtype(dtype) and self.is_timedelta):
- return self.astype(object)
+ return self.astype(object, copy=copy)
raise AssertionError("possible recursion in "
"coerce_to_target_dtype: {} {}".format(
self, other))
try:
- return self.astype(dtype)
+ return self.astype(dtype, copy=copy)
except (ValueError, TypeError):
pass
- return self.astype(object)
+ return self.astype(object, copy=copy)
def interpolate(self, method='pad', axis=0, index=None, values=None,
inplace=False, limit=None, limit_direction='forward',
@@ -1440,6 +1444,11 @@ def where(self, other, cond, align=True, errors='raise',
if hasattr(other, 'reindex_axis'):
other = other.values
+ if is_scalar(other) or is_list_like(other):
+ fill_value = other
+ else:
+ fill_value = None
+
if hasattr(cond, 'reindex_axis'):
cond = cond.values
@@ -1452,6 +1461,9 @@ def where(self, other, cond, align=True, errors='raise',
if not hasattr(cond, 'shape'):
raise ValueError("where must have a condition that is ndarray "
"like")
+ else:
+ if self.is_sparse:
+ cond = cond.flatten()
# our where function
def func(cond, values, other):
@@ -1489,7 +1501,7 @@ def func(cond, values, other):
transpose=transpose)
return self._maybe_downcast(blocks, 'infer')
- if self._can_hold_na or self.ndim == 1:
+ if self._can_hold_element(fill_value) or values.ndim == 1:
if transpose:
result = result.T
@@ -1498,7 +1510,12 @@ def func(cond, values, other):
if try_cast:
result = self._try_cast_result(result)
- return self.make_block(result)
+ if isinstance(result, np.ndarray):
+ ndim = result.ndim
+ else:
+ ndim = None
+
+ return self.make_block(result, ndim=ndim, fill_value=fill_value)
# might need to separate out blocks
axis = cond.ndim - 1
@@ -1512,7 +1529,8 @@ def func(cond, values, other):
r = self._try_cast_result(result.take(m.nonzero()[0],
axis=axis))
result_blocks.append(
- self.make_block(r.T, placement=self.mgr_locs[m]))
+ self.make_block_same_class(r.T,
+ placement=self.mgr_locs[m]))
return result_blocks
@@ -1832,6 +1850,7 @@ class FloatBlock(FloatOrComplexBlock):
is_float = True
_downcast_dtype = 'int64'
+ @classmethod
def _can_hold_element(self, element):
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
@@ -1881,6 +1900,7 @@ class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
+ @classmethod
def _can_hold_element(self, element):
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
@@ -2042,6 +2062,7 @@ class BoolBlock(NumericBlock):
is_bool = True
_can_hold_na = False
+ @classmethod
def _can_hold_element(self, element):
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
@@ -2751,11 +2772,63 @@ class SparseBlock(NonConsolidatableMixIn, Block):
is_sparse = True
is_numeric = True
_box_to_block_values = False
- _can_hold_na = True
_ftype = 'sparse'
_holder = SparseArray
_concatenator = staticmethod(_concat._concat_sparse)
+ def __init__(self, values, placement, ndim=None, fastpath=False, **kwargs):
+ super(SparseBlock, self).__init__(values, placement,
+ ndim, fastpath,
+ **kwargs)
+
+ dtype = self.values.sp_values.dtype
+
+ if is_float_dtype(dtype):
+ self.is_float = True
+ self._can_hold_na = True
+ elif is_complex_dtype(dtype):
+ self.is_complex = True
+ self._can_hold_na = True
+ elif is_integer_dtype(dtype):
+ self.is_integer = True
+ self._can_hold_na = False
+ elif is_bool_dtype(dtype):
+ self.is_bool = True
+ self._can_hold_na = False
+ elif is_object_dtype(dtype):
+ self.is_object = True
+ self._can_hold_na = True
+ else:
+ self._can_hold_na = False
+
+ def _can_hold_element(self, element):
+ """ require the same dtype as ourselves """
+ dtype = self.values.sp_values.dtype
+
+ if is_bool_dtype(dtype):
+ return BoolBlock._can_hold_element(element)
+ elif is_integer_dtype(dtype):
+ if is_list_like(element):
+ element = np.array(element)
+ tipo = element.dtype.type
+ return (issubclass(tipo, np.integer) and
+ not issubclass(tipo,
+ (np.datetime64,
+ np.timedelta64)) and
+ dtype.itemsize >= element.dtype.itemsize)
+ return is_integer(element)
+ elif is_float_dtype(dtype):
+ return FloatBlock._can_hold_element(element)
+ elif is_complex_dtype(dtype):
+ return ComplexBlock._can_hold_element(element)
+ elif is_object_dtype(dtype):
+ return True
+ else:
+ return False
+
+ def coerce_to_target_dtype(self, other, copy=True):
+ return super(SparseBlock, self).coerce_to_target_dtype(other, copy)
+
@property
def shape(self):
return (len(self.mgr_locs), self.sp_index.length)
@@ -2816,6 +2889,20 @@ def copy(self, deep=True, mgr=None):
kind=self.kind, copy=deep,
placement=self.mgr_locs)
+ def make_block(self, values, placement=None,
+ ndim=None, fill_value=None, **kwargs):
+ """
+ Create a new block, with type inference propagate any values that are
+ not specified
+ """
+ if fill_value is not None and isinstance(values, SparseArray):
+ values = SparseArray(values.to_dense(), fill_value=fill_value,
+ kind=values.kind, dtype=values.dtype)
+
+ return super(SparseBlock, self).make_block(values, placement=placement,
+ ndim=ndim, fill_value=None,
+ **kwargs)
+
def make_block_same_class(self, values, placement, sparse_index=None,
kind=None, dtype=None, fill_value=None,
copy=False, fastpath=True, **kwargs):
@@ -2912,9 +2999,15 @@ def sparse_reindex(self, new_index):
return self.make_block_same_class(values, sparse_index=new_index,
placement=self.mgr_locs)
+ def _try_coerce_result(self, result):
+ """ reverse of try_coerce_args """
+ if isinstance(result, np.ndarray):
+ result = SparseArray(result.flatten(), kind=self.kind)
+ return result
+
def make_block(values, placement, klass=None, ndim=None, dtype=None,
- fastpath=False):
+ fastpath=False, **kwargs):
if klass is None:
dtype = dtype or values.dtype
vtype = dtype.type
diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py
index 0424ac8703e25..699618b11448d 100644
--- a/pandas/core/sparse/array.py
+++ b/pandas/core/sparse/array.py
@@ -248,7 +248,7 @@ def _simple_new(cls, data, sp_index, fill_value):
sp_index.ngaps > 0):
# if float fill_value is being included in dense repr,
# convert values to float
- data = data.astype(float)
+ data = data.astype(float, copy=True)
result = data.view(cls)
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
index 1b45b180b8dc1..19b33c3d6df8c 100644
--- a/pandas/core/sparse/frame.py
+++ b/pandas/core/sparse/frame.py
@@ -321,8 +321,9 @@ def _apply_columns(self, func):
data=new_data, index=self.index, columns=self.columns,
default_fill_value=self.default_fill_value).__finalize__(self)
- def astype(self, dtype):
- return self._apply_columns(lambda x: x.astype(dtype))
+ def astype(self, dtype, copy=True, errors='raise', **kwargs):
+ return self._apply_columns(lambda x: x.astype(dtype, copy,
+ errors, **kwargs))
def copy(self, deep=True):
"""
@@ -333,6 +334,16 @@ def copy(self, deep=True):
result._default_kind = self._default_kind
return result
+ def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
+ try_cast=False, raise_on_error=True):
+ result = super(SparseDataFrame,
+ self).where(cond, other,
+ inplace, axis,
+ level, try_cast,
+ raise_on_error=raise_on_error)
+ result._default_fill_value = other
+ return result
+
@property
def default_fill_value(self):
return self._default_fill_value
diff --git a/pandas/tests/sparse/test_frame.py b/pandas/tests/sparse/test_frame.py
index e65059156c5b9..b8bc0530294a5 100644
--- a/pandas/tests/sparse/test_frame.py
+++ b/pandas/tests/sparse/test_frame.py
@@ -1410,8 +1410,6 @@ def test_numpy_func_call(self):
[nan, nan]
]
])
- @pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
def test_where_with_numeric_data(self, data):
# GH 17386
lower_bound = 1.5
@@ -1443,8 +1441,6 @@ def test_where_with_numeric_data(self, data):
0.1,
100.0 + 100.0j
])
- @pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
def test_where_with_numeric_data_and_other(self, data, other):
# GH 17386
lower_bound = 1.5
@@ -1460,8 +1456,6 @@ def test_where_with_numeric_data_and_other(self, data, other):
tm.assert_frame_equal(result, dense_expected)
tm.assert_sp_frame_equal(result, sparse_expected)
- @pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
def test_where_with_bool_data(self):
# GH 17386
data = [[False, False], [True, True], [False, False]]
@@ -1483,8 +1477,6 @@ def test_where_with_bool_data(self):
0.1,
100.0 + 100.0j
])
- @pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
def test_where_with_bool_data_and_other(self, other):
# GH 17386
data = [[False, False], [True, True], [False, False]]
@@ -1501,8 +1493,6 @@ def test_where_with_bool_data_and_other(self, other):
tm.assert_frame_equal(result, dense_expected)
tm.assert_sp_frame_equal(result, sparse_expected)
- @pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
def test_quantile(self):
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [nan, nan]]
@@ -1518,8 +1508,6 @@ def test_quantile(self):
tm.assert_series_equal(result, dense_expected)
tm.assert_sp_series_equal(result, sparse_expected)
- @pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
def test_quantile_multi(self):
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [nan, nan]]
diff --git a/pandas/tests/sparse/test_series.py b/pandas/tests/sparse/test_series.py
index 1dc1c7f1575cc..4014826847611 100644
--- a/pandas/tests/sparse/test_series.py
+++ b/pandas/tests/sparse/test_series.py
@@ -1430,8 +1430,6 @@ def test_deprecated_reindex_axis(self):
nan, nan
]
])
- @pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
def test_where_with_numeric_data(self, data):
# GH 17386
lower_bound = 1.5
@@ -1463,9 +1461,6 @@ def test_where_with_numeric_data(self, data):
0.1,
100.0 + 100.0j
])
- @pytest.mark.skip(reason='Wrong SparseBlock initialization '
- '(Segfault) '
- '(GH 17386)')
def test_where_with_numeric_data_and_other(self, data, other):
# GH 17386
lower_bound = 1.5
@@ -1480,8 +1475,6 @@ def test_where_with_numeric_data_and_other(self, data, other):
tm.assert_series_equal(result, dense_expected)
tm.assert_sp_series_equal(result, sparse_expected)
- @pytest.mark.xfail(reason='Wrong SparseBlock initialization '
- '(GH 17386)')
def test_where_with_bool_data(self):
# GH 17386
data = [False, False, True, True, False, False]
@@ -1503,9 +1496,6 @@ def test_where_with_bool_data(self):
0.1,
100.0 + 100.0j
])
- @pytest.mark.skip(reason='Wrong SparseBlock initialization '
- '(Segfault) '
- '(GH 17386)')
def test_where_with_bool_data_and_other(self, other):
# GH 17386
data = [False, False, True, True, False, False]
| - [x] closes #17198
- [x] tests added / passed
Passed the same tests which the current master branch pass.
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17386 | 2017-08-31T00:33:43Z | 2018-07-07T14:49:19Z | null | 2018-07-07T14:49:19Z |
DOC: Adding methods for "bad" lines that preserve all data | diff --git a/doc/source/io.rst b/doc/source/io.rst
index e338407361705..afce266a52198 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1130,7 +1130,7 @@ options:
.. _io.bad_lines:
-Handling "bad" lines
+Handling "bad" lines - excluding the data
''''''''''''''''''''
Some files may have malformed lines with too few fields or too many. Lines with
@@ -1175,6 +1175,80 @@ data that appear in some lines but not others:
0 1 2 3
1 4 5 6
2 8 9 10
+
+Handling "bad" lines - preserving the data
+''''''''''''''''''''
+
+To preserve all data, you can specify a sufficient number of header ``names``:
+
+.. code-block:: ipython
+
+ In [31]: pd.read_csv(StringIO(data), names=['a', 'b', 'c', 'd'])
+
+ Out[31]:
+ a b c d
+ 0 1 2 3 NaN
+ 1 4 5 6 7
+ 2 8 9 10 NaN
+
+or you can use Python's ``open`` command to detect the length of the widest row:
+
+.. code-block:: ipython
+
+ In [32]:
+ import csv
+ with open('data.csv', newline='') as f:
+ reader = csv.reader(f)
+ max_width = 0
+ for row in reader:
+ length = row.count(',')
+ if length > max_width:
+ max_width = length
+
+and then choose to edit the csv itself:
+
+.. code-block:: ipython
+
+ In [32] (cont'd):
+
+ amended_rows = []
+ for row in reader:
+ length = row.count(',')
+ if length < max_width:
+ for _ in range(max_width - length):
+ row = row + ','
+ amended_rows.append(row)
+
+ writer = csv.writer(f)
+ writer.writerows(amended_rows)
+
+ pd.read_csv('data.csv')
+
+ Out[32]:
+ a b c d
+ 0 1 2 3 NaN
+ 1 4 5 6 7
+ 2 8 9 10 NaN
+
+or to specify ``names`` based on the length of the widest row:
+
+.. code-block:: ipython
+
+ In [32] (cont'd):
+
+ label = 'c'
+ col_labels = []
+ for col_num in range(max_width):
+ label = label + str(col_num)
+ col_labels.append(label)
+
+ pd.read_csv('data.csv', names=col_labels)
+
+ Out[32]:
+ c1 c2 c3 c4
+ 0 1 2 3 NaN
+ 1 4 5 6 7
+ 2 8 9 10 NaN
.. _io.dialect:
| - [ ] closes #17319
| https://api.github.com/repos/pandas-dev/pandas/pulls/17385 | 2017-08-30T21:18:34Z | 2017-10-28T15:54:15Z | null | 2023-05-11T01:16:18Z |
TST: not correctly using OrderedDict in test_series_apply | diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py
index e3be5427588b3..d0693984689a6 100644
--- a/pandas/tests/series/test_apply.py
+++ b/pandas/tests/series/test_apply.py
@@ -317,9 +317,9 @@ def test_non_callable_aggregates(self):
# test when mixed w/ callable reducers
result = s.agg(['size', 'count', 'mean'])
- expected = Series(OrderedDict({'size': 3.0,
- 'count': 2.0,
- 'mean': 1.5}))
+ expected = Series(OrderedDict([('size', 3.0),
+ ('count', 2.0),
+ ('mean', 1.5)]))
assert_series_equal(result[expected.index], expected)
| in Python versions <3.6 this syntax will result in an unordered dict
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17384 | 2017-08-30T19:57:58Z | 2017-08-31T10:35:52Z | 2017-08-31T10:35:52Z | 2017-08-31T10:35:57Z |
Remove boxplot from _dataframe_apply_whitelist | diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index c23b00dc740a4..248f3b2095a78 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -63,6 +63,8 @@
import pandas.core.common as com
from pandas.core.config import option_context
+from pandas.plotting._core import boxplot_frame_groupby
+
from pandas._libs import lib, groupby as libgroupby, Timestamp, NaT, iNaT
from pandas._libs.lib import count_level_2d
@@ -168,8 +170,9 @@
{'nlargest', 'nsmallest'}) -
{'boxplot'}) | frozenset(['dtype', 'unique'])
-_dataframe_apply_whitelist = (_common_apply_whitelist |
- frozenset(['dtypes', 'corrwith']))
+_dataframe_apply_whitelist = ((_common_apply_whitelist |
+ frozenset(['dtypes', 'corrwith'])) -
+ {'boxplot'})
_cython_transforms = frozenset(['cumprod', 'cumsum', 'shift',
'cummin', 'cummax'])
@@ -4280,9 +4283,7 @@ def groupby_series(obj, col=None):
results.index = _default_index(len(results))
return results
-
-from pandas.plotting._core import boxplot_frame_groupby # noqa
-DataFrameGroupBy.boxplot = boxplot_frame_groupby
+ boxplot = boxplot_frame_groupby
class PanelGroupBy(NDFrameGroupBy):
diff --git a/pandas/tests/groupby/test_whitelist.py b/pandas/tests/groupby/test_whitelist.py
index 2c8bf57f20fae..1c5161d2ffb43 100644
--- a/pandas/tests/groupby/test_whitelist.py
+++ b/pandas/tests/groupby/test_whitelist.py
@@ -42,7 +42,6 @@
'pct_change',
'skew',
'plot',
- 'boxplot',
'hist',
'median',
'dtypes',
| This is a piece of #17179, which is being broken up into smaller chunks.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17381 | 2017-08-30T16:47:17Z | 2017-08-31T10:37:59Z | 2017-08-31T10:37:59Z | 2017-10-30T16:23:46Z |
DOC: Added examples to pd.Index.get_loc | diff --git a/appveyor.yml b/appveyor.yml
index 65e62f887554e..a1f8886f6d068 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -74,12 +74,18 @@ install:
# create our env
- cmd: conda create -n pandas python=%PYTHON_VERSION% cython pytest>=3.1.0 pytest-xdist
- cmd: activate pandas
+ - cmd: pip install moto
- SET REQ=ci\requirements-%PYTHON_VERSION%_WIN.run
- cmd: echo "installing requirements from %REQ%"
- cmd: conda install -n pandas --file=%REQ%
- cmd: conda list -n pandas
- cmd: echo "installing requirements from %REQ% - done"
+ # add some pip only reqs to the env
+ - SET REQ=ci\requirements-%PYTHON_VERSION%_WIN.pip
+ - cmd: echo "installing requirements from %REQ%"
+ - cmd: pip install -Ur %REQ%
+
# build em using the local source checkout in the correct windows env
- cmd: '%CMD_IN_ENV% python setup.py build_ext --inplace'
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py
index 6432ccfb19efe..d90c994b3d194 100644
--- a/asv_bench/benchmarks/categoricals.py
+++ b/asv_bench/benchmarks/categoricals.py
@@ -67,6 +67,9 @@ def time_value_counts_dropna(self):
def time_rendering(self):
str(self.sel)
+ def time_set_categories(self):
+ self.ts.cat.set_categories(self.ts.cat.categories[::2])
+
class Categoricals3(object):
goal_time = 0.2
diff --git a/asv_bench/benchmarks/period.py b/asv_bench/benchmarks/period.py
index f9837191a7bae..df3c2bf3e4b46 100644
--- a/asv_bench/benchmarks/period.py
+++ b/asv_bench/benchmarks/period.py
@@ -2,6 +2,35 @@
from pandas import Series, Period, PeriodIndex, date_range
+class PeriodProperties(object):
+ def setup(self):
+ self.per = Period('2012-06-01', freq='M')
+
+ def time_year(self):
+ self.per.year
+
+ def time_month(self):
+ self.per.month
+
+ def time_quarter(self):
+ self.per.quarter
+
+ def time_day(self):
+ self.per.day
+
+ def time_hour(self):
+ self.per.hour
+
+ def time_minute(self):
+ self.per.second
+
+ def time_second(self):
+ self.per.second
+
+ def time_leap_year(self):
+ self.per.is_leapyear
+
+
class Constructor(object):
goal_time = 0.2
@@ -49,6 +78,65 @@ def time_value_counts_pindex(self):
self.i.value_counts()
+class Properties(object):
+ def setup(self):
+ self.per = Period('2017-09-06 08:28', freq='min')
+
+ def time_year(self):
+ self.per.year
+
+ def time_month(self):
+ self.per.month
+
+ def time_day(self):
+ self.per.day
+
+ def time_hour(self):
+ self.per.hour
+
+ def time_minute(self):
+ self.per.minute
+
+ def time_second(self):
+ self.per.second
+
+ def time_is_leap_year(self):
+ self.per.is_leap_year
+
+ def time_quarter(self):
+ self.per.quarter
+
+ def time_qyear(self):
+ self.per.qyear
+
+ def time_week(self):
+ self.per.week
+
+ def time_daysinmonth(self):
+ self.per.daysinmonth
+
+ def time_dayofweek(self):
+ self.per.dayofweek
+
+ def time_dayofyear(self):
+ self.per.dayofyear
+
+ def time_start_time(self):
+ self.per.start_time
+
+ def time_end_time(self):
+ self.per.end_time
+
+ def time_to_timestamp():
+ self.per.to_timestamp()
+
+ def time_now():
+ self.per.now()
+
+ def time_asfreq():
+ self.per.asfreq('A')
+
+
class period_standard_indexing(object):
goal_time = 0.2
diff --git a/asv_bench/benchmarks/timestamp.py b/asv_bench/benchmarks/timestamp.py
new file mode 100644
index 0000000000000..066479b22739a
--- /dev/null
+++ b/asv_bench/benchmarks/timestamp.py
@@ -0,0 +1,60 @@
+from .pandas_vb_common import *
+from pandas import to_timedelta, Timestamp
+
+
+class TimestampProperties(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.ts = Timestamp('2017-08-25 08:16:14')
+
+ def time_tz(self):
+ self.ts.tz
+
+ def time_offset(self):
+ self.ts.offset
+
+ def time_dayofweek(self):
+ self.ts.dayofweek
+
+ def time_weekday_name(self):
+ self.ts.weekday_name
+
+ def time_dayofyear(self):
+ self.ts.dayofyear
+
+ def time_week(self):
+ self.ts.week
+
+ def time_quarter(self):
+ self.ts.quarter
+
+ def time_days_in_month(self):
+ self.ts.days_in_month
+
+ def time_freqstr(self):
+ self.ts.freqstr
+
+ def time_is_month_start(self):
+ self.ts.is_month_start
+
+ def time_is_month_end(self):
+ self.ts.is_month_end
+
+ def time_is_quarter_start(self):
+ self.ts.is_quarter_start
+
+ def time_is_quarter_end(self):
+ self.ts.is_quarter_end
+
+ def time_is_year_start(self):
+ self.ts.is_quarter_end
+
+ def time_is_year_end(self):
+ self.ts.is_quarter_end
+
+ def time_is_leap_year(self):
+ self.ts.is_quarter_end
+
+ def time_microsecond(self):
+ self.ts.microsecond
diff --git a/ci/install_circle.sh b/ci/install_circle.sh
index 29ca69970104b..fd79f907625e9 100755
--- a/ci/install_circle.sh
+++ b/ci/install_circle.sh
@@ -67,6 +67,7 @@ time conda create -n pandas -q --file=${REQ_BUILD} || exit 1
time conda install -n pandas pytest>=3.1.0 || exit 1
source activate pandas
+time pip install moto || exit 1
# build but don't install
echo "[build em]"
diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index d26689f2e6b4b..b85263daa1eac 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -104,7 +104,7 @@ if [ -e ${REQ} ]; then
fi
time conda install -n pandas pytest>=3.1.0
-time pip install pytest-xdist
+time pip install pytest-xdist moto
if [ "$LINT" ]; then
conda install flake8
diff --git a/ci/requirements-2.7_WIN.pip b/ci/requirements-2.7_WIN.pip
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/ci/requirements-3.5.sh b/ci/requirements-3.5.sh
index 33db9c28c78a9..d694ad3679ac1 100644
--- a/ci/requirements-3.5.sh
+++ b/ci/requirements-3.5.sh
@@ -8,4 +8,4 @@ echo "install 35"
conda remove -n pandas python-dateutil --force
pip install python-dateutil
-conda install -n pandas -c conda-forge feather-format pyarrow=0.4.1
+conda install -n pandas -c conda-forge feather-format pyarrow=0.5.0
diff --git a/ci/requirements-3.6_NUMPY_DEV.pip b/ci/requirements-3.6_NUMPY_DEV.pip
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/ci/requirements-3.6_WIN.pip b/ci/requirements-3.6_WIN.pip
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt
index c7190c506ba18..dbc4f6cbd6509 100644
--- a/ci/requirements_dev.txt
+++ b/ci/requirements_dev.txt
@@ -5,3 +5,4 @@ cython
pytest>=3.1.0
pytest-cov
flake8
+moto
diff --git a/doc/source/10min.rst b/doc/source/10min.rst
index def49a641a0ff..ef6b2d6ef2c90 100644
--- a/doc/source/10min.rst
+++ b/doc/source/10min.rst
@@ -655,7 +655,7 @@ the quarter end:
Categoricals
------------
-Since version 0.15, pandas can include categorical data in a ``DataFrame``. For full docs, see the
+pandas can include categorical data in a ``DataFrame``. For full docs, see the
:ref:`categorical introduction <categorical>` and the :ref:`API documentation <api.categorical>`.
.. ipython:: python
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst
index 711c3e9a95d05..3bda8c7eacb61 100644
--- a/doc/source/advanced.rst
+++ b/doc/source/advanced.rst
@@ -26,12 +26,6 @@ See the :ref:`Indexing and Selecting Data <indexing>` for general indexing docum
should be avoided. See :ref:`Returning a View versus Copy
<indexing.view_versus_copy>`
-.. warning::
-
- In 0.15.0 ``Index`` has internally been refactored to no longer sub-class ``ndarray``
- but instead subclass ``PandasObject``, similarly to the rest of the pandas objects. This should be
- a transparent change with only very limited API implications (See the :ref:`Internal Refactoring <whatsnew_0150.refactoring>`)
-
See the :ref:`cookbook<cookbook.selection>` for some advanced strategies
.. _advanced.hierarchical:
@@ -270,9 +264,6 @@ Passing a list of labels or tuples works similar to reindexing:
Using slicers
~~~~~~~~~~~~~
-.. versionadded:: 0.14.0
-
-In 0.14.0 we added a new way to slice multi-indexed objects.
You can slice a multi-index by providing multiple indexers.
You can provide any of the selectors as if you are indexing by label, see :ref:`Selection by Label <indexing.label>`,
@@ -384,7 +375,7 @@ selecting data at a particular level of a MultiIndex easier.
.. ipython:: python
- # using the slicers (new in 0.14.0)
+ # using the slicers
df.loc[(slice(None),'one'),:]
You can also select on the columns with :meth:`~pandas.MultiIndex.xs`, by
@@ -397,7 +388,7 @@ providing the axis argument
.. ipython:: python
- # using the slicers (new in 0.14.0)
+ # using the slicers
df.loc[:,(slice(None),'one')]
:meth:`~pandas.MultiIndex.xs` also allows selection with multiple keys
@@ -408,11 +399,9 @@ providing the axis argument
.. ipython:: python
- # using the slicers (new in 0.14.0)
+ # using the slicers
df.loc[:,('bar','one')]
-.. versionadded:: 0.13.0
-
You can pass ``drop_level=False`` to :meth:`~pandas.MultiIndex.xs` to retain
the level that was selected
@@ -636,19 +625,16 @@ Index Types
We have discussed ``MultiIndex`` in the previous sections pretty extensively. ``DatetimeIndex`` and ``PeriodIndex``
are shown :ref:`here <timeseries.overview>`. ``TimedeltaIndex`` are :ref:`here <timedeltas.timedeltas>`.
-In the following sub-sections we will highlite some other index types.
+In the following sub-sections we will highlight some other index types.
.. _indexing.categoricalindex:
CategoricalIndex
~~~~~~~~~~~~~~~~
-.. versionadded:: 0.16.1
-
-We introduce a ``CategoricalIndex``, a new type of index object that is useful for supporting
-indexing with duplicates. This is a container around a ``Categorical`` (introduced in v0.15.0)
-and allows efficient indexing and storage of an index with a large number of duplicated elements. Prior to 0.16.1,
-setting the index of a ``DataFrame/Series`` with a ``category`` dtype would convert this to regular object-based ``Index``.
+``CategoricalIndex`` is a type of index that is useful for supporting
+indexing with duplicates. This is a container around a ``Categorical``
+and allows efficient indexing and storage of an index with a large number of duplicated elements.
.. ipython:: python
@@ -659,7 +645,7 @@ setting the index of a ``DataFrame/Series`` with a ``category`` dtype would conv
df.dtypes
df.B.cat.categories
-Setting the index, will create create a ``CategoricalIndex``
+Setting the index, will create a ``CategoricalIndex``
.. ipython:: python
@@ -695,7 +681,7 @@ Groupby operations on the index will preserve the index nature as well
Reindexing operations, will return a resulting index based on the type of the passed
indexer, meaning that passing a list will return a plain-old-``Index``; indexing with
a ``Categorical`` will return a ``CategoricalIndex``, indexed according to the categories
-of the PASSED ``Categorical`` dtype. This allows one to arbitrarly index these even with
+of the PASSED ``Categorical`` dtype. This allows one to arbitrarily index these even with
values NOT in the categories, similarly to how you can reindex ANY pandas index.
.. ipython :: python
@@ -736,23 +722,13 @@ Int64Index and RangeIndex
Prior to 0.18.0, the ``Int64Index`` would provide the default index for all ``NDFrame`` objects.
``RangeIndex`` is a sub-class of ``Int64Index`` added in version 0.18.0, now providing the default index for all ``NDFrame`` objects.
-``RangeIndex`` is an optimized version of ``Int64Index`` that can represent a monotonic ordered set. These are analagous to python `range types <https://docs.python.org/3/library/stdtypes.html#typesseq-range>`__.
+``RangeIndex`` is an optimized version of ``Int64Index`` that can represent a monotonic ordered set. These are analogous to python `range types <https://docs.python.org/3/library/stdtypes.html#typesseq-range>`__.
.. _indexing.float64index:
Float64Index
~~~~~~~~~~~~
-.. note::
-
- As of 0.14.0, ``Float64Index`` is backed by a native ``float64`` dtype
- array. Prior to 0.14.0, ``Float64Index`` was backed by an ``object`` dtype
- array. Using a ``float64`` dtype in the backend speeds up arithmetic
- operations by about 30x and boolean indexing operations on the
- ``Float64Index`` itself are about 2x as fast.
-
-.. versionadded:: 0.13.0
-
By default a ``Float64Index`` will be automatically created when passing floating, or mixed-integer-floating values in index creation.
This enables a pure label-based slicing paradigm that makes ``[],ix,loc`` for scalar indexing and slicing work exactly the
same.
@@ -987,7 +963,7 @@ index can be somewhat complicated. For example, the following does not work:
s.loc['c':'e'+1]
A very common use case is to limit a time series to start and end at two
-specific dates. To enable this, we made the design design to make label-based
+specific dates. To enable this, we made the design to make label-based
slicing include both endpoints:
.. ipython:: python
diff --git a/doc/source/api.rst b/doc/source/api.rst
index 12e6c7ad7f630..4e02f7b11f466 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -218,10 +218,19 @@ Top-level dealing with datetimelike
to_timedelta
date_range
bdate_range
+ cdate_range
period_range
timedelta_range
infer_freq
+Top-level dealing with intervals
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autosummary::
+ :toctree: generated/
+
+ interval_range
+
Top-level evaluation
~~~~~~~~~~~~~~~~~~~~
@@ -1282,7 +1291,7 @@ Index
-----
**Many of these methods or variants thereof are available on the objects
-that contain an index (Series/Dataframe) and those should most likely be
+that contain an index (Series/DataFrame) and those should most likely be
used before calling these methods directly.**
.. autosummary::
@@ -1599,6 +1608,198 @@ Conversion
TimedeltaIndex.floor
TimedeltaIndex.ceil
+.. currentmodule:: pandas
+
+Scalars
+-------
+
+Period
+~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ Period
+
+Attributes
+~~~~~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ Period.day
+ Period.dayofweek
+ Period.dayofyear
+ Period.days_in_month
+ Period.daysinmonth
+ Period.end_time
+ Period.freq
+ Period.freqstr
+ Period.hour
+ Period.is_leap_year
+ Period.minute
+ Period.month
+ Period.now
+ Period.ordinal
+ Period.quarter
+ Period.qyear
+ Period.second
+ Period.start_time
+ Period.strftime
+ Period.week
+ Period.weekday
+ Period.weekofyear
+ Period.year
+
+Methods
+~~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ Period.asfreq
+ Period.strftime
+ Period.to_timestamp
+
+Timestamp
+~~~~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ Timestamp
+
+Properties
+~~~~~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ Timestamp.asm8
+ Timestamp.day
+ Timestamp.dayofweek
+ Timestamp.dayofyear
+ Timestamp.days_in_month
+ Timestamp.daysinmonth
+ Timestamp.hour
+ Timestamp.is_leap_year
+ Timestamp.is_month_end
+ Timestamp.is_month_start
+ Timestamp.is_quarter_end
+ Timestamp.is_quarter_start
+ Timestamp.is_year_end
+ Timestamp.is_year_start
+ Timestamp.max
+ Timestamp.microsecond
+ Timestamp.min
+ Timestamp.month
+ Timestamp.nanosecond
+ Timestamp.quarter
+ Timestamp.resolution
+ Timestamp.second
+ Timestamp.tz
+ Timestamp.tzinfo
+ Timestamp.value
+ Timestamp.weekday_name
+ Timestamp.weekofyear
+ Timestamp.year
+
+Methods
+~~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ Timestamp.astimezone
+ Timestamp.ceil
+ Timestamp.combine
+ Timestamp.ctime
+ Timestamp.date
+ Timestamp.dst
+ Timestamp.floor
+ Timestamp.freq
+ Timestamp.freqstr
+ Timestamp.fromordinal
+ Timestamp.fromtimestamp
+ Timestamp.isocalendar
+ Timestamp.isoformat
+ Timestamp.isoweekday
+ Timestamp.normalize
+ Timestamp.now
+ Timestamp.replace
+ Timestamp.round
+ Timestamp.strftime
+ Timestamp.strptime
+ Timestamp.time
+ Timestamp.timetuple
+ Timestamp.timetz
+ Timestamp.to_datetime64
+ Timestamp.to_julian_date
+ Timestamp.to_period
+ Timestamp.to_pydatetime
+ Timestamp.today
+ Timestamp.toordinal
+ Timestamp.tz_convert
+ Timestamp.tz_localize
+ Timestamp.tzname
+ Timestamp.utcfromtimestamp
+ Timestamp.utcnow
+ Timestamp.utcoffset
+ Timestamp.utctimetuple
+ Timestamp.weekday
+
+Interval
+~~~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ Interval
+
+Properties
+~~~~~~~~~~
+.. autosummary::
+ :toctree generated/
+
+ Interval.closed
+ Interval.closed_left
+ Interval.closed_right
+ Interval.left
+ Interval.mid
+ Interval.open_left
+ Interval.open_right
+ Interval.right
+
+Timedelta
+~~~~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+ Timedelta
+
+Properties
+~~~~~~~~~~
+.. autosummary::
+ :toctree generated/
+
+ Timedelta.asm8
+ Timedelta.components
+ Timedelta.days
+ Timedelta.freq
+ Timedelta.max
+ Timedelta.microseconds
+ Timedelta.min
+ Timedelta.nanoseconds
+ Timedelta.resolution
+ Timedelta.seconds
+ Timedelta.value
+
+Methods
+~~~~~~~
+.. autosummary::
+ :toctree generated/
+
+ Timedelta.ceil
+ Timedelta.floor
+ Timedelta.isoformat
+ Timedelta.round
+ Timedelta.to_pytimedelta
+ Timedelta.to_timedelta64
+ Timedelta.total_seconds
+
Window
------
.. currentmodule:: pandas.core.window
@@ -1870,6 +2071,7 @@ Style Application
Styler.apply
Styler.applymap
+ Styler.where
Styler.format
Styler.set_precision
Styler.set_table_styles
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index fe20a7eb2b786..0990d2bd15ee6 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -251,8 +251,8 @@ replace NaN with some other value using ``fillna`` if you wish).
Flexible Comparisons
~~~~~~~~~~~~~~~~~~~~
-Starting in v0.8, pandas introduced binary comparison methods eq, ne, lt, gt,
-le, and ge to Series and DataFrame whose behavior is analogous to the binary
+Series and DataFrame have the binary comparison methods ``eq``, ``ne``, ``lt``, ``gt``,
+``le``, and ``ge`` whose behavior is analogous to the binary
arithmetic operations described above:
.. ipython:: python
@@ -347,7 +347,7 @@ That is because NaNs do not compare as equals:
np.nan == np.nan
-So, as of v0.13.1, NDFrames (such as Series, DataFrames, and Panels)
+So, NDFrames (such as Series, DataFrames, and Panels)
have an :meth:`~DataFrame.equals` method for testing equality, with NaNs in
corresponding locations treated as equal.
@@ -719,8 +719,6 @@ on an entire ``DataFrame`` or ``Series``, row- or column-wise, or elementwise.
Tablewise Function Application
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.16.2
-
``DataFrames`` and ``Series`` can of course just be passed into functions.
However, if the function needs to be called in a chain, consider using the :meth:`~DataFrame.pipe` method.
Compare the following
@@ -925,7 +923,7 @@ Passing a named function will yield that name for the row:
Aggregating with a dict
+++++++++++++++++++++++
-Passing a dictionary of column names to a scalar or a list of scalars, to ``DataFame.agg``
+Passing a dictionary of column names to a scalar or a list of scalars, to ``DataFrame.agg``
allows you to customize which functions are applied to which columns. Note that the results
are not in any particular order, you can use an ``OrderedDict`` instead to guarantee ordering.
@@ -1104,10 +1102,6 @@ Applying with a ``Panel`` will pass a ``Series`` to the applied function. If the
function returns a ``Series``, the result of the application will be a ``Panel``. If the applied function
reduces to a scalar, the result of the application will be a ``DataFrame``.
-.. note::
-
- Prior to 0.13.1 ``apply`` on a ``Panel`` would only work on ``ufuncs`` (e.g. ``np.sum/np.max``).
-
.. ipython:: python
import pandas.util.testing as tm
@@ -1800,8 +1794,6 @@ Series has the :meth:`~Series.searchsorted` method, which works similar to
smallest / largest values
~~~~~~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.14.0
-
``Series`` has the :meth:`~Series.nsmallest` and :meth:`~Series.nlargest` methods which return the
smallest or largest :math:`n` values. For a large ``Series`` this can be much
faster than sorting the entire Series and calling ``head(n)`` on the result.
@@ -1866,8 +1858,10 @@ dtypes
------
The main types stored in pandas objects are ``float``, ``int``, ``bool``,
-``datetime64[ns]`` and ``datetime64[ns, tz]`` (in >= 0.17.0), ``timedelta[ns]``, ``category`` (in >= 0.15.0), and ``object``. In addition these dtypes
-have item sizes, e.g. ``int64`` and ``int32``. See :ref:`Series with TZ <timeseries.timezone_series>` for more detail on ``datetime64[ns, tz]`` dtypes.
+``datetime64[ns]`` and ``datetime64[ns, tz]`` (in >= 0.17.0), ``timedelta[ns]``,
+``category`` and ``object``. In addition these dtypes have item sizes, e.g.
+``int64`` and ``int32``. See :ref:`Series with TZ <timeseries.timezone_series>`
+for more detail on ``datetime64[ns, tz]`` dtypes.
A convenient :attr:`~DataFrame.dtypes` attribute for DataFrames returns a Series with the data type of each column.
@@ -1908,7 +1902,7 @@ each type in a ``DataFrame``:
dft.get_dtype_counts()
-Numeric dtypes will propagate and can coexist in DataFrames (starting in v0.11.0).
+Numeric dtypes will propagate and can coexist in DataFrames.
If a dtype is passed (either directly via the ``dtype`` keyword, a passed ``ndarray``,
or a passed ``Series``, then it will be preserved in DataFrame operations. Furthermore,
different numeric dtypes will **NOT** be combined. The following example will give you a taste.
@@ -2137,7 +2131,7 @@ gotchas
~~~~~~~
Performing selection operations on ``integer`` type data can easily upcast the data to ``floating``.
-The dtype of the input data will be preserved in cases where ``nans`` are not introduced (starting in 0.11.0)
+The dtype of the input data will be preserved in cases where ``nans`` are not introduced.
See also :ref:`Support for integer NA <gotchas.intna>`
.. ipython:: python
@@ -2168,8 +2162,6 @@ Selecting columns based on ``dtype``
.. _basics.selectdtypes:
-.. versionadded:: 0.14.1
-
The :meth:`~DataFrame.select_dtypes` method implements subsetting of columns
based on their ``dtype``.
diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst
index 02d7920bc4a84..8835c4a1533d0 100644
--- a/doc/source/categorical.rst
+++ b/doc/source/categorical.rst
@@ -16,13 +16,6 @@
Categorical Data
****************
-.. versionadded:: 0.15
-
-.. note::
- While there was `pandas.Categorical` in earlier versions, the ability to use
- categorical data in `Series` and `DataFrame` is new.
-
-
This is an introduction to pandas categorical data type, including a short comparison
with R's ``factor``.
@@ -295,10 +288,6 @@ Sorting and Order
.. _categorical.sort:
-.. warning::
-
- The default for construction has changed in v0.16.0 to ``ordered=False``, from the prior implicit ``ordered=True``
-
If categorical data is ordered (``s.cat.ordered == True``), then the order of the categories has a
meaning and certain operations are possible. If the categorical is unordered, ``.min()/.max()`` will raise a `TypeError`.
@@ -803,13 +792,11 @@ Following table summarizes the results of ``Categoricals`` related concatenation
Getting Data In/Out
-------------------
-.. versionadded:: 0.15.2
+You can write data that contains ``category`` dtypes to a ``HDFStore``.
+See :ref:`here <io.hdf5-categorical>` for an example and caveats.
-Writing data (`Series`, `Frames`) to a HDF store that contains a ``category`` dtype was implemented
-in 0.15.2. See :ref:`here <io.hdf5-categorical>` for an example and caveats.
-
-Writing data to and reading data from *Stata* format files was implemented in
-0.15.2. See :ref:`here <io.stata-categorical>` for an example and caveats.
+It is also possible to write data to and read data from *Stata* format files.
+See :ref:`here <io.stata-categorical>` for an example and caveats.
Writing to a CSV file will convert the data, effectively removing any information about the
categorical (categories and ordering). So if you read back the CSV file you have to convert the
@@ -928,32 +915,6 @@ an ``object`` dtype is a constant times the length of the data.
s.astype('category').nbytes
-Old style constructor usage
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In earlier versions than pandas 0.15, a `Categorical` could be constructed by passing in precomputed
-`codes` (called then `labels`) instead of values with categories. The `codes` were interpreted as
-pointers to the categories with `-1` as `NaN`. This type of constructor usage is replaced by
-the special constructor :func:`Categorical.from_codes`.
-
-Unfortunately, in some special cases, using code which assumes the old style constructor usage
-will work with the current pandas version, resulting in subtle bugs:
-
-.. code-block:: python
-
- >>> cat = pd.Categorical([1,2], [1,2,3])
- >>> # old version
- >>> cat.get_values()
- array([2, 3], dtype=int64)
- >>> # new version
- >>> cat.get_values()
- array([1, 2], dtype=int64)
-
-.. warning::
- If you used `Categoricals` with older versions of pandas, please audit your code before
- upgrading and change your code to use the :func:`~pandas.Categorical.from_codes`
- constructor.
-
`Categorical` is not a `numpy` array
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -982,8 +943,7 @@ Dtype comparisons work:
dtype == np.str_
np.str_ == dtype
-To check if a Series contains Categorical data, with pandas 0.16 or later, use
-``hasattr(s, 'cat')``:
+To check if a Series contains Categorical data, use ``hasattr(s, 'cat')``:
.. ipython:: python
@@ -1023,13 +983,13 @@ basic type) and applying along columns will also convert to object.
Categorical Index
~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.16.1
-
-A new ``CategoricalIndex`` index type is introduced in version 0.16.1. See the
-:ref:`advanced indexing docs <indexing.categoricalindex>` for a more detailed
+``CategoricalIndex`` is a type of index that is useful for supporting
+indexing with duplicates. This is a container around a ``Categorical``
+and allows efficient indexing and storage of an index with a large number of duplicated elements.
+See the :ref:`advanced indexing docs <indexing.categoricalindex>` for a more detailed
explanation.
-Setting the index, will create create a ``CategoricalIndex``
+Setting the index will create a ``CategoricalIndex``
.. ipython:: python
@@ -1041,10 +1001,6 @@ Setting the index, will create create a ``CategoricalIndex``
# This now sorts by the categories order
df.sort_index()
-In previous versions (<0.16.1) there is no index of type ``category``, so
-setting the index to categorical column will convert the categorical data to a
-"normal" dtype first and therefore remove any custom ordering of the categories.
-
Side Effects
~~~~~~~~~~~~
diff --git a/doc/source/comparison_with_r.rst b/doc/source/comparison_with_r.rst
index 194e022e34c7c..eb97aeeb7e696 100644
--- a/doc/source/comparison_with_r.rst
+++ b/doc/source/comparison_with_r.rst
@@ -247,8 +247,6 @@ For more details and examples see :ref:`the reshaping documentation
|subset|_
~~~~~~~~~~
-.. versionadded:: 0.13
-
The :meth:`~pandas.DataFrame.query` method is similar to the base R ``subset``
function. In R you might want to get the rows of a ``data.frame`` where one
column's values are less than another column's values:
@@ -277,8 +275,6 @@ For more details and examples see :ref:`the query documentation
|with|_
~~~~~~~~
-.. versionadded:: 0.13
-
An expression using a data.frame called ``df`` in R with the columns ``a`` and
``b`` would be evaluated using ``with`` like so:
@@ -509,8 +505,6 @@ For more details and examples see :ref:`the reshaping documentation
|factor|_
~~~~~~~~~
-.. versionadded:: 0.15
-
pandas has a data type for categorical data.
.. code-block:: r
diff --git a/doc/source/computation.rst b/doc/source/computation.rst
index 76a030d355e33..14cfdbc364837 100644
--- a/doc/source/computation.rst
+++ b/doc/source/computation.rst
@@ -654,7 +654,7 @@ aggregation with, outputting a DataFrame:
r['A'].agg([np.sum, np.mean, np.std])
-On a widowed DataFrame, you can pass a list of functions to apply to each
+On a windowed DataFrame, you can pass a list of functions to apply to each
column, which produces an aggregated result with a hierarchical index:
.. ipython:: python
@@ -924,15 +924,12 @@ EWM has a ``min_periods`` argument, which has the same
meaning it does for all the ``.expanding`` and ``.rolling`` methods:
no output values will be set until at least ``min_periods`` non-null values
are encountered in the (expanding) window.
-(This is a change from versions prior to 0.15.0, in which the ``min_periods``
-argument affected only the ``min_periods`` consecutive entries starting at the
-first non-null value.)
-EWM also has an ``ignore_na`` argument, which deterines how
+EWM also has an ``ignore_na`` argument, which determines how
intermediate null values affect the calculation of the weights.
When ``ignore_na=False`` (the default), weights are calculated based on absolute
positions, so that intermediate null values affect the result.
-When ``ignore_na=True`` (which reproduces the behavior in versions prior to 0.15.0),
+When ``ignore_na=True``,
weights are calculated by ignoring intermediate null values.
For example, assuming ``adjust=True``, if ``ignore_na=False``, the weighted
average of ``3, NaN, 5`` would be calculated as
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 32e7a616fe856..5bb3ba75fe51b 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -256,12 +256,6 @@ Panels
pf = pd.Panel({'df1':df1,'df2':df2,'df3':df3});pf
- #Assignment using Transpose (pandas < 0.15)
- pf = pf.transpose(2,0,1)
- pf['E'] = pd.DataFrame(data, rng, cols)
- pf = pf.transpose(1,2,0);pf
-
- #Direct assignment (pandas > 0.15)
pf.loc[:,:,'F'] = pd.DataFrame(data, rng, cols);pf
`Mask a panel by using np.where and then reconstructing the panel with the new masked values
@@ -818,7 +812,7 @@ The :ref:`Concat <merging.concatenation>` docs. The :ref:`Join <merging.join>` d
df1 = pd.DataFrame(np.random.randn(6, 3), index=rng, columns=['A', 'B', 'C'])
df2 = df1.copy()
-ignore_index is needed in pandas < v0.13, and depending on df construction
+Depending on df construction, ``ignore_index`` may be needed
.. ipython:: python
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index 3c6572229802d..ec0a1c7a00bf7 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -73,7 +73,7 @@ index is passed, one will be created having values ``[0, ..., len(data) - 1]``.
.. note::
- Starting in v0.8.0, pandas supports non-unique index values. If an operation
+ pandas supports non-unique index values. If an operation
that does not support duplicate index values is attempted, an exception
will be raised at that time. The reason for being lazy is nearly all performance-based
(there are many instances in computations, like parts of GroupBy, where the index
@@ -453,8 +453,6 @@ available to insert at a particular location in the columns:
Assigning New Columns in Method Chains
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.16.0
-
Inspired by `dplyr's
<http://cran.rstudio.com/web/packages/dplyr/vignettes/introduction.html#mutate>`__
``mutate`` verb, DataFrame has an :meth:`~pandas.DataFrame.assign`
@@ -698,7 +696,7 @@ DataFrame in tabular form, though it won't always fit the console width:
print(baseball.iloc[-20:, :12].to_string())
-New since 0.10.0, wide DataFrames will now be printed across multiple rows by
+Wide DataFrames will be printed across multiple rows by
default:
.. ipython:: python
@@ -845,19 +843,16 @@ DataFrame objects with mixed-type columns, all of the data will get upcasted to
.. note::
- Unfortunately Panel, being less commonly used than Series and DataFrame,
+ Panel, being less commonly used than Series and DataFrame,
has been slightly neglected feature-wise. A number of methods and options
- available in DataFrame are not available in Panel. This will get worked
- on, of course, in future releases. And faster if you join me in working on
- the codebase.
+ available in DataFrame are not available in Panel.
.. _dsintro.to_panel:
From DataFrame using ``to_panel`` method
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This method was introduced in v0.7 to replace ``LongPanel.to_long``, and converts
-a DataFrame with a two-level index to a Panel.
+``to_panel`` converts a DataFrame with a two-level index to a Panel.
.. ipython:: python
:okwarning:
diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst
index 685a8690a53d5..264bd1de1fc77 100644
--- a/doc/source/enhancingperf.rst
+++ b/doc/source/enhancingperf.rst
@@ -213,17 +213,18 @@ the rows, applying our ``integrate_f_typed``, and putting this in the zeros arra
.. warning::
- In 0.13.0 since ``Series`` has internaly been refactored to no longer sub-class ``ndarray``
- but instead subclass ``NDFrame``, you can **not pass** a ``Series`` directly as a ``ndarray`` typed parameter
- to a cython function. Instead pass the actual ``ndarray`` using the ``.values`` attribute of the Series.
+ You can **not pass** a ``Series`` directly as a ``ndarray`` typed parameter
+ to a cython function. Instead pass the actual ``ndarray`` using the
+ ``.values`` attribute of the Series. The reason is that the cython
+ definition is specific to an ndarray and not the passed Series.
- Prior to 0.13.0
+ So, do not do this:
.. code-block:: python
apply_integrate_f(df['a'], df['b'], df['N'])
- Use ``.values`` to get the underlying ``ndarray``
+ But rather, use ``.values`` to get the underlying ``ndarray``
.. code-block:: python
@@ -399,10 +400,8 @@ Read more in the `numba docs <http://numba.pydata.org/>`__.
.. _enhancingperf.eval:
-Expression Evaluation via :func:`~pandas.eval` (Experimental)
--------------------------------------------------------------
-
-.. versionadded:: 0.13
+Expression Evaluation via :func:`~pandas.eval`
+-----------------------------------------------
The top-level function :func:`pandas.eval` implements expression evaluation of
:class:`~pandas.Series` and :class:`~pandas.DataFrame` objects.
@@ -539,10 +538,8 @@ Now let's do the same thing but with comparisons:
of type ``bool`` or ``np.bool_``. Again, you should perform these kinds of
operations in plain Python.
-The ``DataFrame.eval`` method (Experimental)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. versionadded:: 0.13
+The ``DataFrame.eval`` method
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In addition to the top level :func:`pandas.eval` function you can also
evaluate an expression in the "context" of a :class:`~pandas.DataFrame`.
@@ -646,19 +643,6 @@ whether the query modifies the original frame.
Local Variables
~~~~~~~~~~~~~~~
-In pandas version 0.14 the local variable API has changed. In pandas 0.13.x,
-you could refer to local variables the same way you would in standard Python.
-For example,
-
-.. code-block:: python
-
- df = pd.DataFrame(np.random.randn(5, 2), columns=['a', 'b'])
- newcol = np.random.randn(len(df))
- df.eval('b + newcol')
-
- UndefinedVariableError: name 'newcol' is not defined
-
-As you can see from the exception generated, this syntax is no longer allowed.
You must *explicitly reference* any local variable that you want to use in an
expression by placing the ``@`` character in front of the name. For example,
diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst
index a3062b4086673..9e6f98923fca6 100644
--- a/doc/source/gotchas.rst
+++ b/doc/source/gotchas.rst
@@ -22,8 +22,8 @@ Frequently Asked Questions (FAQ)
DataFrame memory usage
----------------------
-As of pandas version 0.15.0, the memory usage of a dataframe (including
-the index) is shown when accessing the ``info`` method of a dataframe. A
+The memory usage of a dataframe (including the index)
+is shown when accessing the ``info`` method of a dataframe. A
configuration option, ``display.memory_usage`` (see :ref:`options`),
specifies if the dataframe's memory usage will be displayed when
invoking the ``df.info()`` method.
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index 937d682d238b3..e9a7d8dd0a46e 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -140,7 +140,7 @@ columns:
In [5]: grouped = df.groupby(get_letter_type, axis=1)
-Starting with 0.8, pandas Index objects now support duplicate values. If a
+pandas Index objects support duplicate values. If a
non-unique index is used as the group key in a groupby operation, all values
for the same index value will be considered to be in one group and thus the
output of aggregation functions will only contain unique index values:
@@ -288,8 +288,6 @@ chosen level:
s.sum(level='second')
-.. versionadded:: 0.6
-
Grouping with multiple levels is supported.
.. ipython:: python
@@ -563,7 +561,7 @@ must be either implemented on GroupBy or available via :ref:`dispatching
.. note::
- If you pass a dict to ``aggregate``, the ordering of the output colums is
+ If you pass a dict to ``aggregate``, the ordering of the output columns is
non-deterministic. If you want to be sure the output columns will be in a specific
order, you can use an ``OrderedDict``. Compare the output of the following two commands:
@@ -768,8 +766,6 @@ missing values with the ``ffill()`` method.
Filtration
----------
-.. versionadded:: 0.12
-
The ``filter`` method returns a subset of the original object. Suppose we
want to take only elements that belong to groups with a group sum greater
than 2.
@@ -860,8 +856,6 @@ In this example, we chopped the collection of time series into yearly chunks
then independently called :ref:`fillna <missing_data.fillna>` on the
groups.
-.. versionadded:: 0.14.1
-
The ``nlargest`` and ``nsmallest`` methods work on ``Series`` style groupbys:
.. ipython:: python
@@ -1050,19 +1044,6 @@ Just like for a DataFrame or Series you can call head and tail on a groupby:
This shows the first or last n rows from each group.
-.. warning::
-
- Before 0.14.0 this was implemented with a fall-through apply,
- so the result would incorrectly respect the as_index flag:
-
- .. code-block:: python
-
- >>> g.head(1): # was equivalent to g.apply(lambda x: x.head(1))
- A B
- A
- 1 0 1 2
- 5 2 5 6
-
.. _groupby.nth:
Taking the nth row of each group
@@ -1115,8 +1096,6 @@ You can also select multiple rows from each group by specifying multiple nth val
Enumerate group items
~~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.13.0
-
To see the order in which each row appears within its group, use the
``cumcount`` method:
@@ -1232,7 +1211,7 @@ Groupby by Indexer to 'resample' data
Resampling produces new hypothetical samples (resamples) from already existing observed data or from a model that generates data. These new samples are similar to the pre-existing samples.
-In order to resample to work on indices that are non-datetimelike , the following procedure can be utilized.
+In order to resample to work on indices that are non-datetimelike, the following procedure can be utilized.
In the following examples, **df.index // 5** returns a binary array which is used to determine what gets selected for the groupby operation.
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 53a259ad6eb15..edbc4e6d7fd22 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -47,12 +47,6 @@ advanced indexing.
should be avoided. See :ref:`Returning a View versus Copy
<indexing.view_versus_copy>`
-.. warning::
-
- In 0.15.0 ``Index`` has internally been refactored to no longer subclass ``ndarray``
- but instead subclass ``PandasObject``, similarly to the rest of the pandas objects. This should be
- a transparent change with only very limited API implications (See the :ref:`Internal Refactoring <whatsnew_0150.refactoring>`)
-
.. warning::
Indexing on an integer-based Index with floats has been clarified in 0.18.0, for a summary of the changes, see :ref:`here <whatsnew_0180.float_indexers>`.
@@ -66,8 +60,6 @@ See the :ref:`cookbook<cookbook.selection>` for some advanced strategies
Different Choices for Indexing
------------------------------
-.. versionadded:: 0.11.0
-
Object selection has had a number of user-requested additions in order to
support more explicit location based indexing. Pandas now supports three types
of multi-axis indexing.
@@ -250,8 +242,6 @@ as an attribute:
- In any of these cases, standard indexing will still work, e.g. ``s['1']``, ``s['min']``, and ``s['index']`` will
access the corresponding element or column.
- - The ``Series/Panel`` accesses are available starting in 0.13.0.
-
If you are using the IPython environment, you may also use tab-completion to
see these accessible attributes.
@@ -279,21 +269,6 @@ new column. In 0.21.0 and later, this will raise a ``UserWarning``:
1 2.0
2 3.0
-Similarly, it is possible to create a column with a name which collides with one of Pandas's
-built-in methods or attributes, which can cause confusion later when attempting to access
-that column as an attribute. This behavior now warns:
-
-.. code-block:: ipython
-
- In[4]: df['sum'] = [5., 7., 9.]
- UserWarning: Column name 'sum' collides with a built-in method, which will cause unexpected attribute behavior
- In[5]: df.sum
- Out[5]:
- <bound method DataFrame.sum of one sum
- 0 1.0 5.0
- 1 2.0 7.0
- 2 3.0 9.0>
-
Slicing ranges
--------------
@@ -531,7 +506,6 @@ Out of range slice indexes are handled gracefully just as in Python/Numpy.
.. ipython:: python
# these are allowed in python/numpy.
- # Only works in Pandas starting from v0.14.0.
x = list('abcdef')
x
x[4:10]
@@ -541,14 +515,8 @@ Out of range slice indexes are handled gracefully just as in Python/Numpy.
s.iloc[4:10]
s.iloc[8:10]
-.. note::
-
- Prior to v0.14.0, ``iloc`` would not accept out of bounds indexers for
- slices, e.g. a value that exceeds the length of the object being indexed.
-
-
-Note that this could result in an empty axis (e.g. an empty DataFrame being
-returned)
+Note that using slices that go out of bounds can result in
+an empty axis (e.g. an empty DataFrame being returned)
.. ipython:: python
@@ -671,7 +639,6 @@ For getting *multiple* indexers, using ``.get_indexer``
Selecting Random Samples
------------------------
-.. versionadded::0.16.1
A random selection of rows or columns from a Series, DataFrame, or Panel with the :meth:`~DataFrame.sample` method. The method will sample rows by default, and accepts a specific number of rows/columns to return, or a fraction of rows.
@@ -747,9 +714,7 @@ Finally, one can also set a seed for ``sample``'s random number generator using
Setting With Enlargement
------------------------
-.. versionadded:: 0.13
-
-The ``.loc/[]`` operations can perform enlargement when setting a non-existant key for that axis.
+The ``.loc/[]`` operations can perform enlargement when setting a non-existent key for that axis.
In the ``Series`` case this is effectively an appending operation
@@ -1022,8 +987,6 @@ partial setting via ``.loc`` (but on the contents rather than the axis labels)
df2[ df2[1:4] > 0 ] = 3
df2
-.. versionadded:: 0.13
-
Where can also accept ``axis`` and ``level`` parameters to align the input when
performing the ``where``.
@@ -1066,8 +1029,6 @@ as condition and ``other`` argument.
The :meth:`~pandas.DataFrame.query` Method (Experimental)
---------------------------------------------------------
-.. versionadded:: 0.13
-
:class:`~pandas.DataFrame` objects have a :meth:`~pandas.DataFrame.query`
method that allows selection using an expression.
@@ -1508,8 +1469,6 @@ The name, if set, will be shown in the console display:
Setting metadata
~~~~~~~~~~~~~~~~
-.. versionadded:: 0.13.0
-
Indexes are "mostly immutable", but it is possible to set and change their
metadata, like the index ``name`` (or, for ``MultiIndex``, ``levels`` and
``labels``).
@@ -1529,8 +1488,6 @@ See :ref:`Advanced Indexing <advanced>` for usage of MultiIndexes.
ind.name = "bob"
ind
-.. versionadded:: 0.15.0
-
``set_names``, ``set_levels``, and ``set_labels`` also take an optional
`level`` argument
@@ -1546,11 +1503,6 @@ Set operations on Index objects
.. _indexing.set_ops:
-.. warning::
-
- In 0.15.0. the set operations ``+`` and ``-`` were deprecated in order to provide these for numeric type operations on certain
- index types. ``+`` can be replace by ``.union()`` or ``|``, and ``-`` by ``.difference()``.
-
The two main operations are ``union (|)``, ``intersection (&)``
These can be directly called as instance methods or used via overloaded
operators. Difference is provided via the ``.difference()`` method.
@@ -1792,7 +1744,7 @@ Evaluation order matters
Furthermore, in chained expressions, the order may determine whether a copy is returned or not.
If an expression will set values on a copy of a slice, then a ``SettingWithCopy``
-exception will be raised (this raise/warn behavior is new starting in 0.13.0)
+warning will be issued.
You can control the action of a chained assignment via the option ``mode.chained_assignment``,
which can take the values ``['raise','warn',None]``, where showing a warning is the default.
diff --git a/doc/source/install.rst b/doc/source/install.rst
index f92c43839ee31..c805f84d0faaa 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -18,7 +18,7 @@ Instructions for installing from source,
Python version support
----------------------
-Officially Python 2.7, 3.4, 3.5, and 3.6
+Officially Python 2.7, 3.5, and 3.6.
Installing pandas
-----------------
@@ -107,7 +107,7 @@ following command::
To install a specific pandas version::
- conda install pandas=0.13.1
+ conda install pandas=0.20.3
To install other packages, IPython for example::
@@ -183,21 +183,17 @@ installed), make sure you have `pytest
>>> import pandas as pd
>>> pd.test()
- Running unit tests for pandas
- pandas version 0.18.0
- numpy version 1.10.2
- pandas is installed in pandas
- Python version 2.7.11 |Continuum Analytics, Inc.|
- (default, Dec 6 2015, 18:57:58) [GCC 4.2.1 (Apple Inc. build 5577)]
- nose version 1.3.7
+ running: pytest --skip-slow --skip-network C:\Users\TP\Anaconda3\envs\py36\lib\site-packages\pandas
+ ============================= test session starts =============================
+ platform win32 -- Python 3.6.2, pytest-3.2.1, py-1.4.34, pluggy-0.4.0
+ rootdir: C:\Users\TP\Documents\Python\pandasdev\pandas, inifile: setup.cfg
+ collected 12145 items / 3 skipped
+
..................................................................S......
........S................................................................
.........................................................................
- ----------------------------------------------------------------------
- Ran 9252 tests in 368.339s
-
- OK (SKIP=117)
+ ==================== 12130 passed, 12 skipped in 368.339 seconds =====================
Dependencies
------------
diff --git a/doc/source/io.rst b/doc/source/io.rst
index e338407361705..fcf7f6029197b 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -364,7 +364,7 @@ warn_bad_lines : boolean, default ``True``
Specifying column data types
''''''''''''''''''''''''''''
-Starting with v0.10, you can indicate the data type for the whole DataFrame or
+You can indicate the data type for the whole DataFrame or
individual columns:
.. ipython:: python
@@ -592,8 +592,7 @@ Ignoring line comments and empty lines
++++++++++++++++++++++++++++++++++++++
If the ``comment`` parameter is specified, then completely commented lines will
-be ignored. By default, completely blank lines will be ignored as well. Both of
-these are API changes introduced in version 0.15.
+be ignored. By default, completely blank lines will be ignored as well.
.. ipython:: python
@@ -1310,8 +1309,6 @@ column widths for contiguous columns:
The parser will take care of extra white spaces around the columns
so it's ok to have extra separation between the columns in the file.
-.. versionadded:: 0.13.0
-
By default, ``read_fwf`` will try to infer the file's ``colspecs`` by using the
first 100 rows of the file. It can do it only in cases when the columns are
aligned and correctly separated by the provided ``delimiter`` (default delimiter
@@ -1407,8 +1404,7 @@ Reading columns with a ``MultiIndex``
By specifying list of row locations for the ``header`` argument, you
can read in a ``MultiIndex`` for the columns. Specifying non-consecutive
-rows will skip the intervening rows. In order to have the pre-0.13 behavior
-of tupleizing columns, specify ``tupleize_cols=True``.
+rows will skip the intervening rows.
.. ipython:: python
@@ -1418,7 +1414,7 @@ of tupleizing columns, specify ``tupleize_cols=True``.
print(open('mi.csv').read())
pd.read_csv('mi.csv',header=[0,1,2,3],index_col=[0,1])
-Starting in 0.13.0, ``read_csv`` will be able to interpret a more common format
+``read_csv`` is also able to interpret a more common format
of multi-columns indices.
.. ipython:: python
@@ -2012,8 +2008,6 @@ The speedup is less noticeable for smaller datasets:
Normalization
'''''''''''''
-.. versionadded:: 0.13.0
-
pandas provides a utility function to take a dict or list of dicts and *normalize* this semi-structured data
into a flat table.
@@ -2198,8 +2192,6 @@ Reading HTML Content
We **highly encourage** you to read the :ref:`HTML Table Parsing gotchas <io.html.gotchas>`
below regarding the issues surrounding the BeautifulSoup4/html5lib/lxml parsers.
-.. versionadded:: 0.12.0
-
The top-level :func:`~pandas.io.html.read_html` function can accept an HTML
string/file/URL and will parse HTML tables into list of pandas DataFrames.
Let's look at a few examples.
@@ -2653,10 +2645,6 @@ of sheet names can simply be passed to ``read_excel`` with no loss in performanc
# equivalent using the read_excel function
data = read_excel('path_to_file.xls', ['Sheet1', 'Sheet2'], index_col=None, na_values=['NA'])
-.. versionadded:: 0.12
-
-``ExcelFile`` has been moved to the top level namespace.
-
.. versionadded:: 0.17
``read_excel`` can take an ``ExcelFile`` object as input
@@ -2712,13 +2700,8 @@ Using a list to get multiple sheets:
# Returns the 1st and 4th sheet, as a dictionary of DataFrames.
read_excel('path_to_file.xls',sheet_name=['Sheet1',3])
-.. versionadded:: 0.16
-
``read_excel`` can read more than one sheet, by setting ``sheet_name`` to either
a list of sheet names, a list of sheet positions, or ``None`` to read all sheets.
-
-.. versionadded:: 0.13
-
Sheets can be specified by sheet index or sheet name, using an integer or string,
respectively.
@@ -2866,9 +2849,9 @@ Files with a ``.xls`` extension will be written using ``xlwt`` and those with a
``.xlsx`` extension will be written using ``xlsxwriter`` (if available) or
``openpyxl``.
-The DataFrame will be written in a way that tries to mimic the REPL output. One
-difference from 0.12.0 is that the ``index_label`` will be placed in the second
-row instead of the first. You can get the previous behaviour by setting the
+The DataFrame will be written in a way that tries to mimic the REPL output.
+The ``index_label`` will be placed in the second
+row instead of the first. You can place it in the first row by setting the
``merge_cells`` option in ``to_excel()`` to ``False``:
.. code-block:: python
@@ -2945,8 +2928,6 @@ Added support for Openpyxl >= 2.2
Excel writer engines
''''''''''''''''''''
-.. versionadded:: 0.13
-
``pandas`` chooses an Excel writer via two methods:
1. the ``engine`` keyword argument
@@ -3074,14 +3055,19 @@ any pickled pandas object (or any other pickled object) from file:
Loading pickled data received from untrusted sources can be unsafe.
- See: http://docs.python.org/2.7/library/pickle.html
+ See: https://docs.python.org/3.6/library/pickle.html
.. warning::
- Several internal refactorings, 0.13 (:ref:`Series Refactoring <whatsnew_0130.refactoring>`), and 0.15 (:ref:`Index Refactoring <whatsnew_0150.refactoring>`),
- preserve compatibility with pickles created prior to these versions. However, these must
- be read with ``pd.read_pickle``, rather than the default python ``pickle.load``.
- See `this question <http://stackoverflow.com/questions/20444593/pandas-compiled-from-source-default-pickle-behavior-changed>`__
+ Several internal refactorings have been done while still preserving
+ compatibility with pickles created with older versions of pandas. However,
+ for such cases, pickled DataFrames, Series, etc., must be read with
+ ``pd.read_pickle``, rather than ``pickle.load``.
+
+ See `here <http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#whatsnew-0130-refactoring>`__
+ and `here <http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#whatsnew-0150-refactoring>`__
+ for some examples of compatibility-breaking changes. See
+ `this question <http://stackoverflow.com/questions/20444593/pandas-compiled-from-source-default-pickle-behavior-changed>`__
for a detailed explanation.
.. _io.pickle.compression:
@@ -3091,7 +3077,7 @@ Compressed pickle files
.. versionadded:: 0.20.0
-:func:`read_pickle`, :meth:`DataFame.to_pickle` and :meth:`Series.to_pickle` can read
+:func:`read_pickle`, :meth:`DataFrame.to_pickle` and :meth:`Series.to_pickle` can read
and write compressed pickle files. The compression types of ``gzip``, ``bz2``, ``xz`` are supported for reading and writing.
`zip`` file supports read only and must contain only one data file
to be read in.
@@ -3150,9 +3136,7 @@ The default is to 'infer
msgpack
-------
-.. versionadded:: 0.13.0
-
-Starting in 0.13.0, pandas is supporting the ``msgpack`` format for
+pandas supports the ``msgpack`` format for
object serialization. This is a lightweight portable binary format, similar
to binary JSON, that is highly space efficient, and provides good performance
both on the writing (serialization), and reading (deserialization).
@@ -3254,11 +3238,10 @@ for some advanced strategies
.. warning::
- As of version 0.15.0, pandas requires ``PyTables`` >= 3.0.0. Stores written with prior versions of pandas / ``PyTables`` >= 2.3 are fully compatible (this was the previous minimum ``PyTables`` required version).
-
-.. warning::
-
- There is a ``PyTables`` indexing bug which may appear when querying stores using an index. If you see a subset of results being returned, upgrade to ``PyTables`` >= 3.2. Stores created previously will need to be rewritten using the updated version.
+ pandas requires ``PyTables`` >= 3.0.0.
+ There is an indexing bug in ``PyTables`` < 3.2 which may appear when querying stores using an index.
+ If you see a subset of results being returned, upgrade to ``PyTables`` >= 3.2.
+ Stores created previously will need to be rewritten using the updated version.
.. warning::
@@ -3346,7 +3329,7 @@ Read/Write API
''''''''''''''
``HDFStore`` supports an top-level API using ``read_hdf`` for reading and ``to_hdf`` for writing,
-similar to how ``read_csv`` and ``to_csv`` work. (new in 0.11.0)
+similar to how ``read_csv`` and ``to_csv`` work.
.. ipython:: python
@@ -3424,10 +3407,6 @@ This is also true for the major axis of a ``Panel``:
Fixed Format
''''''''''''
-.. note::
-
- This was prior to 0.13.0 the ``Storer`` format.
-
The examples above show storing using ``put``, which write the HDF5 to ``PyTables`` in a fixed array format, called
the ``fixed`` format. These types of stores are **not** appendable once written (though you can simply
remove them and rewrite). Nor are they **queryable**; they must be
@@ -3460,8 +3439,6 @@ other sessions. In addition, delete & query type operations are
supported. This format is specified by ``format='table'`` or ``format='t'``
to ``append`` or ``put`` or ``to_hdf``
-.. versionadded:: 0.13
-
This format can be set as an option as well ``pd.set_option('io.hdf.default_format','table')`` to
enable ``put/append/to_hdf`` to by default store in the ``table`` format.
@@ -3765,9 +3742,7 @@ space. These are in terms of the total number of rows in a table.
Using timedelta64[ns]
+++++++++++++++++++++
-.. versionadded:: 0.13
-
-Beginning in 0.13.0, you can store and query using the ``timedelta64[ns]`` type. Terms can be
+You can store and query using the ``timedelta64[ns]`` type. Terms can be
specified in the format: ``<float>(<unit>)``, where float may be signed (and fractional), and unit can be
``D,s,ms,us,ns`` for the timedelta. Here's an example:
@@ -3791,7 +3766,7 @@ indexed dimension as the ``where``.
.. note::
- Indexes are automagically created (starting ``0.10.1``) on the indexables
+ Indexes are automagically created on the indexables
and any data columns you specify. This behavior can be turned off by passing
``index=False`` to ``append``.
@@ -3878,7 +3853,7 @@ create a new table!)
Iterator
++++++++
-Starting in ``0.11.0``, you can pass, ``iterator=True`` or ``chunksize=number_in_a_chunk``
+You can pass ``iterator=True`` or ``chunksize=number_in_a_chunk``
to ``select`` and ``select_as_multiple`` to return an iterator on the results.
The default is 50,000 rows returned in a chunk.
@@ -3889,8 +3864,6 @@ The default is 50,000 rows returned in a chunk.
.. note::
- .. versionadded:: 0.12.0
-
You can also use the iterator with ``read_hdf`` which will open, then
automatically close the store when finished iterating.
@@ -3986,8 +3959,8 @@ of rows in an object.
Multiple Table Queries
++++++++++++++++++++++
-New in 0.10.1 are the methods ``append_to_multiple`` and
-``select_as_multiple``, that can perform appending/selecting from
+The methods ``append_to_multiple`` and
+``select_as_multiple`` can perform appending/selecting from
multiple tables at once. The idea is to have one table (call it the
selector table) that you index most/all of the columns, and perform your
queries. The other table(s) are data tables with an index matching the
@@ -4233,10 +4206,8 @@ object : ``strings`` ``np.nan``
Categorical Data
++++++++++++++++
-.. versionadded:: 0.15.2
-
-Writing data to a ``HDFStore`` that contains a ``category`` dtype was implemented
-in 0.15.2. Queries work the same as if it was an object array. However, the ``category`` dtyped data is
+You can write data that contains ``category`` dtypes to a ``HDFStore``.
+Queries work the same as if it was an object array. However, the ``category`` dtyped data is
stored in a more efficient manner.
.. ipython:: python
@@ -4251,21 +4222,6 @@ stored in a more efficient manner.
result
result.dtypes
-.. warning::
-
- The format of the ``Categorical`` is readable by prior versions of pandas (< 0.15.2), but will retrieve
- the data as an integer based column (e.g. the ``codes``). However, the ``categories`` *can* be retrieved
- but require the user to select them manually using the explicit meta path.
-
- The data is stored like so:
-
- .. ipython:: python
-
- cstore
-
- # to get the categories
- cstore.select('dfcat/meta/A/meta')
-
.. ipython:: python
:suppress:
:okexcept:
@@ -4291,7 +4247,7 @@ Pass ``min_itemsize`` on the first table creation to a-priori specify the minimu
``min_itemsize`` can be an integer, or a dict mapping a column name to an integer. You can pass ``values`` as a key to
allow all *indexables* or *data_columns* to have this min_itemsize.
-Starting in 0.11.0, passing a ``min_itemsize`` dict will cause all passed columns to be created as *data_columns* automatically.
+Passing a ``min_itemsize`` dict will cause all passed columns to be created as *data_columns* automatically.
.. note::
@@ -4419,44 +4375,6 @@ Now you can import the ``DataFrame`` into R:
starting point if you have stored multiple ``DataFrame`` objects to a
single HDF5 file.
-Backwards Compatibility
-'''''''''''''''''''''''
-
-0.10.1 of ``HDFStore`` can read tables created in a prior version of pandas,
-however query terms using the
-prior (undocumented) methodology are unsupported. ``HDFStore`` will
-issue a warning if you try to use a legacy-format file. You must
-read in the entire file and write it out using the new format, using the
-method ``copy`` to take advantage of the updates. The group attribute
-``pandas_version`` contains the version information. ``copy`` takes a
-number of options, please see the docstring.
-
-
-.. ipython:: python
- :suppress:
-
- import os
- legacy_file_path = os.path.abspath('source/_static/legacy_0.10.h5')
-
-.. ipython:: python
- :okwarning:
-
- # a legacy store
- legacy_store = pd.HDFStore(legacy_file_path,'r')
- legacy_store
-
- # copy (and return the new handle)
- new_store = legacy_store.copy('store_new.h5')
- new_store
- new_store.close()
-
-.. ipython:: python
- :suppress:
-
- legacy_store.close()
- import os
- os.remove('store_new.h5')
-
Performance
'''''''''''
@@ -4597,8 +4515,7 @@ See the documentation for `pyarrow <http://arrow.apache.org/docs/python/>`__ and
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('20130101', periods=3),
- 'g': pd.date_range('20130101', periods=3, tz='US/Eastern'),
- 'h': pd.date_range('20130101', periods=3, freq='ns')})
+ 'g': pd.date_range('20130101', periods=3, tz='US/Eastern')})
df
df.dtypes
@@ -4641,8 +4558,6 @@ included in Python's standard library by default.
You can find an overview of supported drivers for each SQL dialect in the
`SQLAlchemy docs <http://docs.sqlalchemy.org/en/latest/dialects/index.html>`__.
-.. versionadded:: 0.14.0
-
If SQLAlchemy is not installed, a fallback is only provided for sqlite (and
for mysql for backwards compatibility, but this is deprecated and will be
removed in a future version).
@@ -4809,8 +4724,6 @@ You can check if a table exists using :func:`~pandas.io.sql.has_table`
Schema support
''''''''''''''
-.. versionadded:: 0.15.0
-
Reading from and writing to different schema's is supported through the ``schema``
keyword in the :func:`~pandas.read_sql_table` and :func:`~pandas.DataFrame.to_sql`
functions. Note however that this depends on the database flavor (sqlite does not
@@ -4975,8 +4888,6 @@ Full documentation can be found `here <https://pandas-gbq.readthedocs.io/>`__
Stata Format
------------
-.. versionadded:: 0.12.0
-
.. _io.stata_writer:
Writing to Stata format
@@ -5040,8 +4951,6 @@ be used to read the file incrementally.
pd.read_stata('stata.dta')
-.. versionadded:: 0.16.0
-
Specifying a ``chunksize`` yields a
:class:`~pandas.io.stata.StataReader` instance that can be used to
read ``chunksize`` lines from the file at a time. The ``StataReader``
@@ -5099,8 +5008,6 @@ values will have ``object`` data type.
Categorical Data
++++++++++++++++
-.. versionadded:: 0.15.2
-
``Categorical`` data can be exported to *Stata* data files as value labeled data.
The exported data consists of the underlying category codes as integer data values
and the categories as value labels. *Stata* does not have an explicit equivalent
diff --git a/doc/source/merging.rst b/doc/source/merging.rst
index d956f1ca54e6b..72787ea97a782 100644
--- a/doc/source/merging.rst
+++ b/doc/source/merging.rst
@@ -1053,8 +1053,6 @@ As you can see, this drops any rows where there was no match.
Joining a single Index to a Multi-index
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.14.0
-
You can join a singly-indexed ``DataFrame`` with a level of a multi-indexed ``DataFrame``.
The level will match on the name of the index of the singly-indexed frame against
a level name of the multi-indexed frame.
@@ -1331,7 +1329,7 @@ By default we are taking the asof of the quotes.
on='time',
by='ticker')
-We only asof within ``2ms`` betwen the quote time and the trade time.
+We only asof within ``2ms`` between the quote time and the trade time.
.. ipython:: python
@@ -1340,8 +1338,8 @@ We only asof within ``2ms`` betwen the quote time and the trade time.
by='ticker',
tolerance=pd.Timedelta('2ms'))
-We only asof within ``10ms`` betwen the quote time and the trade time and we exclude exact matches on time.
-Note that though we exclude the exact matches (of the quotes), prior quotes DO propogate to that point
+We only asof within ``10ms`` between the quote time and the trade time and we exclude exact matches on time.
+Note that though we exclude the exact matches (of the quotes), prior quotes DO propagate to that point
in time.
.. ipython:: python
diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index d54288baa389b..b33b5c304853a 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -67,9 +67,8 @@ arise and we wish to also consider that "missing" or "not available" or "NA".
.. note::
- Prior to version v0.10.0 ``inf`` and ``-inf`` were also
- considered to be "NA" in computations. This is no longer the case by
- default; use the ``mode.use_inf_as_na`` option to recover it.
+ If you want to consider ``inf`` and ``-inf`` to be "NA" in computations,
+ you can set ``pandas.options.mode.use_inf_as_na = True``.
.. _missing.isna:
@@ -264,8 +263,6 @@ and ``bfill()`` is equivalent to ``fillna(method='bfill')``
Filling with a PandasObject
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.12
-
You can also fillna using a dict or Series that is alignable. The labels of the dict or index of the Series
must match the columns of the frame you wish to fill. The
use case of this is to fill a DataFrame with the mean of that column.
@@ -281,8 +278,6 @@ use case of this is to fill a DataFrame with the mean of that column.
dff.fillna(dff.mean())
dff.fillna(dff.mean()['B':'C'])
-.. versionadded:: 0.13
-
Same result as above, but is aligning the 'fill' value which is
a Series in this case.
@@ -321,16 +316,11 @@ examined :ref:`in the API <api.dataframe.missing>`.
Interpolation
~~~~~~~~~~~~~
-.. versionadded:: 0.13.0
-
- :meth:`~pandas.DataFrame.interpolate`, and :meth:`~pandas.Series.interpolate` have
- revamped interpolation methods and functionality.
-
.. versionadded:: 0.17.0
The ``limit_direction`` keyword argument was added.
-Both Series and Dataframe objects have an ``interpolate`` method that, by default,
+Both Series and DataFrame objects have an ``interpolate`` method that, by default,
performs linear interpolation at missing datapoints.
.. ipython:: python
@@ -485,8 +475,8 @@ respectively:
Replacing Generic Values
~~~~~~~~~~~~~~~~~~~~~~~~
-Often times we want to replace arbitrary values with other values. New in v0.8
-is the ``replace`` method in Series/DataFrame that provides an efficient yet
+Often times we want to replace arbitrary values with other values. The
+``replace`` method in Series/DataFrame provides an efficient yet
flexible way to perform such replacements.
For a Series, you can replace a single value or a list of values by another
diff --git a/doc/source/options.rst b/doc/source/options.rst
index 51d02bc89692a..f042e4d3f5120 100644
--- a/doc/source/options.rst
+++ b/doc/source/options.rst
@@ -306,16 +306,16 @@ display.float_format None The callable should accept a fl
See core.format.EngFormatter for an example.
display.large_repr truncate For DataFrames exceeding max_rows/max_cols,
the repr (and HTML repr) can show
- a truncated table (the default from 0.13),
+ a truncated table (the default),
or switch to the view from df.info()
(the behaviour in earlier versions of pandas).
allowable settings, ['truncate', 'info']
display.latex.repr False Whether to produce a latex DataFrame
representation for jupyter frontends
that support it.
-display.latex.escape True Escapes special caracters in Dataframes, when
+display.latex.escape True Escapes special characters in DataFrames, when
using the to_latex method.
-display.latex.longtable False Specifies if the to_latex method of a Dataframe
+display.latex.longtable False Specifies if the to_latex method of a DataFrame
uses the longtable format.
display.latex.multicolumn True Combines columns when using a MultiIndex
display.latex.multicolumn_format 'l' Alignment of multicolumn labels
diff --git a/doc/source/remote_data.rst b/doc/source/remote_data.rst
index 7980133582125..9af66058a7aaa 100644
--- a/doc/source/remote_data.rst
+++ b/doc/source/remote_data.rst
@@ -11,14 +11,13 @@ Remote Data Access
DataReader
----------
-The sub-package ``pandas.io.data`` is removed in favor of a separately
-installable `pandas-datareader package
+The sub-package ``pandas.io.data`` was deprecated in v.0.17 and removed in
+`v.0.19 <http://pandas-docs.github.io/pandas-docs-travis/whatsnew.html#v0-19-0-october-2-2016>`__.
+Instead, there is a separately installable `pandas-datareader package
<https://github.com/pydata/pandas-datareader>`_. This will allow the data
-modules to be independently updated to your pandas installation. The API for
-``pandas-datareader v0.1.1`` is the same as in ``pandas v0.16.1``.
-(:issue:`8961`)
+modules to be updated independently of your pandas installation.
- You should replace the imports of the following:
+For pandas versions older than 0.19, you should replace the imports of the following:
.. code-block:: python
diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst
index 3dce73b302c7c..1209c4a8d6be8 100644
--- a/doc/source/reshaping.rst
+++ b/doc/source/reshaping.rst
@@ -156,7 +156,7 @@ the level numbers:
stacked.unstack('second')
Notice that the ``stack`` and ``unstack`` methods implicitly sort the index
-levels involved. Hence a call to ``stack`` and then ``unstack``, or viceversa,
+levels involved. Hence a call to ``stack`` and then ``unstack``, or vice versa,
will result in a **sorted** copy of the original DataFrame or Series:
.. ipython:: python
@@ -569,8 +569,6 @@ This function is often used along with discretization functions like ``cut``:
See also :func:`Series.str.get_dummies <pandas.Series.str.get_dummies>`.
-.. versionadded:: 0.15.0
-
:func:`get_dummies` also accepts a DataFrame. By default all categorical
variables (categorical in the statistical sense,
those with `object` or `categorical` dtype) are encoded as dummy variables.
@@ -675,4 +673,4 @@ handling of NaN:
you can use ``df["cat_col"] = pd.Categorical(df["col"])`` or
``df["cat_col"] = df["col"].astype("category")``. For full docs on :class:`~pandas.Categorical`,
see the :ref:`Categorical introduction <categorical>` and the
- :ref:`API documentation <api.categorical>`. This feature was introduced in version 0.15.
+ :ref:`API documentation <api.categorical>`.
diff --git a/doc/source/sparse.rst b/doc/source/sparse.rst
index b4884cf1c4141..89efa7b4be3ee 100644
--- a/doc/source/sparse.rst
+++ b/doc/source/sparse.rst
@@ -132,7 +132,7 @@ dtype, ``fill_value`` default changes:
s.to_sparse()
You can change the dtype using ``.astype()``, the result is also sparse. Note that
-``.astype()`` also affects to the ``fill_value`` to keep its dense represantation.
+``.astype()`` also applies to the ``fill_value`` to keep its dense representation.
.. ipython:: python
@@ -216,8 +216,6 @@ To convert a ``SparseDataFrame`` back to sparse SciPy matrix in COO format, you
SparseSeries
~~~~~~~~~~~~
-.. versionadded:: 0.16.0
-
A :meth:`SparseSeries.to_coo` method is implemented for transforming a ``SparseSeries`` indexed by a ``MultiIndex`` to a ``scipy.sparse.coo_matrix``.
The method requires a ``MultiIndex`` with two or more levels.
diff --git a/doc/source/style.ipynb b/doc/source/style.ipynb
index c250787785e14..1d6ce163cf977 100644
--- a/doc/source/style.ipynb
+++ b/doc/source/style.ipynb
@@ -169,7 +169,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Notice the similarity with the standard `df.applymap`, which operates on DataFrames elementwise. We want you to be able to resuse your existing knowledge of how to interact with DataFrames.\n",
+ "Notice the similarity with the standard `df.applymap`, which operates on DataFrames elementwise. We want you to be able to reuse your existing knowledge of how to interact with DataFrames.\n",
"\n",
"Notice also that our function returned a string containing the CSS attribute and value, separated by a colon just like in a `<style>` tag. This will be a common theme.\n",
"\n",
diff --git a/doc/source/text.rst b/doc/source/text.rst
index e3e4b24d17f44..85b8aa6aa1857 100644
--- a/doc/source/text.rst
+++ b/doc/source/text.rst
@@ -211,8 +211,6 @@ Extracting Substrings
Extract first match in each subject (extract)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. versionadded:: 0.13.0
-
.. warning::
In version 0.18.0, ``extract`` gained the ``expand`` argument. When
diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst
index 07effcfdff33b..d055c49dc4721 100644
--- a/doc/source/timedeltas.rst
+++ b/doc/source/timedeltas.rst
@@ -23,13 +23,12 @@
Time Deltas
***********
-.. note::
-
- Starting in v0.15.0, we introduce a new scalar type ``Timedelta``, which is a subclass of ``datetime.timedelta``, and behaves in a similar manner,
- but allows compatibility with ``np.timedelta64`` types as well as a host of custom representation, parsing, and attributes.
+Timedeltas are differences in times, expressed in difference units, e.g. days, hours, minutes,
+seconds. They can be both positive and negative.
-Timedeltas are differences in times, expressed in difference units, e.g. days, hours, minutes, seconds.
-They can be both positive and negative.
+``Timedelta`` is a subclass of ``datetime.timedelta``, and behaves in a similar manner,
+but allows compatibility with ``np.timedelta64`` types as well as a host of custom representation,
+parsing, and attributes.
Parsing
-------
@@ -78,15 +77,10 @@ Further, operations among the scalars yield another scalar ``Timedelta``.
to_timedelta
~~~~~~~~~~~~
-.. warning::
-
- Prior to 0.15.0 ``pd.to_timedelta`` would return a ``Series`` for list-like/Series input, and a ``np.timedelta64`` for scalar input.
- It will now return a ``TimedeltaIndex`` for list-like input, ``Series`` for Series input, and ``Timedelta`` for scalar input.
-
- The arguments to ``pd.to_timedelta`` are now ``(arg, unit='ns', box=True)``, previously were ``(arg, box=True, unit='ns')`` as these are more logical.
-
-Using the top-level ``pd.to_timedelta``, you can convert a scalar, array, list, or Series from a recognized timedelta format / value into a ``Timedelta`` type.
-It will construct Series if the input is a Series, a scalar if the input is scalar-like, otherwise will output a ``TimedeltaIndex``.
+Using the top-level ``pd.to_timedelta``, you can convert a scalar, array, list,
+or Series from a recognized timedelta format / value into a ``Timedelta`` type.
+It will construct Series if the input is a Series, a scalar if the input is
+scalar-like, otherwise it will output a ``TimedeltaIndex``.
You can parse a single string to a Timedelta:
@@ -242,8 +236,6 @@ Numeric reduction operation for ``timedelta64[ns]`` will return ``Timedelta`` ob
Frequency Conversion
--------------------
-.. versionadded:: 0.13
-
Timedelta Series, ``TimedeltaIndex``, and ``Timedelta`` scalars can be converted to other 'frequencies' by dividing by another timedelta,
or by astyping to a specific timedelta type. These operations yield Series and propagate ``NaT`` -> ``nan``.
Note that division by the numpy scalar is true division, while astyping is equivalent of floor division.
@@ -330,8 +322,6 @@ You can convert a ``Timedelta`` to an `ISO 8601 Duration`_ string with the
TimedeltaIndex
--------------
-.. versionadded:: 0.15.0
-
To generate an index with time delta, you can use either the ``TimedeltaIndex`` or
the ``timedelta_range`` constructor.
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index ce4a920ad77b5..3b8f105bb1b47 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -177,7 +177,7 @@ you can pass the ``dayfirst`` flag:
.. note::
Specifying a ``format`` argument will potentially speed up the conversion
- considerably and on versions later then 0.13.0 explicitly specifying
+ considerably, and explicitly specifying
a format string of '%Y%m%d' takes a faster path still.
If you pass a single string to ``to_datetime``, it returns single ``Timestamp``.
@@ -1054,7 +1054,7 @@ as ``BusinessHour`` except that it skips specified custom holidays.
# Tuesday after MLK Day (Monday is skipped because it's a holiday)
dt + bhour_us * 2
-You can use keyword arguments suported by either ``BusinessHour`` and ``CustomBusinessDay``.
+You can use keyword arguments supported by either ``BusinessHour`` and ``CustomBusinessDay``.
.. ipython:: python
@@ -1069,8 +1069,7 @@ Offset Aliases
~~~~~~~~~~~~~~
A number of string aliases are given to useful common time series
-frequencies. We will refer to these aliases as *offset aliases*
-(referred to as *time rules* prior to v0.8.0).
+frequencies. We will refer to these aliases as *offset aliases*.
.. csv-table::
:header: "Alias", "Description"
@@ -1089,7 +1088,7 @@ frequencies. We will refer to these aliases as *offset aliases*
"BMS", "business month start frequency"
"CBMS", "custom business month start frequency"
"Q", "quarter end frequency"
- "BQ", "business quarter endfrequency"
+ "BQ", "business quarter end frequency"
"QS", "quarter start frequency"
"BQS", "business quarter start frequency"
"A, Y", "year end frequency"
@@ -1133,13 +1132,13 @@ For some frequencies you can specify an anchoring suffix:
:header: "Alias", "Description"
:widths: 15, 100
- "W\-SUN", "weekly frequency (sundays). Same as 'W'"
- "W\-MON", "weekly frequency (mondays)"
- "W\-TUE", "weekly frequency (tuesdays)"
- "W\-WED", "weekly frequency (wednesdays)"
- "W\-THU", "weekly frequency (thursdays)"
- "W\-FRI", "weekly frequency (fridays)"
- "W\-SAT", "weekly frequency (saturdays)"
+ "W\-SUN", "weekly frequency (Sundays). Same as 'W'"
+ "W\-MON", "weekly frequency (Mondays)"
+ "W\-TUE", "weekly frequency (Tuesdays)"
+ "W\-WED", "weekly frequency (Wednesdays)"
+ "W\-THU", "weekly frequency (Thursdays)"
+ "W\-FRI", "weekly frequency (Fridays)"
+ "W\-SAT", "weekly frequency (Saturdays)"
"(B)Q(S)\-DEC", "quarterly frequency, year ends in December. Same as 'Q'"
"(B)Q(S)\-JAN", "quarterly frequency, year ends in January"
"(B)Q(S)\-FEB", "quarterly frequency, year ends in February"
@@ -1706,6 +1705,15 @@ has multiplied span.
pd.PeriodIndex(start='2014-01', freq='3M', periods=4)
+If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
+endpoints for a ``PeriodIndex`` with frequency matching that of the
+``PeriodIndex`` constructor.
+
+.. ipython:: python
+
+ pd.PeriodIndex(start=pd.Period('2017Q1', freq='Q'),
+ end=pd.Period('2017Q2', freq='Q'), freq='M')
+
Just like ``DatetimeIndex``, a ``PeriodIndex`` can also be used to index pandas
objects:
@@ -1947,9 +1955,11 @@ These can easily be converted to a ``PeriodIndex``
Time Zone Handling
------------------
-Pandas provides rich support for working with timestamps in different time zones using ``pytz`` and ``dateutil`` libraries.
-``dateutil`` support is new in 0.14.1 and currently only supported for fixed offset and tzfile zones. The default library is ``pytz``.
-Support for ``dateutil`` is provided for compatibility with other applications e.g. if you use ``dateutil`` in other python packages.
+Pandas provides rich support for working with timestamps in different time
+zones using ``pytz`` and ``dateutil`` libraries. ``dateutil`` currently is only
+supported for fixed offset and tzfile zones. The default library is ``pytz``.
+Support for ``dateutil`` is provided for compatibility with other
+applications e.g. if you use ``dateutil`` in other python packages.
Working with Time Zones
~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index fb799c642131d..82ad8de93514e 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -229,8 +229,6 @@ To get horizontal bar plots, use the ``barh`` method:
Histograms
~~~~~~~~~~
-.. versionadded:: 0.15.0
-
Histogram can be drawn by using the :meth:`DataFrame.plot.hist` and :meth:`Series.plot.hist` methods.
.. ipython:: python
@@ -263,7 +261,7 @@ Histogram can be stacked by ``stacked=True``. Bin size can be changed by ``bins`
plt.close('all')
-You can pass other keywords supported by matplotlib ``hist``. For example, horizontal and cumulative histgram can be drawn by ``orientation='horizontal'`` and ``cumulative='True'``.
+You can pass other keywords supported by matplotlib ``hist``. For example, horizontal and cumulative histogram can be drawn by ``orientation='horizontal'`` and ``cumulative=True``.
.. ipython:: python
@@ -306,8 +304,6 @@ subplots:
df.diff().hist(color='k', alpha=0.5, bins=50)
-.. versionadded:: 0.10.0
-
The ``by`` keyword can be specified to plot grouped histograms:
.. ipython:: python
@@ -330,8 +326,6 @@ The ``by`` keyword can be specified to plot grouped histograms:
Box Plots
~~~~~~~~~
-.. versionadded:: 0.15.0
-
Boxplot can be drawn calling :meth:`Series.plot.box` and :meth:`DataFrame.plot.box`,
or :meth:`DataFrame.boxplot` to visualize the distribution of values within each column.
@@ -514,8 +508,6 @@ Compare to:
Area Plot
~~~~~~~~~
-.. versionadded:: 0.14
-
You can create area plots with :meth:`Series.plot.area` and :meth:`DataFrame.plot.area`.
Area plots are stacked by default. To produce stacked area plot, each column must be either all positive or all negative values.
@@ -552,8 +544,6 @@ To produce an unstacked plot, pass ``stacked=False``. Alpha value is set to 0.5
Scatter Plot
~~~~~~~~~~~~
-.. versionadded:: 0.13
-
Scatter plot can be drawn by using the :meth:`DataFrame.plot.scatter` method.
Scatter plot requires numeric columns for x and y axis.
These can be specified by ``x`` and ``y`` keywords each.
@@ -621,8 +611,6 @@ See the :meth:`scatter <matplotlib.axes.Axes.scatter>` method and the
Hexagonal Bin Plot
~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.14
-
You can create hexagonal bin plots with :meth:`DataFrame.plot.hexbin`.
Hexbin plots can be a useful alternative to scatter plots if your data are
too dense to plot each point individually.
@@ -684,8 +672,6 @@ See the :meth:`hexbin <matplotlib.axes.Axes.hexbin>` method and the
Pie plot
~~~~~~~~
-.. versionadded:: 0.14
-
You can create a pie plot with :meth:`DataFrame.plot.pie` or :meth:`Series.plot.pie`.
If your data includes any ``NaN``, they will be automatically filled with 0.
A ``ValueError`` will be raised if there are any negative values in your data.
@@ -831,8 +817,6 @@ and take a :class:`Series` or :class:`DataFrame` as an argument.
Scatter Matrix Plot
~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.7.3
-
You can create a scatter plot matrix using the
``scatter_matrix`` method in ``pandas.plotting``:
@@ -859,8 +843,6 @@ You can create a scatter plot matrix using the
Density Plot
~~~~~~~~~~~~
-.. versionadded:: 0.8.0
-
You can create density plots using the :meth:`Series.plot.kde` and :meth:`DataFrame.plot.kde` methods.
.. ipython:: python
@@ -1371,8 +1353,6 @@ Another option is passing an ``ax`` argument to :meth:`Series.plot` to plot on a
Plotting With Error Bars
~~~~~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.14
-
Plotting with error bars is now supported in the :meth:`DataFrame.plot` and :meth:`Series.plot`
Horizontal and vertical errorbars can be supplied to the ``xerr`` and ``yerr`` keyword arguments to :meth:`~DataFrame.plot()`. The error values can be specified using a variety of formats.
@@ -1413,8 +1393,6 @@ Here is an example of one way to easily plot group means with standard deviation
Plotting Tables
~~~~~~~~~~~~~~~
-.. versionadded:: 0.14
-
Plotting with matplotlib table is now supported in :meth:`DataFrame.plot` and :meth:`Series.plot` with a ``table`` keyword. The ``table`` keyword can accept ``bool``, :class:`DataFrame` or :class:`Series`. The simple way to draw a table is to specify ``table=True``. Data will be transposed to meet matplotlib's default layout.
.. ipython:: python
@@ -1591,10 +1569,6 @@ available in matplotlib. Although this formatting does not provide the same
level of refinement you would get when plotting via pandas, it can be faster
when plotting a large number of points.
-.. note::
-
- The speed up for large data sets only applies to pandas 0.14.0 and later.
-
.. ipython:: python
:suppress:
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 9d475390175b2..fe24f8f499172 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -1705,6 +1705,7 @@ Reshaping
- Bug in ``pd.concat()`` in which concatenating with an empty dataframe with ``join='inner'`` was being improperly handled (:issue:`15328`)
- Bug with ``sort=True`` in ``DataFrame.join`` and ``pd.merge`` when joining on indexes (:issue:`15582`)
- Bug in ``DataFrame.nsmallest`` and ``DataFrame.nlargest`` where identical values resulted in duplicated rows (:issue:`15297`)
+- Bug in :func:`pandas.pivot_table` incorrectly raising ``UnicodeError`` when passing unicode input for the ``margins`` keyword (:issue:`13292`)
Numeric
^^^^^^^
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index e0963a1908bbc..722e19d2703b5 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -67,8 +67,8 @@ Improved warnings when attempting to create columns
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
New users are often flummoxed by the relationship between column operations and attribute
-access on ``DataFrame`` instances (:issue:`5904` & :issue:`7175`). Two specific instances
-of this confusion include attempting to create a new column by setting into an attribute:
+access on ``DataFrame`` instances (:issue:`7175`). One specific instance
+of this confusion is attempting to create a new column by setting into an attribute:
.. code-block:: ipython
@@ -86,25 +86,7 @@ This does not raise any obvious exceptions, but also does not create a new colum
1 2.0
2 3.0
-The second source of confusion is creating a column whose name collides with a method or
-attribute already in the instance namespace:
-
-.. code-block:: ipython
-
- In[4]: df['sum'] = [5., 7., 9.]
-
-This does not permit that column to be accessed as an attribute:
-
-.. code-block:: ipython
-
- In[5]: df.sum
- Out[5]:
- <bound method DataFrame.sum of one sum
- 0 1.0 5.0
- 1 2.0 7.0
- 2 3.0 9.0>
-
-Both of these now raise a ``UserWarning`` about the potential for unexpected behavior. See :ref:`Attribute Access <indexing.attribute_access>`.
+Setting a list-like data structure into a new attribute now raises a ``UserWarning`` about the potential for unexpected behavior. See :ref:`Attribute Access <indexing.attribute_access>`.
.. _whatsnew_0210.enhancements.other:
@@ -125,11 +107,12 @@ Other Enhancements
- :func:`DataFrame.select_dtypes` now accepts scalar values for include/exclude as well as list-like. (:issue:`16855`)
- :func:`date_range` now accepts 'YS' in addition to 'AS' as an alias for start of year (:issue:`9313`)
- :func:`date_range` now accepts 'Y' in addition to 'A' as an alias for end of year (:issue:`9313`)
-- Integration with `Apache Parquet <https://parquet.apache.org/>`__, including a new top-level :func:`read_parquet` and :func:`DataFrame.to_parquet` method, see :ref:`here <io.parquet>`.
+- Integration with `Apache Parquet <https://parquet.apache.org/>`__, including a new top-level :func:`read_parquet` and :func:`DataFrame.to_parquet` method, see :ref:`here <io.parquet>`. (:issue:`15838`, :issue:`17438`)
- :func:`DataFrame.add_prefix` and :func:`DataFrame.add_suffix` now accept strings containing the '%' character. (:issue:`17151`)
- `read_*` methods can now infer compression from non-string paths, such as ``pathlib.Path`` objects (:issue:`17206`).
- :func:`pd.read_sas()` now recognizes much more of the most frequently used date (datetime) formats in SAS7BDAT files (:issue:`15871`).
- :func:`DataFrame.items` and :func:`Series.items` is now present in both Python 2 and 3 and is lazy in all cases (:issue:`13918`, :issue:`17213`)
+- :func:`Styler.where` has been implemented. It is a convenience for :func:`Styler.applymap` and enables simple DataFrame styling on the Jupyter notebook (:issue:`17474`).
@@ -205,6 +188,53 @@ the target. Now, a ``ValueError`` will be raised when such an input is passed in
...
ValueError: Cannot operate inplace if there is no assignment
+.. _whatsnew_0210.api_breaking.iteration_scalars:
+
+Iteration of Series/Index will now return Python scalars
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Previously, when using certain iteration methods for a ``Series`` with dtype ``int`` or ``float``, you would receive a ``numpy`` scalar, e.g. a ``np.int64``, rather than a Python ``int``. Issue (:issue:`10904`) corrected this for ``Series.tolist()`` and ``list(Series)``. This change makes all iteration methods consistent, in particular, for ``__iter__()`` and ``.map()``; note that this only affects int/float dtypes. (:issue:`13236`, :issue:`13258`, :issue:`14216`).
+
+.. ipython:: python
+
+ s = pd.Series([1, 2, 3])
+ s
+
+Previously:
+
+.. code-block:: ipython
+
+ In [2]: type(list(s)[0])
+ Out[2]: numpy.int64
+
+New Behavior:
+
+.. ipython:: python
+
+ type(list(s)[0])
+
+Furthermore this will now correctly box the results of iteration for :func:`DataFrame.to_dict` as well.
+
+.. ipython:: python
+
+ d = {'a':[1], 'b':['b']}
+ df = pd.DataFrame(d)
+
+Previously:
+
+.. code-block:: ipython
+
+ In [8]: type(df.to_dict()['a'][0])
+ Out[8]: numpy.int64
+
+New Behavior:
+
+.. ipython:: python
+
+ type(df.to_dict()['a'][0])
+
+.. _whatsnew_0210.api_breaking.dtype_conversions:
+
Dtype Conversions
^^^^^^^^^^^^^^^^^
@@ -328,6 +358,59 @@ Previously, :func:`to_datetime` did not localize datetime ``Series`` data when `
Additionally, DataFrames with datetime columns that were parsed by :func:`read_sql_table` and :func:`read_sql_query` will also be localized to UTC only if the original SQL columns were timezone aware datetime columns.
+.. _whatsnew_0210.api.consistency_of_range_functions:
+
+Consistency of Range Functions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In previous versions, there were some inconsistencies between the various range functions: :func:`date_range`, :func:`bdate_range`, :func:`cdate_range`, :func:`period_range`, :func:`timedelta_range`, and :func:`interval_range`. (:issue:`17471`).
+
+One of the inconsistent behaviors occurred when the ``start``, ``end`` and ``periods`` parameters were all specified, potentially leading to ambiguous ranges. When all three parameters were passed, ``interval_range`` ignored the ``periods`` parameter, ``period_range`` ignored the ``end`` parameter, and the other range functions raised. To promote consistency among the range functions, and avoid potentially ambiguous ranges, ``interval_range`` and ``period_range`` will now raise when all three parameters are passed.
+
+Previous Behavior:
+
+.. code-block:: ipython
+
+ In [2]: pd.interval_range(start=0, end=4, periods=6)
+ Out[2]:
+ IntervalIndex([(0, 1], (1, 2], (2, 3]]
+ closed='right',
+ dtype='interval[int64]')
+
+ In [3]: pd.period_range(start='2017Q1', end='2017Q4', periods=6, freq='Q')
+ Out[3]: PeriodIndex(['2017Q1', '2017Q2', '2017Q3', '2017Q4', '2018Q1', '2018Q2'], dtype='period[Q-DEC]', freq='Q-DEC')
+
+New Behavior:
+
+.. code-block:: ipython
+
+ In [2]: pd.interval_range(start=0, end=4, periods=6)
+ ---------------------------------------------------------------------------
+ ValueError: Of the three parameters: start, end, and periods, exactly two must be specified
+
+ In [3]: pd.period_range(start='2017Q1', end='2017Q4', periods=6, freq='Q')
+ ---------------------------------------------------------------------------
+ ValueError: Of the three parameters: start, end, and periods, exactly two must be specified
+
+Additionally, the endpoint parameter ``end`` was not included in the intervals produced by ``interval_range``. However, all other range functions include ``end`` in their output. To promote consistency among the range functions, ``interval_range`` will now include ``end`` as the right endpoint of the final interval, except if ``freq`` is specified in a way which skips ``end``.
+
+Previous Behavior:
+
+.. code-block:: ipython
+
+ In [4]: pd.interval_range(start=0, end=4)
+ Out[4]:
+ IntervalIndex([(0, 1], (1, 2], (2, 3]]
+ closed='right',
+ dtype='interval[int64]')
+
+
+New Behavior:
+
+.. ipython:: python
+
+ pd.interval_range(start=0, end=4)
+
.. _whatsnew_0210.api:
Other API Changes
@@ -348,6 +431,7 @@ Other API Changes
- :func:`Series.argmin` and :func:`Series.argmax` will now raise a ``TypeError`` when used with ``object`` dtypes, instead of a ``ValueError`` (:issue:`13595`)
- :class:`Period` is now immutable, and will now raise an ``AttributeError`` when a user tries to assign a new value to the ``ordinal`` or ``freq`` attributes (:issue:`17116`).
- :func:`to_datetime` when passed a tz-aware ``origin=`` kwarg will now raise a more informative ``ValueError`` rather than a ``TypeError`` (:issue:`16842`)
+- Renamed non-functional ``index`` to ``index_col`` in :func:`read_stata` to improve API consistency (:issue:`16342`)
.. _whatsnew_0210.deprecations:
@@ -358,6 +442,8 @@ Deprecations
- ``pd.options.html.border`` has been deprecated in favor of ``pd.options.display.html.border`` (:issue:`15793`).
+- :func:`SeriesGroupBy.nth` has deprecated ``True`` in favor of ``'all'`` for its kwarg ``dropna`` (:issue:`11038`).
+
.. _whatsnew_0210.prior_deprecations:
Removal of prior version deprecations/changes
@@ -371,6 +457,8 @@ Removal of prior version deprecations/changes
- ``Categorical`` has dropped the ``.order()`` and ``.sort()`` methods in favor of ``.sort_values()`` (:issue:`12882`)
- :func:`eval` and :func:`DataFrame.eval` have changed the default of ``inplace`` from ``None`` to ``False`` (:issue:`11149`)
- The function ``get_offset_name`` has been dropped in favor of the ``.freqstr`` attribute for an offset (:issue:`11834`)
+- pandas no longer tests for compatibility with hdf5-files created with pandas < 0.11 (:issue:`17404`).
+
.. _whatsnew_0210.performance:
@@ -380,23 +468,24 @@ Performance Improvements
- Improved performance of instantiating :class:`SparseDataFrame` (:issue:`16773`)
- :attr:`Series.dt` no longer performs frequency inference, yielding a large speedup when accessing the attribute (:issue:`17210`)
-
+- Improved performance of :meth:`Categorical.set_categories` by not materializing the values (:issue:`17508`)
+- :attr:`Timestamp.microsecond` no longer re-computes on attribute access (:issue:`17331`)
+- Improved performance of the :class:`CategoricalIndex` for data that is already categorical dtype (:issue:`17513`)
.. _whatsnew_0210.bug_fixes:
Bug Fixes
~~~~~~~~~
-
Conversion
^^^^^^^^^^
- Bug in assignment against datetime-like data with ``int`` may incorrectly convert to datetime-like (:issue:`14145`)
- Bug in assignment against ``int64`` data with ``np.ndarray`` with ``float64`` dtype may keep ``int64`` dtype (:issue:`14001`)
-- Fix :func:`DataFrame.memory_usage` to support PyPy. Objects on PyPy do not have a fixed size, so an approximation is used instead (:issue:`17228`)
- Fixed the return type of ``IntervalIndex.is_non_overlapping_monotonic`` to be a Python ``bool`` for consistency with similar attributes/methods. Previously returned a ``numpy.bool_``. (:issue:`17237`)
- Bug in ``IntervalIndex.is_non_overlapping_monotonic`` when intervals are closed on both sides and overlap at a point (:issue:`16560`)
- Bug in :func:`Series.fillna` returns frame when ``inplace=True`` and ``value`` is dict (:issue:`16156`)
+- Bug in :attr:`Timestamp.weekday_name` returning a UTC-based weekday name when localized to a timezone (:issue:`17354`)
Indexing
^^^^^^^^
@@ -414,6 +503,8 @@ Indexing
- Bug in ``.iloc`` when used with inplace addition or assignment and an int indexer on a ``MultiIndex`` causing the wrong indexes to be read from and written to (:issue:`17148`)
- Bug in ``.isin()`` in which checking membership in empty ``Series`` objects raised an error (:issue:`16991`)
- Bug in ``CategoricalIndex`` reindexing in which specified indices containing duplicates were not being respected (:issue:`17323`)
+- Bug in intersection of ``RangeIndex`` with negative step (:issue:`17296`)
+- Bug in ``IntervalIndex`` where performing a scalar lookup fails for included right endpoints of non-overlapping monotonic decreasing indexes (:issue:`16417`, :issue:`17271`)
I/O
^^^
@@ -425,12 +516,14 @@ I/O
- Bug in :func:`read_csv` when called with ``low_memory=False`` in which a CSV with at least one column > 2GB in size would incorrectly raise a ``MemoryError`` (:issue:`16798`).
- Bug in :func:`read_csv` when called with a single-element list ``header`` would return a ``DataFrame`` of all NaN values (:issue:`7757`)
- Bug in :func:`read_stata` where value labels could not be read when using an iterator (:issue:`16923`)
+- Bug in :func:`read_stata` where the index was not set (:issue:`16342`)
- Bug in :func:`read_html` where import check fails when run in multiple threads (:issue:`16928`)
+- Bug in :func:`read_csv` where automatic delimiter detection caused a ``TypeError`` to be thrown when a bad line was encountered rather than the correct error message (:issue:`13374`)
Plotting
^^^^^^^^
- Bug in plotting methods using ``secondary_y`` and ``fontsize`` not setting secondary axis font size (:issue:`12565`)
-
+- Bug when plotting ``timedelta`` and ``datetime`` dtypes on y-axis (:issue:`16953`)
Groupby/Resample/Rolling
^^^^^^^^^^^^^^^^^^^^^^^^
@@ -476,8 +569,18 @@ Categorical
the ``.categories`` to be an empty ``Float64Index`` rather than an empty
``Index`` with object dtype (:issue:`17248`)
+PyPy
+^^^^
+
+- Compatibility with PyPy in :func:`read_csv` with ``usecols=[<unsorted ints>]`` and
+ :func:`read_json` (:issue:`17351`)
+- Split tests into cases for CPython and PyPy where needed, which highlights the fragility
+ of index matching with ``float('nan')``, ``np.nan`` and ``NAT`` (:issue:`17351`)
+- Fix :func:`DataFrame.memory_usage` to support PyPy. Objects on PyPy do not have a fixed size,
+ so an approximation is used instead (:issue:`17228`)
Other
^^^^^
- Bug in :func:`eval` where the ``inplace`` parameter was being incorrectly handled (:issue:`16732`)
- Several ``NaT`` method docstrings (e.g. :func:`NaT.ctime`) were incorrect (:issue:`17327`)
+- The documentation has had references to versions < v0.17 removed and cleaned up (:issue:`17442`, :issue:`17404` & :issue:`17504`)
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 42ba0c1cadaec..884117799ec5b 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -17,6 +17,7 @@ cimport tslib
from hashtable cimport HashTable
+from tslibs.timezones cimport is_utc, get_utcoffset
from pandas._libs import tslib, algos, hashtable as _hash
from pandas._libs.tslib import Timestamp, Timedelta
from datetime import datetime, timedelta
@@ -32,9 +33,6 @@ cdef extern from "datetime.h":
cdef int64_t iNaT = util.get_nat()
-from dateutil.tz import tzutc as _du_utc
-import pytz
-UTC = pytz.utc
PyDateTime_IMPORT
@@ -553,15 +551,12 @@ cdef inline _to_i8(object val):
tzinfo = getattr(val, 'tzinfo', None)
# Save the original date value so we can get the utcoffset from it.
ival = _pydatetime_to_dts(val, &dts)
- if tzinfo is not None and not _is_utc(tzinfo):
- offset = tslib._get_utcoffset(tzinfo, val)
+ if tzinfo is not None and not is_utc(tzinfo):
+ offset = get_utcoffset(tzinfo, val)
ival -= tslib._delta_to_nanoseconds(offset)
return ival
return val
-cdef inline bint _is_utc(object tz):
- return tz is UTC or isinstance(tz, _du_utc)
-
cdef class MultiIndexObjectEngine(ObjectEngine):
"""
diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx
index 816b7ebfff86d..49353f7b0491c 100644
--- a/pandas/_libs/period.pyx
+++ b/pandas/_libs/period.pyx
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
from datetime import datetime, date, timedelta
import operator
@@ -9,17 +10,16 @@ from cpython cimport (
from numpy cimport (int8_t, int32_t, int64_t, import_array, ndarray,
NPY_INT64, NPY_DATETIME, NPY_TIMEDELTA)
import numpy as np
+import_array()
from libc.stdlib cimport free
-from pandas import compat
from pandas.compat import PY2
cimport cython
from datetime cimport (
is_leapyear,
- PyDateTime_IMPORT,
pandas_datetimestruct,
pandas_datetimestruct_to_datetime,
pandas_datetime_to_datetimestruct,
@@ -28,17 +28,17 @@ from datetime cimport (
cimport util, lib
+from util cimport is_period_object, is_string_object
from lib cimport is_null_datetimelike, is_period
from pandas._libs import tslib, lib
from pandas._libs.tslib import (Timedelta, Timestamp, iNaT,
- NaT, _get_utcoffset)
-from tslib cimport (
- maybe_get_tz,
- _is_utc,
- _is_tzlocal,
- _get_dst_info,
- _nat_scalar_rules)
+ NaT)
+from tslibs.timezones cimport (
+ is_utc, is_tzlocal, get_utcoffset, _get_dst_info, maybe_get_tz)
+from tslib cimport _nat_scalar_rules
+
+from tslibs.frequencies cimport get_freq_code
from pandas.tseries import offsets
from pandas.core.tools.datetimes import parse_time_string
@@ -105,6 +105,8 @@ cdef extern from "period_helper.h":
int pday(int64_t ordinal, int freq) except INT32_MIN
int pweekday(int64_t ordinal, int freq) except INT32_MIN
int pday_of_week(int64_t ordinal, int freq) except INT32_MIN
+ # TODO: pday_of_week and pweekday are identical. Make one an alias instead
+ # of importing them separately.
int pday_of_year(int64_t ordinal, int freq) except INT32_MIN
int pweek(int64_t ordinal, int freq) except INT32_MIN
int phour(int64_t ordinal, int freq) except INT32_MIN
@@ -328,8 +330,6 @@ cdef list str_extra_fmts = ["^`AB`^", "^`CD`^", "^`EF`^",
"^`GH`^", "^`IJ`^", "^`KL`^"]
cdef object _period_strftime(int64_t value, int freq, object fmt):
- import sys
-
cdef:
Py_ssize_t i
date_info dinfo
@@ -533,7 +533,7 @@ cdef _reso_local(ndarray[int64_t] stamps, object tz):
ndarray[int64_t] trans, deltas, pos
pandas_datetimestruct dts
- if _is_utc(tz):
+ if is_utc(tz):
for i in range(n):
if stamps[i] == NPY_NAT:
continue
@@ -541,7 +541,7 @@ cdef _reso_local(ndarray[int64_t] stamps, object tz):
curr_reso = _reso_stamp(&dts)
if curr_reso < reso:
reso = curr_reso
- elif _is_tzlocal(tz):
+ elif is_tzlocal(tz):
for i in range(n):
if stamps[i] == NPY_NAT:
continue
@@ -549,7 +549,7 @@ cdef _reso_local(ndarray[int64_t] stamps, object tz):
&dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz)
- delta = int(_get_utcoffset(tz, dt).total_seconds()) * 1000000000
+ delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000
pandas_datetime_to_datetimestruct(stamps[i] + delta,
PANDAS_FR_ns, &dts)
curr_reso = _reso_stamp(&dts)
@@ -597,7 +597,7 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps,
ndarray[int64_t] trans, deltas, pos
pandas_datetimestruct dts
- if _is_utc(tz):
+ if is_utc(tz):
for i in range(n):
if stamps[i] == NPY_NAT:
result[i] = NPY_NAT
@@ -607,7 +607,7 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps,
dts.hour, dts.min, dts.sec,
dts.us, dts.ps, freq)
- elif _is_tzlocal(tz):
+ elif is_tzlocal(tz):
for i in range(n):
if stamps[i] == NPY_NAT:
result[i] = NPY_NAT
@@ -616,7 +616,7 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps,
&dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz)
- delta = int(_get_utcoffset(tz, dt).total_seconds()) * 1000000000
+ delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000
pandas_datetime_to_datetimestruct(stamps[i] + delta,
PANDAS_FR_ns, &dts)
result[i] = get_period_ordinal(dts.year, dts.month, dts.day,
@@ -682,7 +682,7 @@ cdef class _Period(object):
def _maybe_convert_freq(cls, object freq):
if isinstance(freq, (int, tuple)):
- code, stride = frequencies.get_freq_code(freq)
+ code, stride = get_freq_code(freq)
freq = frequencies._get_freq_str(code, stride)
freq = frequencies.to_offset(freq)
@@ -706,7 +706,7 @@ cdef class _Period(object):
return self
def __richcmp__(self, other, op):
- if isinstance(other, Period):
+ if is_period_object(other):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
@@ -752,7 +752,7 @@ cdef class _Period(object):
return NotImplemented
def __add__(self, other):
- if isinstance(self, Period):
+ if is_period_object(self):
if isinstance(other, (timedelta, np.timedelta64,
offsets.DateOffset,
Timedelta)):
@@ -764,13 +764,13 @@ cdef class _Period(object):
return Period(ordinal=ordinal, freq=self.freq)
else: # pragma: no cover
return NotImplemented
- elif isinstance(other, Period):
+ elif is_period_object(other):
return other + self
else:
return NotImplemented
def __sub__(self, other):
- if isinstance(self, Period):
+ if is_period_object(self):
if isinstance(other, (timedelta, np.timedelta64,
offsets.DateOffset,
Timedelta)):
@@ -779,7 +779,7 @@ cdef class _Period(object):
elif lib.is_integer(other):
ordinal = self.ordinal - other * self.freq.n
return Period(ordinal=ordinal, freq=self.freq)
- elif isinstance(other, Period):
+ elif is_period_object(other):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
@@ -788,7 +788,7 @@ cdef class _Period(object):
return -other.__sub__(self)
else: # pragma: no cover
return NotImplemented
- elif isinstance(other, Period):
+ elif is_period_object(other):
if self is NaT:
return NaT
return NotImplemented
@@ -812,8 +812,8 @@ cdef class _Period(object):
"""
freq = self._maybe_convert_freq(freq)
how = _validate_end_alias(how)
- base1, mult1 = frequencies.get_freq_code(self.freq)
- base2, mult2 = frequencies.get_freq_code(freq)
+ base1, mult1 = get_freq_code(self.freq)
+ base2, mult2 = get_freq_code(freq)
# mult1 can't be negative or 0
end = how == 'E'
@@ -859,67 +859,90 @@ cdef class _Period(object):
how = _validate_end_alias(how)
if freq is None:
- base, mult = frequencies.get_freq_code(self.freq)
+ base, mult = get_freq_code(self.freq)
freq = frequencies.get_to_timestamp_base(base)
- base, mult = frequencies.get_freq_code(freq)
+ base, mult = get_freq_code(freq)
val = self.asfreq(freq, how)
dt64 = period_ordinal_to_dt64(val.ordinal, base)
return Timestamp(dt64, tz=tz)
- cdef _field(self, alias):
- base, mult = frequencies.get_freq_code(self.freq)
- return get_period_field(alias, self.ordinal, base)
-
- property year:
- def __get__(self):
- return self._field(0)
- property month:
- def __get__(self):
- return self._field(3)
- property day:
- def __get__(self):
- return self._field(4)
- property hour:
- def __get__(self):
- return self._field(5)
- property minute:
- def __get__(self):
- return self._field(6)
- property second:
- def __get__(self):
- return self._field(7)
- property weekofyear:
- def __get__(self):
- return self._field(8)
- property week:
- def __get__(self):
- return self.weekofyear
- property dayofweek:
- def __get__(self):
- return self._field(10)
- property weekday:
- def __get__(self):
- return self.dayofweek
- property dayofyear:
- def __get__(self):
- return self._field(9)
- property quarter:
- def __get__(self):
- return self._field(2)
- property qyear:
- def __get__(self):
- return self._field(1)
- property days_in_month:
- def __get__(self):
- return self._field(11)
- property daysinmonth:
- def __get__(self):
- return self.days_in_month
- property is_leap_year:
- def __get__(self):
- return bool(is_leapyear(self._field(0)))
+ @property
+ def year(self):
+ base, mult = get_freq_code(self.freq)
+ return pyear(self.ordinal, base)
+
+ @property
+ def month(self):
+ base, mult = get_freq_code(self.freq)
+ return pmonth(self.ordinal, base)
+
+ @property
+ def day(self):
+ base, mult = get_freq_code(self.freq)
+ return pday(self.ordinal, base)
+
+ @property
+ def hour(self):
+ base, mult = get_freq_code(self.freq)
+ return phour(self.ordinal, base)
+
+ @property
+ def minute(self):
+ base, mult = get_freq_code(self.freq)
+ return pminute(self.ordinal, base)
+
+ @property
+ def second(self):
+ base, mult = get_freq_code(self.freq)
+ return psecond(self.ordinal, base)
+
+ @property
+ def weekofyear(self):
+ base, mult = get_freq_code(self.freq)
+ return pweek(self.ordinal, base)
+
+ @property
+ def week(self):
+ return self.weekofyear
+
+ @property
+ def dayofweek(self):
+ base, mult = get_freq_code(self.freq)
+ return pweekday(self.ordinal, base)
+
+ @property
+ def weekday(self):
+ return self.dayofweek
+
+ @property
+ def dayofyear(self):
+ base, mult = get_freq_code(self.freq)
+ return pday_of_year(self.ordinal, base)
+
+ @property
+ def quarter(self):
+ base, mult = get_freq_code(self.freq)
+ return pquarter(self.ordinal, base)
+
+ @property
+ def qyear(self):
+ base, mult = get_freq_code(self.freq)
+ return pqyear(self.ordinal, base)
+
+ @property
+ def days_in_month(self):
+ base, mult = get_freq_code(self.freq)
+ return pdays_in_month(self.ordinal, base)
+
+ @property
+ def daysinmonth(self):
+ return self.days_in_month
+
+ @property
+ def is_leap_year(self):
+ return bool(is_leapyear(self.year))
@classmethod
def now(cls, freq=None):
@@ -934,7 +957,7 @@ cdef class _Period(object):
return self.freq.freqstr
def __repr__(self):
- base, mult = frequencies.get_freq_code(self.freq)
+ base, mult = get_freq_code(self.freq)
formatted = period_format(self.ordinal, base)
return "Period('%s', '%s')" % (formatted, self.freqstr)
@@ -945,7 +968,7 @@ cdef class _Period(object):
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
- base, mult = frequencies.get_freq_code(self.freq)
+ base, mult = get_freq_code(self.freq)
formatted = period_format(self.ordinal, base)
value = ("%s" % formatted)
return value
@@ -1095,13 +1118,13 @@ cdef class _Period(object):
>>> a.strftime('%b. %d, %Y was a %A')
'Jan. 01, 2001 was a Monday'
"""
- base, mult = frequencies.get_freq_code(self.freq)
+ base, mult = get_freq_code(self.freq)
return period_format(self.ordinal, base, fmt)
class Period(_Period):
"""
- Represents an period of time
+ Represents a period of time
Parameters
----------
@@ -1160,10 +1183,10 @@ class Period(_Period):
ordinal = _ordinal_from_fields(year, month, quarter, day,
hour, minute, second, freq)
- elif isinstance(value, Period):
+ elif is_period_object(value):
other = value
- if freq is None or frequencies.get_freq_code(
- freq) == frequencies.get_freq_code(other.freq):
+ if freq is None or get_freq_code(
+ freq) == get_freq_code(other.freq):
ordinal = other.ordinal
freq = other.freq
else:
@@ -1173,7 +1196,7 @@ class Period(_Period):
elif is_null_datetimelike(value) or value in tslib._nat_strings:
ordinal = iNaT
- elif isinstance(value, compat.string_types) or lib.is_integer(value):
+ elif is_string_object(value) or lib.is_integer(value):
if lib.is_integer(value):
value = str(value)
value = value.upper()
@@ -1190,7 +1213,7 @@ class Period(_Period):
dt = value
if freq is None:
raise ValueError('Must supply freq for datetime value')
- elif isinstance(value, np.datetime64):
+ elif util.is_datetime64_object(value):
dt = Timestamp(value)
if freq is None:
raise ValueError('Must supply freq for datetime value')
@@ -1203,7 +1226,7 @@ class Period(_Period):
raise ValueError(msg)
if ordinal is None:
- base, mult = frequencies.get_freq_code(freq)
+ base, mult = get_freq_code(freq)
ordinal = get_period_ordinal(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.microsecond, 0, base)
@@ -1213,7 +1236,7 @@ class Period(_Period):
def _ordinal_from_fields(year, month, quarter, day,
hour, minute, second, freq):
- base, mult = frequencies.get_freq_code(freq)
+ base, mult = get_freq_code(freq)
if quarter is not None:
year, month = _quarter_to_myear(year, quarter, freq)
@@ -1226,8 +1249,7 @@ def _quarter_to_myear(year, quarter, freq):
if quarter <= 0 or quarter > 4:
raise ValueError('Quarter must be 1 <= q <= 4')
- mnum = frequencies._month_numbers[
- frequencies._get_rule_month(freq)] + 1
+ mnum = tslib._MONTH_NUMBERS[tslib._get_rule_month(freq)] + 1
month = (mnum + (quarter - 1) * 3) % 12 + 1
if month > mnum:
year -= 1
diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx
index 6b5a8f20f0067..2bb362eab4097 100644
--- a/pandas/_libs/src/inference.pyx
+++ b/pandas/_libs/src/inference.pyx
@@ -2,7 +2,8 @@ import sys
from decimal import Decimal
cimport util
cimport cython
-from tslib import NaT, get_timezone
+from tslib import NaT
+from tslibs.timezones cimport get_timezone
from datetime import datetime, timedelta
iNaT = util.get_nat()
diff --git a/pandas/_libs/src/skiplist.pyx b/pandas/_libs/src/skiplist.pyx
index 559b529822a69..1524dca38d0e0 100644
--- a/pandas/_libs/src/skiplist.pyx
+++ b/pandas/_libs/src/skiplist.pyx
@@ -15,7 +15,6 @@ cdef double Log2(double x):
return log(x) / log(2.)
cimport numpy as np
-from numpy cimport *
import numpy as np
from random import random
diff --git a/pandas/_libs/src/ujson/python/JSONtoObj.c b/pandas/_libs/src/ujson/python/JSONtoObj.c
index b0132532c16af..85cf1d5e5e7a1 100644
--- a/pandas/_libs/src/ujson/python/JSONtoObj.c
+++ b/pandas/_libs/src/ujson/python/JSONtoObj.c
@@ -409,7 +409,7 @@ JSOBJ Object_npyEndObject(void *prv, JSOBJ obj) {
}
int Object_npyObjectAddKey(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value) {
- PyObject *label;
+ PyObject *label, *labels;
npy_intp labelidx;
// add key to label array, value to values array
NpyArrContext *npyarr = (NpyArrContext *)obj;
@@ -424,11 +424,11 @@ int Object_npyObjectAddKey(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value) {
if (!npyarr->labels[labelidx]) {
npyarr->labels[labelidx] = PyList_New(0);
}
-
+ labels = npyarr->labels[labelidx];
// only fill label array once, assumes all column labels are the same
// for 2-dimensional arrays.
- if (PyList_GET_SIZE(npyarr->labels[labelidx]) <= npyarr->elcount) {
- PyList_Append(npyarr->labels[labelidx], label);
+ if (PyList_Check(labels) && PyList_GET_SIZE(labels) <= npyarr->elcount) {
+ PyList_Append(labels, label);
}
if (((JSONObjectDecoder *)npyarr->dec)->arrayAddItem(prv, obj, value)) {
@@ -439,16 +439,16 @@ int Object_npyObjectAddKey(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value) {
}
int Object_objectAddKey(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value) {
- PyDict_SetItem(obj, name, value);
+ int ret = PyDict_SetItem(obj, name, value);
Py_DECREF((PyObject *)name);
Py_DECREF((PyObject *)value);
- return 1;
+ return ret == 0 ? 1 : 0;
}
int Object_arrayAddItem(void *prv, JSOBJ obj, JSOBJ value) {
- PyList_Append(obj, value);
+ int ret = PyList_Append(obj, value);
Py_DECREF((PyObject *)value);
- return 1;
+ return ret == 0 ? 1 : 0;
}
JSOBJ Object_newString(void *prv, wchar_t *start, wchar_t *end) {
diff --git a/pandas/_libs/tslib.pxd b/pandas/_libs/tslib.pxd
index aa8cbcb2cedc7..ee8adfe67bb5e 100644
--- a/pandas/_libs/tslib.pxd
+++ b/pandas/_libs/tslib.pxd
@@ -2,9 +2,5 @@ from numpy cimport ndarray, int64_t
cdef convert_to_tsobject(object, object, object, bint, bint)
cpdef convert_to_timedelta64(object, object)
-cpdef object maybe_get_tz(object)
-cdef bint _is_utc(object)
-cdef bint _is_tzlocal(object)
-cdef object _get_dst_info(object)
cdef bint _nat_scalar_rules[6]
cdef bint _check_all_nulls(obj)
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 5dd30072fb7aa..ec12611ae7f02 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -21,8 +21,7 @@ from cpython cimport (
PyObject_RichCompare,
Py_GT, Py_GE, Py_EQ, Py_NE, Py_LT, Py_LE,
PyUnicode_Check,
- PyUnicode_AsUTF8String,
-)
+ PyUnicode_AsUTF8String)
cdef extern from "Python.h":
cdef PyTypeObject *Py_TYPE(object)
@@ -73,19 +72,12 @@ import re
# dateutil compat
from dateutil.tz import (tzoffset, tzlocal as _dateutil_tzlocal,
- tzfile as _dateutil_tzfile,
tzutc as _dateutil_tzutc,
tzstr as _dateutil_tzstr)
-from pandas.compat import is_platform_windows
-if is_platform_windows():
- from dateutil.zoneinfo import gettz as _dateutil_gettz
-else:
- from dateutil.tz import gettz as _dateutil_gettz
from dateutil.relativedelta import relativedelta
from dateutil.parser import DEFAULTPARSER
-from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo
from pandas.compat import (parse_date, string_types, iteritems,
StringIO, callable)
@@ -107,6 +99,20 @@ cdef int64_t NPY_NAT = util.get_nat()
iNaT = NPY_NAT
+from tslibs.timezones cimport (
+ is_utc, is_tzlocal, _is_fixed_offset,
+ treat_tz_as_dateutil, treat_tz_as_pytz,
+ get_timezone, get_utcoffset, maybe_get_tz,
+ _get_dst_info
+ )
+from tslibs.timezones import ( # noqa
+ get_timezone, get_utcoffset, maybe_get_tz,
+ _p_tz_cache_key, dst_cache,
+ _unbox_utcoffsets,
+ _dateutil_gettz
+ )
+
+
cdef inline object create_timestamp_from_ts(
int64_t value, pandas_datetimestruct dts,
object tz, object freq):
@@ -152,7 +158,7 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, box=False):
func_create = create_datetime_from_ts
if tz is not None:
- if _is_utc(tz):
+ if is_utc(tz):
for i in range(n):
value = arr[i]
if value == NPY_NAT:
@@ -161,7 +167,7 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, box=False):
pandas_datetime_to_datetimestruct(
value, PANDAS_FR_ns, &dts)
result[i] = func_create(value, dts, tz, freq)
- elif _is_tzlocal(tz) or _is_fixed_offset(tz):
+ elif is_tzlocal(tz) or _is_fixed_offset(tz):
for i in range(n):
value = arr[i]
if value == NPY_NAT:
@@ -186,7 +192,7 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, box=False):
# Adjust datetime64 timestamp, recompute datetimestruct
pos = trans.searchsorted(value, side='right') - 1
- if _treat_tz_as_pytz(tz):
+ if treat_tz_as_pytz(tz):
# find right representation of dst etc in pytz timezone
new_tz = tz._tzinfos[tz._transition_info[pos]]
else:
@@ -233,24 +239,6 @@ def ints_to_pytimedelta(ndarray[int64_t] arr, box=False):
return result
-cdef inline bint _is_tzlocal(object tz):
- return isinstance(tz, _dateutil_tzlocal)
-
-
-cdef inline bint _is_fixed_offset(object tz):
- if _treat_tz_as_dateutil(tz):
- if len(tz._trans_idx) == 0 and len(tz._trans_list) == 0:
- return 1
- else:
- return 0
- elif _treat_tz_as_pytz(tz):
- if (len(tz._transition_info) == 0
- and len(tz._utc_transition_times) == 0):
- return 1
- else:
- return 0
- return 1
-
_zero_time = datetime_time(0, 0)
_no_input = object()
@@ -532,9 +520,7 @@ class Timestamp(_Timestamp):
@property
def weekday_name(self):
- out = get_date_name_field(
- np.array([self.value], dtype=np.int64), 'weekday_name')
- return out[0]
+ return self._get_named_field('weekday_name')
@property
def dayofyear(self):
@@ -546,10 +532,6 @@ class Timestamp(_Timestamp):
weekofyear = week
- @property
- def microsecond(self):
- return self._get_field('us')
-
@property
def quarter(self):
return self._get_field('q')
@@ -829,8 +811,6 @@ class NaTType(_NaT):
cdef _NaT base
base = _NaT.__new__(cls, 1, 1, 1)
- base._day = -1
- base._month = -1
base.value = NPY_NAT
return base
@@ -858,6 +838,9 @@ class NaTType(_NaT):
return (__nat_unpickle, (None, ))
def total_seconds(self):
+ """
+ Total duration of timedelta in seconds (to ns precision)
+ """
# GH 10939
return np.nan
@@ -1108,12 +1091,12 @@ cdef class _Timestamp(datetime):
try:
stamp += self.strftime('%z')
if self.tzinfo:
- zone = _get_zone(self.tzinfo)
+ zone = get_timezone(self.tzinfo)
except ValueError:
year2000 = self.replace(year=2000)
stamp += year2000.strftime('%z')
if self.tzinfo:
- zone = _get_zone(self.tzinfo)
+ zone = get_timezone(self.tzinfo)
try:
stamp += zone.strftime(' %%Z')
@@ -1268,21 +1251,37 @@ cdef class _Timestamp(datetime):
# same timezone if specified)
return datetime.__sub__(self, other)
- cpdef _get_field(self, field):
+ cdef int64_t _maybe_convert_value_to_local(self):
+ """Convert UTC i8 value to local i8 value if tz exists"""
+ cdef:
+ int64_t val
val = self.value
- if self.tz is not None and not _is_utc(self.tz):
+ if self.tz is not None and not is_utc(self.tz):
val = tz_convert_single(self.value, 'UTC', self.tz)
+ return val
+
+ cpdef _get_field(self, field):
+ cdef:
+ int64_t val
+ ndarray[int32_t] out
+ val = self._maybe_convert_value_to_local()
out = get_date_field(np.array([val], dtype=np.int64), field)
return int(out[0])
+ cpdef _get_named_field(self, field):
+ cdef:
+ int64_t val
+ ndarray[object] out
+ val = self._maybe_convert_value_to_local()
+ out = get_date_name_field(np.array([val], dtype=np.int64), field)
+ return out[0]
+
cpdef _get_start_end_field(self, field):
month_kw = self.freq.kwds.get(
'startingMonth', self.freq.kwds.get(
'month', 12)) if self.freq else 12
freqstr = self.freqstr if self.freq else None
- val = self.value
- if self.tz is not None and not _is_utc(self.tz):
- val = tz_convert_single(self.value, 'UTC', self.tz)
+ val = self._maybe_convert_value_to_local()
out = get_start_end_field(
np.array([val], dtype=np.int64), field, freqstr, month_kw)
return out[0]
@@ -1430,11 +1429,6 @@ cdef class _TSObject:
def __get__(self):
return self.value
-cpdef _get_utcoffset(tzinfo, obj):
- try:
- return tzinfo._utcoffset
- except AttributeError:
- return tzinfo.utcoffset(obj)
# helper to extract datetime and int64 from several different possibilities
cdef convert_to_tsobject(object ts, object tz, object unit,
@@ -1500,14 +1494,14 @@ cdef convert_to_tsobject(object ts, object tz, object unit,
except:
pass
obj.value = _pydatetime_to_dts(ts, &obj.dts)
- ts_offset = _get_utcoffset(ts.tzinfo, ts)
+ ts_offset = get_utcoffset(ts.tzinfo, ts)
obj.value -= _delta_to_nanoseconds(ts_offset)
- tz_offset = _get_utcoffset(tz, ts)
+ tz_offset = get_utcoffset(tz, ts)
obj.value += _delta_to_nanoseconds(tz_offset)
pandas_datetime_to_datetimestruct(obj.value,
PANDAS_FR_ns, &obj.dts)
obj.tzinfo = tz
- elif not _is_utc(tz):
+ elif not is_utc(tz):
ts = _localize_pydatetime(ts, tz)
obj.value = _pydatetime_to_dts(ts, &obj.dts)
obj.tzinfo = ts.tzinfo
@@ -1519,8 +1513,8 @@ cdef convert_to_tsobject(object ts, object tz, object unit,
obj.value = _pydatetime_to_dts(ts, &obj.dts)
obj.tzinfo = ts.tzinfo
- if obj.tzinfo is not None and not _is_utc(obj.tzinfo):
- offset = _get_utcoffset(obj.tzinfo, ts)
+ if obj.tzinfo is not None and not is_utc(obj.tzinfo):
+ offset = get_utcoffset(obj.tzinfo, ts)
obj.value -= _delta_to_nanoseconds(offset)
if is_timestamp(ts):
@@ -1631,13 +1625,13 @@ cdef inline void _localize_tso(_TSObject obj, object tz):
"""
Take a TSObject in UTC and localizes to timezone tz.
"""
- if _is_utc(tz):
+ if is_utc(tz):
obj.tzinfo = tz
- elif _is_tzlocal(tz):
+ elif is_tzlocal(tz):
pandas_datetime_to_datetimestruct(obj.value, PANDAS_FR_ns, &obj.dts)
dt = datetime(obj.dts.year, obj.dts.month, obj.dts.day, obj.dts.hour,
obj.dts.min, obj.dts.sec, obj.dts.us, tz)
- delta = int(_get_utcoffset(tz, dt).total_seconds()) * 1000000000
+ delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000
if obj.value != NPY_NAT:
pandas_datetime_to_datetimestruct(obj.value + delta,
PANDAS_FR_ns, &obj.dts)
@@ -1661,7 +1655,7 @@ cdef inline void _localize_tso(_TSObject obj, object tz):
pandas_datetime_to_datetimestruct(
obj.value, PANDAS_FR_ns, &obj.dts)
obj.tzinfo = tz
- elif _treat_tz_as_pytz(tz):
+ elif treat_tz_as_pytz(tz):
inf = tz._transition_info[pos]
if obj.value != NPY_NAT:
pandas_datetime_to_datetimestruct(obj.value + deltas[pos],
@@ -1670,7 +1664,7 @@ cdef inline void _localize_tso(_TSObject obj, object tz):
pandas_datetime_to_datetimestruct(obj.value,
PANDAS_FR_ns, &obj.dts)
obj.tzinfo = tz._tzinfos[inf]
- elif _treat_tz_as_dateutil(tz):
+ elif treat_tz_as_dateutil(tz):
if obj.value != NPY_NAT:
pandas_datetime_to_datetimestruct(obj.value + deltas[pos],
PANDAS_FR_ns, &obj.dts)
@@ -1699,71 +1693,6 @@ def _localize_pydatetime(object dt, object tz):
return dt.replace(tzinfo=tz)
-def get_timezone(tz):
- return _get_zone(tz)
-
-cdef inline bint _is_utc(object tz):
- return tz is UTC or isinstance(tz, _dateutil_tzutc)
-
-cdef inline object _get_zone(object tz):
- """
- We need to do several things here:
- 1) Distinguish between pytz and dateutil timezones
- 2) Not be over-specific (e.g. US/Eastern with/without DST is same *zone*
- but a different tz object)
- 3) Provide something to serialize when we're storing a datetime object
- in pytables.
-
- We return a string prefaced with dateutil if it's a dateutil tz, else just
- the tz name. It needs to be a string so that we can serialize it with
- UJSON/pytables. maybe_get_tz (below) is the inverse of this process.
- """
- if _is_utc(tz):
- return 'UTC'
- else:
- if _treat_tz_as_dateutil(tz):
- if '.tar.gz' in tz._filename:
- raise ValueError(
- 'Bad tz filename. Dateutil on python 3 on windows has a '
- 'bug which causes tzfile._filename to be the same for all '
- 'timezone files. Please construct dateutil timezones '
- 'implicitly by passing a string like "dateutil/Europe'
- '/London" when you construct your pandas objects instead '
- 'of passing a timezone object. See '
- 'https://github.com/pandas-dev/pandas/pull/7362')
- return 'dateutil/' + tz._filename
- else:
- # tz is a pytz timezone or unknown.
- try:
- zone = tz.zone
- if zone is None:
- return tz
- return zone
- except AttributeError:
- return tz
-
-
-cpdef inline object maybe_get_tz(object tz):
- """
- (Maybe) Construct a timezone object from a string. If tz is a string, use
- it to construct a timezone object. Otherwise, just return tz.
- """
- if isinstance(tz, string_types):
- if tz == 'tzlocal()':
- tz = _dateutil_tzlocal()
- elif tz.startswith('dateutil/'):
- zone = tz[9:]
- tz = _dateutil_gettz(zone)
- # On Python 3 on Windows, the filename is not always set correctly.
- if isinstance(tz, _dateutil_tzfile) and '.tar.gz' in tz._filename:
- tz._filename = zone
- else:
- tz = pytz.timezone(tz)
- elif is_integer_object(tz):
- tz = pytz.FixedOffset(tz / 60)
- return tz
-
-
class OutOfBoundsDatetime(ValueError):
pass
@@ -1804,10 +1733,10 @@ def datetime_to_datetime64(ndarray[object] values):
elif PyDateTime_Check(val):
if val.tzinfo is not None:
if inferred_tz is not None:
- if _get_zone(val.tzinfo) != inferred_tz:
+ if get_timezone(val.tzinfo) != inferred_tz:
raise ValueError('Array must be all same time zone')
else:
- inferred_tz = _get_zone(val.tzinfo)
+ inferred_tz = get_timezone(val.tzinfo)
_ts = convert_to_tsobject(val, None, None, 0, 0)
iresult[i] = _ts.value
@@ -2622,8 +2551,6 @@ cdef class _Timedelta(timedelta):
int ndim
if isinstance(other, _Timedelta):
- if isinstance(other, _NaT):
- return _cmp_nat_dt(other, self, _reverse_ops[op])
ots = other
elif isinstance(other, timedelta):
ots = Timedelta(other)
@@ -3882,7 +3809,7 @@ fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'millisecond', 'microsecond', 'nanosecond',
'week', 'dayofyear', 'weekofyear', 'days_in_month', 'daysinmonth',
'dayofweek', 'weekday_name', 'days', 'seconds', 'microseconds',
- 'nanoseconds', 'qyear', 'quarter']
+ 'nanoseconds', 'qyear']
for field in fields:
prop = property(fget=lambda self: np.nan)
setattr(NaTType, field, prop)
@@ -3894,8 +3821,9 @@ for field in fields:
_nat_methods = ['date', 'now', 'replace', 'to_pydatetime',
'today', 'round', 'floor', 'ceil', 'tz_convert',
'tz_localize']
-_nan_methods = ['weekday', 'isoweekday', 'total_seconds']
-_implemented_methods = ['to_datetime', 'to_datetime64', 'isoformat']
+_nan_methods = ['weekday', 'isoweekday']
+_implemented_methods = [
+ 'to_datetime', 'to_datetime64', 'isoformat', 'total_seconds']
_implemented_methods.extend(_nat_methods)
_implemented_methods.extend(_nan_methods)
@@ -4123,9 +4051,9 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
return np.array([], dtype=np.int64)
# Convert to UTC
- if _get_zone(tz1) != 'UTC':
+ if get_timezone(tz1) != 'UTC':
utc_dates = np.empty(n, dtype=np.int64)
- if _is_tzlocal(tz1):
+ if is_tzlocal(tz1):
for i in range(n):
v = vals[i]
if v == NPY_NAT:
@@ -4134,7 +4062,7 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
pandas_datetime_to_datetimestruct(v, PANDAS_FR_ns, &dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz1)
- delta = (int(_get_utcoffset(tz1, dt).total_seconds())
+ delta = (int(get_utcoffset(tz1, dt).total_seconds())
* 1000000000)
utc_dates[i] = v - delta
else:
@@ -4161,11 +4089,11 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
else:
utc_dates = vals
- if _get_zone(tz2) == 'UTC':
+ if get_timezone(tz2) == 'UTC':
return utc_dates
result = np.zeros(n, dtype=np.int64)
- if _is_tzlocal(tz2):
+ if is_tzlocal(tz2):
for i in range(n):
v = utc_dates[i]
if v == NPY_NAT:
@@ -4174,7 +4102,7 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
pandas_datetime_to_datetimestruct(v, PANDAS_FR_ns, &dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz2)
- delta = (int(_get_utcoffset(tz2, dt).total_seconds())
+ delta = (int(get_utcoffset(tz2, dt).total_seconds())
* 1000000000)
result[i] = v + delta
return result
@@ -4237,13 +4165,13 @@ def tz_convert_single(int64_t val, object tz1, object tz2):
return val
# Convert to UTC
- if _is_tzlocal(tz1):
+ if is_tzlocal(tz1):
pandas_datetime_to_datetimestruct(val, PANDAS_FR_ns, &dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz1)
- delta = int(_get_utcoffset(tz1, dt).total_seconds()) * 1000000000
+ delta = int(get_utcoffset(tz1, dt).total_seconds()) * 1000000000
utc_date = val - delta
- elif _get_zone(tz1) != 'UTC':
+ elif get_timezone(tz1) != 'UTC':
trans, deltas, typ = _get_dst_info(tz1)
pos = trans.searchsorted(val, side='right') - 1
if pos < 0:
@@ -4253,13 +4181,13 @@ def tz_convert_single(int64_t val, object tz1, object tz2):
else:
utc_date = val
- if _get_zone(tz2) == 'UTC':
+ if get_timezone(tz2) == 'UTC':
return utc_date
- if _is_tzlocal(tz2):
+ if is_tzlocal(tz2):
pandas_datetime_to_datetimestruct(val, PANDAS_FR_ns, &dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz2)
- delta = int(_get_utcoffset(tz2, dt).total_seconds()) * 1000000000
+ delta = int(get_utcoffset(tz2, dt).total_seconds()) * 1000000000
return utc_date + delta
# Convert UTC to other timezone
@@ -4272,148 +4200,6 @@ def tz_convert_single(int64_t val, object tz1, object tz2):
offset = deltas[pos]
return utc_date + offset
-# Timezone data caches, key is the pytz string or dateutil file name.
-dst_cache = {}
-
-cdef inline bint _treat_tz_as_pytz(object tz):
- return hasattr(tz, '_utc_transition_times') and hasattr(
- tz, '_transition_info')
-
-cdef inline bint _treat_tz_as_dateutil(object tz):
- return hasattr(tz, '_trans_list') and hasattr(tz, '_trans_idx')
-
-
-def _p_tz_cache_key(tz):
- """ Python interface for cache function to facilitate testing."""
- return _tz_cache_key(tz)
-
-
-cdef inline object _tz_cache_key(object tz):
- """
- Return the key in the cache for the timezone info object or None
- if unknown.
-
- The key is currently the tz string for pytz timezones, the filename for
- dateutil timezones.
-
- Notes
- =====
- This cannot just be the hash of a timezone object. Unfortunately, the
- hashes of two dateutil tz objects which represent the same timezone are
- not equal (even though the tz objects will compare equal and represent
- the same tz file). Also, pytz objects are not always hashable so we use
- str(tz) instead.
- """
- if isinstance(tz, _pytz_BaseTzInfo):
- return tz.zone
- elif isinstance(tz, _dateutil_tzfile):
- if '.tar.gz' in tz._filename:
- raise ValueError('Bad tz filename. Dateutil on python 3 on '
- 'windows has a bug which causes tzfile._filename '
- 'to be the same for all timezone files. Please '
- 'construct dateutil timezones implicitly by '
- 'passing a string like "dateutil/Europe/London" '
- 'when you construct your pandas objects instead '
- 'of passing a timezone object. See '
- 'https://github.com/pandas-dev/pandas/pull/7362')
- return 'dateutil' + tz._filename
- else:
- return None
-
-
-cdef object _get_dst_info(object tz):
- """
- return a tuple of :
- (UTC times of DST transitions,
- UTC offsets in microseconds corresponding to DST transitions,
- string of type of transitions)
-
- """
- cache_key = _tz_cache_key(tz)
- if cache_key is None:
- num = int(_get_utcoffset(tz, None).total_seconds()) * 1000000000
- return (np.array([NPY_NAT + 1], dtype=np.int64),
- np.array([num], dtype=np.int64),
- None)
-
- if cache_key not in dst_cache:
- if _treat_tz_as_pytz(tz):
- trans = np.array(tz._utc_transition_times, dtype='M8[ns]')
- trans = trans.view('i8')
- try:
- if tz._utc_transition_times[0].year == 1:
- trans[0] = NPY_NAT + 1
- except Exception:
- pass
- deltas = _unbox_utcoffsets(tz._transition_info)
- typ = 'pytz'
-
- elif _treat_tz_as_dateutil(tz):
- if len(tz._trans_list):
- # get utc trans times
- trans_list = _get_utc_trans_times_from_dateutil_tz(tz)
- trans = np.hstack([
- np.array([0], dtype='M8[s]'), # place holder for first item
- np.array(trans_list, dtype='M8[s]')]).astype(
- 'M8[ns]') # all trans listed
- trans = trans.view('i8')
- trans[0] = NPY_NAT + 1
-
- # deltas
- deltas = np.array([v.offset for v in (
- tz._ttinfo_before,) + tz._trans_idx], dtype='i8')
- deltas *= 1000000000
- typ = 'dateutil'
-
- elif _is_fixed_offset(tz):
- trans = np.array([NPY_NAT + 1], dtype=np.int64)
- deltas = np.array([tz._ttinfo_std.offset],
- dtype='i8') * 1000000000
- typ = 'fixed'
- else:
- trans = np.array([], dtype='M8[ns]')
- deltas = np.array([], dtype='i8')
- typ = None
-
- else:
- # static tzinfo
- trans = np.array([NPY_NAT + 1], dtype=np.int64)
- num = int(_get_utcoffset(tz, None).total_seconds()) * 1000000000
- deltas = np.array([num], dtype=np.int64)
- typ = 'static'
-
- dst_cache[cache_key] = (trans, deltas, typ)
-
- return dst_cache[cache_key]
-
-cdef object _get_utc_trans_times_from_dateutil_tz(object tz):
- """
- Transition times in dateutil timezones are stored in local non-dst
- time. This code converts them to UTC. It's the reverse of the code
- in dateutil.tz.tzfile.__init__.
- """
- new_trans = list(tz._trans_list)
- last_std_offset = 0
- for i, (trans, tti) in enumerate(zip(tz._trans_list, tz._trans_idx)):
- if not tti.isdst:
- last_std_offset = tti.offset
- new_trans[i] = trans - last_std_offset
- return new_trans
-
-
-cpdef ndarray _unbox_utcoffsets(object transinfo):
- cdef:
- Py_ssize_t i, sz
- ndarray[int64_t] arr
-
- sz = len(transinfo)
- arr = np.empty(sz, dtype='i8')
-
- for i in range(sz):
- arr[i] = int(transinfo[i][0].total_seconds()) * 1000000000
-
- return arr
-
@cython.boundscheck(False)
@cython.wraparound(False)
@@ -4447,13 +4233,13 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
result = np.empty(n, dtype=np.int64)
- if _is_tzlocal(tz):
+ if is_tzlocal(tz):
for i in range(n):
v = vals[i]
pandas_datetime_to_datetimestruct(v, PANDAS_FR_ns, &dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz)
- delta = int(_get_utcoffset(tz, dt).total_seconds()) * 1000000000
+ delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000
result[i] = v - delta
return result
@@ -4620,7 +4406,6 @@ def build_field_sarray(ndarray[int64_t] dtindex):
"""
cdef:
Py_ssize_t i, count = 0
- int isleap
pandas_datetimestruct dts
ndarray[int32_t] years, months, days, hours, minutes, seconds, mus
@@ -5159,7 +4944,7 @@ cdef _normalize_local(ndarray[int64_t] stamps, object tz):
ndarray[int64_t] trans, deltas, pos
pandas_datetimestruct dts
- if _is_utc(tz):
+ if is_utc(tz):
with nogil:
for i in range(n):
if stamps[i] == NPY_NAT:
@@ -5168,7 +4953,7 @@ cdef _normalize_local(ndarray[int64_t] stamps, object tz):
pandas_datetime_to_datetimestruct(
stamps[i], PANDAS_FR_ns, &dts)
result[i] = _normalized_stamp(&dts)
- elif _is_tzlocal(tz):
+ elif is_tzlocal(tz):
for i in range(n):
if stamps[i] == NPY_NAT:
result[i] = NPY_NAT
@@ -5176,7 +4961,7 @@ cdef _normalize_local(ndarray[int64_t] stamps, object tz):
pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz)
- delta = int(_get_utcoffset(tz, dt).total_seconds()) * 1000000000
+ delta = int(get_utcoffset(tz, dt).total_seconds()) * 1000000000
pandas_datetime_to_datetimestruct(stamps[i] + delta,
PANDAS_FR_ns, &dts)
result[i] = _normalized_stamp(&dts)
@@ -5223,12 +5008,12 @@ def dates_normalized(ndarray[int64_t] stamps, tz=None):
Py_ssize_t i, n = len(stamps)
pandas_datetimestruct dts
- if tz is None or _is_utc(tz):
+ if tz is None or is_utc(tz):
for i in range(n):
pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
if (dts.hour + dts.min + dts.sec + dts.us) > 0:
return False
- elif _is_tzlocal(tz):
+ elif is_tzlocal(tz):
for i in range(n):
pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour, dts.min,
@@ -5270,7 +5055,6 @@ cpdef _isleapyear_arr(ndarray years):
def monthrange(int64_t year, int64_t month):
cdef:
int64_t days
- int64_t day_of_week
if month < 1 or month > 12:
raise ValueError("bad month number 0; must be 1-12")
diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py
new file mode 100644
index 0000000000000..f3aa0424f0376
--- /dev/null
+++ b/pandas/_libs/tslibs/__init__.py
@@ -0,0 +1,2 @@
+# -*- coding: utf-8 -*-
+# cython: profile=False
diff --git a/pandas/_libs/tslibs/frequencies.pxd b/pandas/_libs/tslibs/frequencies.pxd
new file mode 100644
index 0000000000000..974eb4ab45df0
--- /dev/null
+++ b/pandas/_libs/tslibs/frequencies.pxd
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 -*-
+# cython: profile=False
+
+cpdef get_freq_code(freqstr)
diff --git a/pandas/_libs/tslibs/frequencies.pyx b/pandas/_libs/tslibs/frequencies.pyx
new file mode 100644
index 0000000000000..f7889d76abbc7
--- /dev/null
+++ b/pandas/_libs/tslibs/frequencies.pyx
@@ -0,0 +1,204 @@
+# -*- coding: utf-8 -*-
+# cython: profile=False
+import re
+
+cimport cython
+
+import numpy as np
+cimport numpy as np
+np.import_array()
+
+from util cimport is_integer_object
+
+
+cpdef get_freq_code(freqstr):
+ """
+ Return freq str or tuple to freq code and stride (mult)
+
+ Parameters
+ ----------
+ freqstr : str or tuple
+
+ Returns
+ -------
+ return : tuple of base frequency code and stride (mult)
+
+ Example
+ -------
+ >>> get_freq_code('3D')
+ (6000, 3)
+
+ >>> get_freq_code('D')
+ (6000, 1)
+
+ >>> get_freq_code(('D', 3))
+ (6000, 3)
+ """
+ if getattr(freqstr, '_typ', None) == 'dateoffset':
+ freqstr = (freqstr.rule_code, freqstr.n)
+
+ if isinstance(freqstr, tuple):
+ if (is_integer_object(freqstr[0]) and
+ is_integer_object(freqstr[1])):
+ # e.g., freqstr = (2000, 1)
+ return freqstr
+ else:
+ # e.g., freqstr = ('T', 5)
+ try:
+ code = _period_str_to_code(freqstr[0])
+ stride = freqstr[1]
+ except:
+ if is_integer_object(freqstr[1]):
+ raise
+ code = _period_str_to_code(freqstr[1])
+ stride = freqstr[0]
+ return code, stride
+
+ if is_integer_object(freqstr):
+ return (freqstr, 1)
+
+ base, stride = _base_and_stride(freqstr)
+ code = _period_str_to_code(base)
+
+ return code, stride
+
+
+# hack to handle WOM-1MON
+opattern = re.compile(
+ r'([\-]?\d*|[\-]?\d*\.\d*)\s*([A-Za-z]+([\-][\dA-Za-z\-]+)?)'
+)
+
+
+cpdef _base_and_stride(freqstr):
+ """
+ Return base freq and stride info from string representation
+
+ Examples
+ --------
+ _freq_and_stride('5Min') -> 'Min', 5
+ """
+ groups = opattern.match(freqstr)
+
+ if not groups:
+ raise ValueError("Could not evaluate {freq}".format(freq=freqstr))
+
+ stride = groups.group(1)
+
+ if len(stride):
+ stride = int(stride)
+ else:
+ stride = 1
+
+ base = groups.group(2)
+
+ return (base, stride)
+
+
+# ---------------------------------------------------------------------
+# Period codes
+
+# period frequency constants corresponding to scikits timeseries
+# originals
+_period_code_map = {
+ # Annual freqs with various fiscal year ends.
+ # eg, 2005 for A-FEB runs Mar 1, 2004 to Feb 28, 2005
+ "A-DEC": 1000, # Annual - December year end
+ "A-JAN": 1001, # Annual - January year end
+ "A-FEB": 1002, # Annual - February year end
+ "A-MAR": 1003, # Annual - March year end
+ "A-APR": 1004, # Annual - April year end
+ "A-MAY": 1005, # Annual - May year end
+ "A-JUN": 1006, # Annual - June year end
+ "A-JUL": 1007, # Annual - July year end
+ "A-AUG": 1008, # Annual - August year end
+ "A-SEP": 1009, # Annual - September year end
+ "A-OCT": 1010, # Annual - October year end
+ "A-NOV": 1011, # Annual - November year end
+
+ # Quarterly frequencies with various fiscal year ends.
+ # eg, Q42005 for Q-OCT runs Aug 1, 2005 to Oct 31, 2005
+ "Q-DEC": 2000, # Quarterly - December year end
+ "Q-JAN": 2001, # Quarterly - January year end
+ "Q-FEB": 2002, # Quarterly - February year end
+ "Q-MAR": 2003, # Quarterly - March year end
+ "Q-APR": 2004, # Quarterly - April year end
+ "Q-MAY": 2005, # Quarterly - May year end
+ "Q-JUN": 2006, # Quarterly - June year end
+ "Q-JUL": 2007, # Quarterly - July year end
+ "Q-AUG": 2008, # Quarterly - August year end
+ "Q-SEP": 2009, # Quarterly - September year end
+ "Q-OCT": 2010, # Quarterly - October year end
+ "Q-NOV": 2011, # Quarterly - November year end
+
+ "M": 3000, # Monthly
+
+ "W-SUN": 4000, # Weekly - Sunday end of week
+ "W-MON": 4001, # Weekly - Monday end of week
+ "W-TUE": 4002, # Weekly - Tuesday end of week
+ "W-WED": 4003, # Weekly - Wednesday end of week
+ "W-THU": 4004, # Weekly - Thursday end of week
+ "W-FRI": 4005, # Weekly - Friday end of week
+ "W-SAT": 4006, # Weekly - Saturday end of week
+
+ "B": 5000, # Business days
+ "D": 6000, # Daily
+ "H": 7000, # Hourly
+ "T": 8000, # Minutely
+ "S": 9000, # Secondly
+ "L": 10000, # Millisecondly
+ "U": 11000, # Microsecondly
+ "N": 12000, # Nanosecondly
+}
+
+_reverse_period_code_map = {
+ _period_code_map[key]: key for key in _period_code_map}
+
+# Yearly aliases; careful not to put these in _reverse_period_code_map
+_period_code_map.update({'Y' + key[1:]: _period_code_map[key]
+ for key in _period_code_map
+ if key.startswith('A-')})
+
+_period_code_map.update({
+ "Q": 2000, # Quarterly - December year end (default quarterly)
+ "A": 1000, # Annual
+ "W": 4000, # Weekly
+ "C": 5000, # Custom Business Day
+ })
+
+_dont_uppercase = set(('MS', 'ms'))
+
+_lite_rule_alias = {
+ 'W': 'W-SUN',
+ 'Q': 'Q-DEC',
+
+ 'A': 'A-DEC', # YearEnd(month=12),
+ 'Y': 'A-DEC',
+ 'AS': 'AS-JAN', # YearBegin(month=1),
+ 'YS': 'AS-JAN',
+ 'BA': 'BA-DEC', # BYearEnd(month=12),
+ 'BY': 'BA-DEC',
+ 'BAS': 'BAS-JAN', # BYearBegin(month=1),
+ 'BYS': 'BAS-JAN',
+
+ 'Min': 'T',
+ 'min': 'T',
+ 'ms': 'L',
+ 'us': 'U',
+ 'ns': 'N'}
+
+_INVALID_FREQ_ERROR = "Invalid frequency: {0}"
+
+
+cpdef _period_str_to_code(freqstr):
+ freqstr = _lite_rule_alias.get(freqstr, freqstr)
+
+ if freqstr not in _dont_uppercase:
+ lower = freqstr.lower()
+ freqstr = _lite_rule_alias.get(lower, freqstr)
+
+ if freqstr not in _dont_uppercase:
+ freqstr = freqstr.upper()
+ try:
+ return _period_code_map[freqstr]
+ except KeyError:
+ raise ValueError(_INVALID_FREQ_ERROR.format(freqstr))
diff --git a/pandas/_libs/tslibs/timezones.pxd b/pandas/_libs/tslibs/timezones.pxd
new file mode 100644
index 0000000000000..fac0018a78bc2
--- /dev/null
+++ b/pandas/_libs/tslibs/timezones.pxd
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+# cython: profile=False
+
+from numpy cimport ndarray
+
+cdef bint is_utc(object tz)
+cdef bint is_tzlocal(object tz)
+
+cdef bint treat_tz_as_pytz(object tz)
+cdef bint treat_tz_as_dateutil(object tz)
+
+cpdef object get_timezone(object tz)
+cpdef object maybe_get_tz(object tz)
+
+cpdef get_utcoffset(tzinfo, obj)
+cdef bint _is_fixed_offset(object tz)
+
+cdef object _get_dst_info(object tz)
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx
new file mode 100644
index 0000000000000..346da41e7073b
--- /dev/null
+++ b/pandas/_libs/tslibs/timezones.pyx
@@ -0,0 +1,277 @@
+# -*- coding: utf-8 -*-
+# cython: profile=False
+
+cimport cython
+from cython cimport Py_ssize_t
+
+# dateutil compat
+from dateutil.tz import (
+ tzutc as _dateutil_tzutc,
+ tzlocal as _dateutil_tzlocal,
+ tzfile as _dateutil_tzfile)
+
+import sys
+if sys.platform == 'win32' or sys.platform == 'cygwin':
+ # equiv pd.compat.is_platform_windows()
+ from dateutil.zoneinfo import gettz as _dateutil_gettz
+else:
+ from dateutil.tz import gettz as _dateutil_gettz
+
+
+from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo
+import pytz
+UTC = pytz.utc
+
+
+import numpy as np
+cimport numpy as np
+from numpy cimport ndarray, int64_t
+np.import_array()
+
+# ----------------------------------------------------------------------
+from util cimport is_string_object, is_integer_object, get_nat
+
+cdef int64_t NPY_NAT = get_nat()
+
+# ----------------------------------------------------------------------
+
+cdef inline bint is_utc(object tz):
+ return tz is UTC or isinstance(tz, _dateutil_tzutc)
+
+
+cdef inline bint is_tzlocal(object tz):
+ return isinstance(tz, _dateutil_tzlocal)
+
+
+cdef inline bint treat_tz_as_pytz(object tz):
+ return hasattr(tz, '_utc_transition_times') and hasattr(
+ tz, '_transition_info')
+
+
+cdef inline bint treat_tz_as_dateutil(object tz):
+ return hasattr(tz, '_trans_list') and hasattr(tz, '_trans_idx')
+
+
+cpdef inline object get_timezone(object tz):
+ """
+ We need to do several things here:
+ 1) Distinguish between pytz and dateutil timezones
+ 2) Not be over-specific (e.g. US/Eastern with/without DST is same *zone*
+ but a different tz object)
+ 3) Provide something to serialize when we're storing a datetime object
+ in pytables.
+
+ We return a string prefaced with dateutil if it's a dateutil tz, else just
+ the tz name. It needs to be a string so that we can serialize it with
+ UJSON/pytables. maybe_get_tz (below) is the inverse of this process.
+ """
+ if is_utc(tz):
+ return 'UTC'
+ else:
+ if treat_tz_as_dateutil(tz):
+ if '.tar.gz' in tz._filename:
+ raise ValueError(
+ 'Bad tz filename. Dateutil on python 3 on windows has a '
+ 'bug which causes tzfile._filename to be the same for all '
+ 'timezone files. Please construct dateutil timezones '
+ 'implicitly by passing a string like "dateutil/Europe'
+ '/London" when you construct your pandas objects instead '
+ 'of passing a timezone object. See '
+ 'https://github.com/pandas-dev/pandas/pull/7362')
+ return 'dateutil/' + tz._filename
+ else:
+ # tz is a pytz timezone or unknown.
+ try:
+ zone = tz.zone
+ if zone is None:
+ return tz
+ return zone
+ except AttributeError:
+ return tz
+
+
+cpdef inline object maybe_get_tz(object tz):
+ """
+ (Maybe) Construct a timezone object from a string. If tz is a string, use
+ it to construct a timezone object. Otherwise, just return tz.
+ """
+ if is_string_object(tz):
+ if tz == 'tzlocal()':
+ tz = _dateutil_tzlocal()
+ elif tz.startswith('dateutil/'):
+ zone = tz[9:]
+ tz = _dateutil_gettz(zone)
+ # On Python 3 on Windows, the filename is not always set correctly.
+ if isinstance(tz, _dateutil_tzfile) and '.tar.gz' in tz._filename:
+ tz._filename = zone
+ else:
+ tz = pytz.timezone(tz)
+ elif is_integer_object(tz):
+ tz = pytz.FixedOffset(tz / 60)
+ return tz
+
+
+def _p_tz_cache_key(tz):
+ """ Python interface for cache function to facilitate testing."""
+ return _tz_cache_key(tz)
+
+
+# Timezone data caches, key is the pytz string or dateutil file name.
+dst_cache = {}
+
+
+cdef inline object _tz_cache_key(object tz):
+ """
+ Return the key in the cache for the timezone info object or None
+ if unknown.
+
+ The key is currently the tz string for pytz timezones, the filename for
+ dateutil timezones.
+
+ Notes
+ =====
+ This cannot just be the hash of a timezone object. Unfortunately, the
+ hashes of two dateutil tz objects which represent the same timezone are
+ not equal (even though the tz objects will compare equal and represent
+ the same tz file). Also, pytz objects are not always hashable so we use
+ str(tz) instead.
+ """
+ if isinstance(tz, _pytz_BaseTzInfo):
+ return tz.zone
+ elif isinstance(tz, _dateutil_tzfile):
+ if '.tar.gz' in tz._filename:
+ raise ValueError('Bad tz filename. Dateutil on python 3 on '
+ 'windows has a bug which causes tzfile._filename '
+ 'to be the same for all timezone files. Please '
+ 'construct dateutil timezones implicitly by '
+ 'passing a string like "dateutil/Europe/London" '
+ 'when you construct your pandas objects instead '
+ 'of passing a timezone object. See '
+ 'https://github.com/pandas-dev/pandas/pull/7362')
+ return 'dateutil' + tz._filename
+ else:
+ return None
+
+
+#----------------------------------------------------------------------
+# UTC Offsets
+
+cpdef get_utcoffset(tzinfo, obj):
+ try:
+ return tzinfo._utcoffset
+ except AttributeError:
+ return tzinfo.utcoffset(obj)
+
+
+cdef inline bint _is_fixed_offset(object tz):
+ if treat_tz_as_dateutil(tz):
+ if len(tz._trans_idx) == 0 and len(tz._trans_list) == 0:
+ return 1
+ else:
+ return 0
+ elif treat_tz_as_pytz(tz):
+ if (len(tz._transition_info) == 0
+ and len(tz._utc_transition_times) == 0):
+ return 1
+ else:
+ return 0
+ return 1
+
+
+cdef object _get_utc_trans_times_from_dateutil_tz(object tz):
+ """
+ Transition times in dateutil timezones are stored in local non-dst
+ time. This code converts them to UTC. It's the reverse of the code
+ in dateutil.tz.tzfile.__init__.
+ """
+ new_trans = list(tz._trans_list)
+ last_std_offset = 0
+ for i, (trans, tti) in enumerate(zip(tz._trans_list, tz._trans_idx)):
+ if not tti.isdst:
+ last_std_offset = tti.offset
+ new_trans[i] = trans - last_std_offset
+ return new_trans
+
+
+cpdef ndarray _unbox_utcoffsets(object transinfo):
+ cdef:
+ Py_ssize_t i, sz
+ ndarray[int64_t] arr
+
+ sz = len(transinfo)
+ arr = np.empty(sz, dtype='i8')
+
+ for i in range(sz):
+ arr[i] = int(transinfo[i][0].total_seconds()) * 1000000000
+
+ return arr
+
+
+# ----------------------------------------------------------------------
+# Daylight Savings
+
+
+cdef object _get_dst_info(object tz):
+ """
+ return a tuple of :
+ (UTC times of DST transitions,
+ UTC offsets in microseconds corresponding to DST transitions,
+ string of type of transitions)
+
+ """
+ cache_key = _tz_cache_key(tz)
+ if cache_key is None:
+ num = int(get_utcoffset(tz, None).total_seconds()) * 1000000000
+ return (np.array([NPY_NAT + 1], dtype=np.int64),
+ np.array([num], dtype=np.int64),
+ None)
+
+ if cache_key not in dst_cache:
+ if treat_tz_as_pytz(tz):
+ trans = np.array(tz._utc_transition_times, dtype='M8[ns]')
+ trans = trans.view('i8')
+ try:
+ if tz._utc_transition_times[0].year == 1:
+ trans[0] = NPY_NAT + 1
+ except Exception:
+ pass
+ deltas = _unbox_utcoffsets(tz._transition_info)
+ typ = 'pytz'
+
+ elif treat_tz_as_dateutil(tz):
+ if len(tz._trans_list):
+ # get utc trans times
+ trans_list = _get_utc_trans_times_from_dateutil_tz(tz)
+ trans = np.hstack([
+ np.array([0], dtype='M8[s]'), # place holder for first item
+ np.array(trans_list, dtype='M8[s]')]).astype(
+ 'M8[ns]') # all trans listed
+ trans = trans.view('i8')
+ trans[0] = NPY_NAT + 1
+
+ # deltas
+ deltas = np.array([v.offset for v in (
+ tz._ttinfo_before,) + tz._trans_idx], dtype='i8')
+ deltas *= 1000000000
+ typ = 'dateutil'
+
+ elif _is_fixed_offset(tz):
+ trans = np.array([NPY_NAT + 1], dtype=np.int64)
+ deltas = np.array([tz._ttinfo_std.offset],
+ dtype='i8') * 1000000000
+ typ = 'fixed'
+ else:
+ trans = np.array([], dtype='M8[ns]')
+ deltas = np.array([], dtype='i8')
+ typ = None
+
+ else:
+ # static tzinfo
+ trans = np.array([NPY_NAT + 1], dtype=np.int64)
+ num = int(get_utcoffset(tz, None).total_seconds()) * 1000000000
+ deltas = np.array([num], dtype=np.int64)
+ typ = 'static'
+
+ dst_cache[cache_key] = (trans, deltas, typ)
+
+ return dst_cache[cache_key]
diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx
index 9fb3d0662eb4f..b6bd6f92f6199 100644
--- a/pandas/_libs/window.pyx
+++ b/pandas/_libs/window.pyx
@@ -1,55 +1,29 @@
# cython: profile=False
# cython: boundscheck=False, wraparound=False, cdivision=True
-from numpy cimport *
+from cython cimport Py_ssize_t
+
cimport numpy as np
import numpy as np
cimport cython
-import_array()
+np.import_array()
cimport util
from libc.stdlib cimport malloc, free
-from numpy cimport NPY_INT8 as NPY_int8
-from numpy cimport NPY_INT16 as NPY_int16
-from numpy cimport NPY_INT32 as NPY_int32
-from numpy cimport NPY_INT64 as NPY_int64
-from numpy cimport NPY_FLOAT16 as NPY_float16
-from numpy cimport NPY_FLOAT32 as NPY_float32
-from numpy cimport NPY_FLOAT64 as NPY_float64
-
-from numpy cimport (int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t,
- uint32_t, uint64_t, float16_t, float32_t, float64_t)
-
-int8 = np.dtype(np.int8)
-int16 = np.dtype(np.int16)
-int32 = np.dtype(np.int32)
-int64 = np.dtype(np.int64)
-float16 = np.dtype(np.float16)
-float32 = np.dtype(np.float32)
-float64 = np.dtype(np.float64)
-
-cdef np.int8_t MINint8 = np.iinfo(np.int8).min
-cdef np.int16_t MINint16 = np.iinfo(np.int16).min
-cdef np.int32_t MINint32 = np.iinfo(np.int32).min
-cdef np.int64_t MINint64 = np.iinfo(np.int64).min
-cdef np.float16_t MINfloat16 = np.NINF
+
+from numpy cimport ndarray, double_t, int64_t, float64_t
+
cdef np.float32_t MINfloat32 = np.NINF
cdef np.float64_t MINfloat64 = np.NINF
-cdef np.int8_t MAXint8 = np.iinfo(np.int8).max
-cdef np.int16_t MAXint16 = np.iinfo(np.int16).max
-cdef np.int32_t MAXint32 = np.iinfo(np.int32).max
-cdef np.int64_t MAXint64 = np.iinfo(np.int64).max
-cdef np.float16_t MAXfloat16 = np.inf
cdef np.float32_t MAXfloat32 = np.inf
cdef np.float64_t MAXfloat64 = np.inf
cdef double NaN = <double> np.NaN
-cdef double nan = NaN
cdef inline int int_max(int a, int b): return a if a >= b else b
cdef inline int int_min(int a, int b): return a if a <= b else b
diff --git a/pandas/compat/chainmap_impl.py b/pandas/compat/chainmap_impl.py
index 05a0d5faa4c2a..c4aa8c8d6ab30 100644
--- a/pandas/compat/chainmap_impl.py
+++ b/pandas/compat/chainmap_impl.py
@@ -34,10 +34,10 @@ def wrapper(self):
class ChainMap(MutableMapping):
""" A ChainMap groups multiple dicts (or other mappings) together
- to create a single, updateable view.
+ to create a single, updatable view.
The underlying mappings are stored in a list. That list is public and can
- accessed or updated using the *maps* attribute. There is no other state.
+ be accessed / updated using the *maps* attribute. There is no other state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index cccb094eaae7b..9f712a1cf039b 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1475,7 +1475,7 @@ def func(arr, indexer, out, fill_value=np.nan):
def diff(arr, n, axis=0):
"""
difference of n between self,
- analagoust to s-s.shift(n)
+ analogous to s-s.shift(n)
Parameters
----------
diff --git a/pandas/core/base.py b/pandas/core/base.py
index d60a8515dc920..f0e8d8a16661b 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -8,7 +8,12 @@
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries, ABCIndexClass
-from pandas.core.dtypes.common import is_object_dtype, is_list_like, is_scalar
+from pandas.core.dtypes.common import (
+ is_object_dtype,
+ is_list_like,
+ is_scalar,
+ is_datetimelike)
+
from pandas.util._validators import validate_bool_kwarg
from pandas.core import common as com
@@ -18,7 +23,8 @@
from pandas.compat import PYPY
from pandas.util._decorators import (Appender, cache_readonly,
deprecate_kwarg, Substitution)
-from pandas.core.common import AbstractMethodError
+from pandas.core.common import AbstractMethodError, _maybe_box_datetimelike
+
from pandas.core.accessor import DirNamesMixin
_shared_docs = dict()
@@ -884,6 +890,34 @@ def argmin(self, axis=None):
"""
return nanops.nanargmin(self.values)
+ def tolist(self):
+ """
+ Return a list of the values.
+
+ These are each a scalar type, which is a Python scalar
+ (for str, int, float) or a pandas scalar
+ (for Timestamp/Timedelta/Interval/Period)
+
+ See Also
+ --------
+ numpy.tolist
+ """
+
+ if is_datetimelike(self):
+ return [_maybe_box_datetimelike(x) for x in self._values]
+ else:
+ return self._values.tolist()
+
+ def __iter__(self):
+ """
+ Return an iterator of the values.
+
+ These are each a scalar type, which is a Python scalar
+ (for str, int, float) or a pandas scalar
+ (for Timestamp/Timedelta/Interval/Period)
+ """
+ return iter(self.tolist())
+
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 1c2a29333001c..e67ce2936819f 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -26,7 +26,7 @@
is_integer_dtype, is_bool,
is_list_like, is_sequence,
is_scalar)
-from pandas.core.common import is_null_slice
+from pandas.core.common import is_null_slice, _maybe_box_datetimelike
from pandas.core.algorithms import factorize, take_1d, unique1d
from pandas.core.base import (PandasObject, PandasDelegate,
@@ -399,6 +399,18 @@ def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
+ def tolist(self):
+ """
+ Return a list of the values.
+
+ These are each a scalar type, which is a Python scalar
+ (for str, int, float) or a pandas scalar
+ (for Timestamp/Timedelta/Interval/Period)
+ """
+ if is_datetimelike(self.categories):
+ return [_maybe_box_datetimelike(x) for x in self]
+ return np.array(self).tolist()
+
def reshape(self, new_shape, *args, **kwargs):
"""
.. deprecated:: 0.19.0
@@ -765,8 +777,9 @@ def set_categories(self, new_categories, ordered=None, rename=False,
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_categories)] = -1
else:
- values = cat.__array__()
- cat._codes = _get_codes_for_values(values, new_categories)
+ codes = _recode_for_categories(self.codes, self.categories,
+ new_categories)
+ cat._codes = codes
cat._categories = new_categories
if ordered is None:
@@ -2101,6 +2114,38 @@ def _get_codes_for_values(values, categories):
return coerce_indexer_dtype(t.lookup(vals), cats)
+def _recode_for_categories(codes, old_categories, new_categories):
+ """
+ Convert a set of codes for to a new set of categories
+
+ Parameters
+ ----------
+ codes : array
+ old_categories, new_categories : Index
+
+ Returns
+ -------
+ new_codes : array
+
+ Examples
+ --------
+ >>> old_cat = pd.Index(['b', 'a', 'c'])
+ >>> new_cat = pd.Index(['a', 'b'])
+ >>> codes = np.array([0, 1, 1, 2])
+ >>> _recode_for_categories(codes, old_cat, new_cat)
+ array([ 1, 0, 0, -1])
+ """
+ from pandas.core.algorithms import take_1d
+
+ if len(old_categories) == 0:
+ # All null anyway, so just retain the nulls
+ return codes
+ indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
+ new_categories)
+ new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
+ return new_codes
+
+
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index 0ce45eea119ed..f6f956832eebe 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -314,6 +314,7 @@ def union_categoricals(to_union, sort_categories=False, ignore_order=False):
Categories (3, object): [b, c, a]
"""
from pandas import Index, Categorical, CategoricalIndex, Series
+ from pandas.core.categorical import _recode_for_categories
if len(to_union) == 0:
raise ValueError('No Categoricals to union')
@@ -359,14 +360,8 @@ def _maybe_unwrap(x):
new_codes = []
for c in to_union:
- if len(c.categories) > 0:
- indexer = categories.get_indexer(c.categories)
-
- from pandas.core.algorithms import take_1d
- new_codes.append(take_1d(indexer, c.codes, fill_value=-1))
- else:
- # must be all NaN
- new_codes.append(c.codes)
+ new_codes.append(_recode_for_categories(c.codes, c.categories,
+ categories))
new_codes = np.concatenate(new_codes)
else:
# ordered - to show a proper error message
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5991ec825c841..dd5d490ea66a8 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1479,8 +1479,6 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
Character recognized as decimal separator. E.g. use ',' for
European data
- .. versionadded:: 0.16.0
-
"""
formatter = fmt.CSVFormatter(self, path_or_buf,
line_terminator=line_terminator, sep=sep,
@@ -2165,8 +2163,6 @@ def _getitem_frame(self, key):
def query(self, expr, inplace=False, **kwargs):
"""Query the columns of a frame with a boolean expression.
- .. versionadded:: 0.13
-
Parameters
----------
expr : string
@@ -2561,8 +2557,6 @@ def assign(self, **kwargs):
Assign new columns to a DataFrame, returning a new object
(a copy) with all the original columns in addition to the new ones.
- .. versionadded:: 0.16.0
-
Parameters
----------
kwargs : keyword, value pairs
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index cdb08d8887e05..a71bf7be1bc75 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1265,7 +1265,7 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None,
Parameters
----------
path_or_buf : the path or buffer to write the result string
- if this is None, return a StringIO of the converted string
+ if this is None, return the converted string
orient : string
* Series
@@ -1905,10 +1905,6 @@ def _slice(self, slobj, axis=0, kind=None):
return result
def _set_item(self, key, value):
- if isinstance(key, str) and callable(getattr(self, key, None)):
- warnings.warn("Column name '{key}' collides with a built-in "
- "method, which will cause unexpected attribute "
- "behavior".format(key=key), stacklevel=3)
self._data.set(key, value)
self._clear_item_cache()
@@ -2352,8 +2348,6 @@ def drop(self, labels, axis=0, level=None, inplace=False, errors='raise'):
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
- .. versionadded:: 0.16.1
-
Returns
-------
dropped : type of caller
@@ -3074,8 +3068,6 @@ def sample(self, n=None, frac=None, replace=False, weights=None,
"""
Returns a random sample of items from an axis of object.
- .. versionadded:: 0.16.1
-
Parameters
----------
n : int, optional
@@ -3232,8 +3224,6 @@ def sample(self, n=None, frac=None, replace=False, weights=None,
_shared_docs['pipe'] = ("""
Apply func(self, \*args, \*\*kwargs)
- .. versionadded:: 0.16.2
-
Parameters
----------
func : function
@@ -3441,8 +3431,8 @@ def __setattr__(self, name, value):
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
if isinstance(self, ABCDataFrame) and (is_list_like(value)):
- warnings.warn("Pandas doesn't allow Series to be assigned "
- "into nonexistent columns - see "
+ warnings.warn("Pandas doesn't allow columns to be "
+ "created via a new attribute name - see "
"https://pandas.pydata.org/pandas-docs/"
"stable/indexing.html#attribute-access",
stacklevel=2)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 248f3b2095a78..f14ed08a27fae 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1393,12 +1393,21 @@ def nth(self, n, dropna=None):
return out.sort_index() if self.sort else out
- if isinstance(self._selected_obj, DataFrame) and \
- dropna not in ['any', 'all']:
- # Note: when agg-ing picker doesn't raise this, just returns NaN
- raise ValueError("For a DataFrame groupby, dropna must be "
- "either None, 'any' or 'all', "
- "(was passed %s)." % (dropna),)
+ if dropna not in ['any', 'all']:
+ if isinstance(self._selected_obj, Series) and dropna is True:
+                warnings.warn("the dropna='%s' keyword is deprecated, "
+ "use dropna='all' instead. "
+ "For a Series groupby, dropna must be "
+ "either None, 'any' or 'all'." % (dropna),
+ FutureWarning,
+ stacklevel=2)
+ dropna = 'all'
+ else:
+ # Note: when agg-ing picker doesn't raise this,
+ # just returns NaN
+ raise ValueError("For a DataFrame groupby, dropna must be "
+ "either None, 'any' or 'all', "
+ "(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 6a30eaefaaae7..378c9f7e6dd6a 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -585,12 +585,6 @@ def memory_usage(self, deep=False):
return result
# ops compat
- def tolist(self):
- """
- return a list of the Index values
- """
- return list(self.values)
-
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
@@ -1601,9 +1595,6 @@ def is_all_dates(self):
return False
return is_datetime_array(_ensure_object(self.values))
- def __iter__(self):
- return iter(self.values)
-
def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
@@ -2450,10 +2441,24 @@ def _get_unique_index(self, dropna=False):
Returns
-------
- loc : int if unique index, possibly slice or mask if not
+ loc : int if unique index, slice if monotonic index, else mask
+
+ Examples
+ ---------
+ >>> unique_index = pd.%(klass)s(list('abc'))
+ >>> unique_index.get_loc('b')
+ 1
+
+ >>> monotonic_index = pd.%(klass)s(list('abbc'))
+ >>> monotonic_index.get_loc('b')
+ slice(1, 3, None)
+
+ >>> non_monotonic_index = pd.%(klass)s(list('abcb'))
+ >>> non_monotonic_index.get_loc('b')
+ array([False, True, False, True], dtype=bool)
"""
- @Appender(_index_shared_docs['get_loc'])
+ @Appender(_index_shared_docs['get_loc'] % _index_doc_kwargs)
def get_loc(self, key, method=None, tolerance=None):
if method is None:
if tolerance is not None:
@@ -2529,15 +2534,23 @@ def set_value(self, arr, key, value):
def _get_level_values(self, level):
"""
Return an Index of values for requested level, equal to the length
- of the index
+ of the index.
Parameters
----------
- level : int
+ level : int or str
+ ``level`` is either the integer position of the level in the
+ MultiIndex, or the name of the level.
Returns
-------
values : Index
+ ``self``, as there is only one level in the Index.
+
+ See also
+ ---------
+ pandas.MultiIndex.get_level_values : get values for a level of a
+ MultiIndex
"""
self._validate_index_level(level)
@@ -3457,7 +3470,7 @@ def _searchsorted_monotonic(self, label, side='left'):
# everything for it to work (element ordering, search side and
# resulting value).
pos = self[::-1].searchsorted(label, side='right' if side == 'left'
- else 'right')
+ else 'left')
return len(self) - pos
raise ValueError('index must be monotonic increasing or decreasing')
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 0681202289311..de9063098813a 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -33,8 +33,6 @@ class CategoricalIndex(Index, base.PandasDelegate):
Immutable Index implementing an ordered, sliceable set. CategoricalIndex
represents a sparsely populated Index with an underlying Categorical.
- .. versionadded:: 0.16.1
-
Parameters
----------
data : array-like or Categorical, (1-dimensional)
@@ -132,6 +130,10 @@ def _create_categorical(self, data, categories=None, ordered=None):
-------
Categorical
"""
+ if (isinstance(data, (ABCSeries, type(self))) and
+ is_categorical_dtype(data)):
+ data = data.values
+
if not isinstance(data, ABCCategorical):
ordered = False if ordered is None else ordered
from pandas.core.categorical import Categorical
@@ -253,6 +255,9 @@ def get_values(self):
""" return the underlying data as an ndarray """
return self._data.get_values()
+ def tolist(self):
+ return self._data.tolist()
+
@property
def codes(self):
return self._data.codes
@@ -349,7 +354,7 @@ def _to_safe_for_reshape(self):
def get_loc(self, key, method=None):
"""
- Get integer location for requested label
+ Get integer location for requested label.
Parameters
----------
@@ -359,7 +364,21 @@ def get_loc(self, key, method=None):
Returns
-------
- loc : int if unique index, possibly slice or mask if not
+ loc : int if unique index, slice if monotonic index, else mask
+
+ Examples
+ ---------
+ >>> unique_index = pd.CategoricalIndex(list('abc'))
+ >>> unique_index.get_loc('b')
+ 1
+
+ >>> monotonic_index = pd.CategoricalIndex(list('abbc'))
+ >>> monotonic_index.get_loc('b')
+ slice(1, 3, None)
+
+        >>> non_monotonic_index = pd.CategoricalIndex(list('abcb'))
+ >>> non_monotonic_index.get_loc('b')
+ array([False, True, False, True], dtype=bool)
"""
codes = self.categories.get_loc(key)
if (codes == -1):
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 5a04c550f4502..1c8d0b334b91c 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -292,8 +292,8 @@ def __new__(cls, data=None,
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
- raise ValueError('Periods must be a number, got %s' %
- str(periods))
+ msg = 'periods must be a number, got {periods}'
+ raise TypeError(msg.format(periods=periods))
if data is None and freq is None:
raise ValueError("Must provide freq argument if no data is "
@@ -412,7 +412,8 @@ def __new__(cls, data=None,
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False, ambiguous='raise', closed=None):
if com._count_not_none(start, end, periods) != 2:
- raise ValueError('Must specify two of start, end, or periods')
+ raise ValueError('Of the three parameters: start, end, and '
+ 'periods, exactly two must be specified')
_normalized = True
@@ -1577,7 +1578,7 @@ def _set_freq(self, value):
days_in_month = _field_accessor(
'days_in_month',
'dim',
- "The number of days in the month\n\n.. versionadded:: 0.16.0")
+ "The number of days in the month")
daysinmonth = days_in_month
is_month_start = _field_accessor(
'is_month_start',
@@ -2004,7 +2005,7 @@ def _generate_regular_range(start, end, periods, offset):
def date_range(start=None, end=None, periods=None, freq='D', tz=None,
normalize=False, name=None, closed=None, **kwargs):
"""
- Return a fixed frequency datetime index, with day (calendar) as the default
+ Return a fixed frequency DatetimeIndex, with day (calendar) as the default
frequency
Parameters
@@ -2013,24 +2014,25 @@ def date_range(start=None, end=None, periods=None, freq='D', tz=None,
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
- periods : integer or None, default None
- If None, must specify start and end
+ periods : integer, default None
+ Number of periods to generate
freq : string or DateOffset, default 'D' (calendar daily)
Frequency strings can have multiples, e.g. '5H'
- tz : string or None
+ tz : string, default None
Time zone name for returning localized DatetimeIndex, for example
Asia/Hong_Kong
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
- name : str, default None
- Name of the resulting index
- closed : string or None, default None
+ name : string, default None
+ Name of the resulting DatetimeIndex
+ closed : string, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
- 2 of start, end, or periods must be specified
+ Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
+ must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
@@ -2047,7 +2049,7 @@ def date_range(start=None, end=None, periods=None, freq='D', tz=None,
def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
normalize=True, name=None, closed=None, **kwargs):
"""
- Return a fixed frequency datetime index, with business day as the default
+ Return a fixed frequency DatetimeIndex, with business day as the default
frequency
Parameters
@@ -2056,8 +2058,8 @@ def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
- periods : integer or None, default None
- If None, must specify start and end
+ periods : integer, default None
+ Number of periods to generate
freq : string or DateOffset, default 'B' (business daily)
Frequency strings can have multiples, e.g. '5H'
tz : string or None
@@ -2065,15 +2067,16 @@ def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
Asia/Beijing
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
- name : str, default None
- Name for the resulting index
- closed : string or None, default None
+ name : string, default None
+ Name of the resulting DatetimeIndex
+ closed : string, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
- 2 of start, end, or periods must be specified
+ Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
+ must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
@@ -2091,7 +2094,7 @@ def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
normalize=True, name=None, closed=None, **kwargs):
"""
- **EXPERIMENTAL** Return a fixed frequency datetime index, with
+ **EXPERIMENTAL** Return a fixed frequency DatetimeIndex, with
CustomBusinessDay as the default frequency
.. warning:: EXPERIMENTAL
@@ -2105,29 +2108,30 @@ def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
- periods : integer or None, default None
- If None, must specify start and end
+ periods : integer, default None
+ Number of periods to generate
freq : string or DateOffset, default 'C' (CustomBusinessDay)
Frequency strings can have multiples, e.g. '5H'
- tz : string or None
+ tz : string, default None
Time zone name for returning localized DatetimeIndex, for example
Asia/Beijing
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
- name : str, default None
- Name for the resulting index
- weekmask : str, Default 'Mon Tue Wed Thu Fri'
+ name : string, default None
+ Name of the resulting DatetimeIndex
+ weekmask : string, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
- closed : string or None, default None
+ closed : string, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
- 2 of start, end, or periods must be specified
+ Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
+ must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index e0ed6c7ea35c0..d3fe25c7ef5ec 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -15,6 +15,8 @@
is_float_dtype,
is_interval_dtype,
is_scalar,
+ is_float,
+ is_number,
is_integer)
from pandas.core.indexes.base import (
Index, _ensure_index,
@@ -25,11 +27,15 @@
Interval, IntervalMixin, IntervalTree,
intervals_to_interval_bounds)
+from pandas.core.indexes.datetimes import date_range
+from pandas.core.indexes.timedeltas import timedelta_range
from pandas.core.indexes.multi import MultiIndex
from pandas.compat.numpy import function as nv
from pandas.core import common as com
from pandas.util._decorators import cache_readonly, Appender
from pandas.core.config import get_option
+from pandas.tseries.frequencies import to_offset
+from pandas.tseries.offsets import DateOffset
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
@@ -683,6 +689,34 @@ def _find_non_overlapping_monotonic_bounds(self, key):
return start, stop
def get_loc(self, key, method=None):
+ """Get integer location for requested label.
+
+ Parameters
+ ----------
+ key : label
+ method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
+ * default: exact matches only.
+ * pad / ffill: find the PREVIOUS index value if no exact match.
+ * backfill / bfill: use NEXT index value if no exact match
+ * nearest: use the NEAREST index value if no exact match. Tied
+ distances are broken by preferring the larger index value.
+
+ Returns
+ -------
+ loc : int if unique index, slice if monotonic index, else mask
+
+ Examples
+ --------
+ >>> index = pd.IntervalIndex.from_intervals([pd.Interval(0, 1), pd.Interval(1, 2)])
+ >>> index.get_loc(1)
+ 0
+ >>> index.get_loc(1.5) # a point inside an interval
+ 1
+
+ >>> overlapping_index = pd.IntervalIndex.from_intervals([pd.Interval(0, 2), pd.Interval(1, 2)])
+ >>> overlapping_index.get_loc(1.5)
+ array([0, 1], dtype=int64)
+ """
self._check_method(method)
original_key = key
@@ -912,7 +946,7 @@ def take(self, indices, axis=0, allow_fill=True,
except ValueError:
# we need to coerce; migth have NA's in an
- # interger dtype
+ # integer dtype
new_left = taker(left.astype(float))
new_right = taker(right.astype(float))
@@ -1028,54 +1062,152 @@ def func(self, other):
IntervalIndex._add_logical_methods_disabled()
-def interval_range(start=None, end=None, freq=None, periods=None,
- name=None, closed='right', **kwargs):
+def _is_valid_endpoint(endpoint):
+ """helper for interval_range to check if start/end are valid types"""
+ return any([is_number(endpoint),
+ isinstance(endpoint, Timestamp),
+ isinstance(endpoint, Timedelta),
+ endpoint is None])
+
+
+def _is_type_compatible(a, b):
+ """helper for interval_range to check type compat of start/end/freq"""
+ is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))
+ is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))
+ return ((is_number(a) and is_number(b)) or
+ (is_ts_compat(a) and is_ts_compat(b)) or
+ (is_td_compat(a) and is_td_compat(b)) or
+ com._any_none(a, b))
+
+
+def interval_range(start=None, end=None, periods=None, freq=None,
+ name=None, closed='right'):
"""
Return a fixed frequency IntervalIndex
Parameters
----------
- start : string or datetime-like, default None
- Left bound for generating data
- end : string or datetime-like, default None
- Right bound for generating data
- freq : interger, string or DateOffset, default 1
- periods : interger, default None
- name : str, default None
- Name of the resulting index
+ start : numeric or datetime-like, default None
+ Left bound for generating intervals
+ end : numeric or datetime-like, default None
+ Right bound for generating intervals
+ periods : integer, default None
+ Number of periods to generate
+ freq : numeric, string, or DateOffset, default None
+ The length of each interval. Must be consistent with the type of start
+ and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
+ for numeric and 'D' (calendar daily) for datetime-like.
+ name : string, default None
+ Name of the resulting IntervalIndex
closed : string, default 'right'
options are: 'left', 'right', 'both', 'neither'
Notes
-----
- 2 of start, end, or periods must be specified
+ Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
+ must be specified.
Returns
-------
rng : IntervalIndex
+
+ Examples
+ --------
+
+ Numeric ``start`` and ``end`` is supported.
+
+ >>> pd.interval_range(start=0, end=5)
+ IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]
+ closed='right', dtype='interval[int64]')
+
+ Additionally, datetime-like input is also supported.
+
+ >>> pd.interval_range(start='2017-01-01', end='2017-01-04')
+ IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],
+ (2017-01-03, 2017-01-04]]
+ closed='right', dtype='interval[datetime64[ns]]')
+
+ The ``freq`` parameter specifies the frequency between the left and right
+ endpoints of the individual intervals within the ``IntervalIndex``. For
+ numeric ``start`` and ``end``, the frequency must also be numeric.
+
+ >>> pd.interval_range(start=0, periods=4, freq=1.5)
+ IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]]
+ closed='right', dtype='interval[float64]')
+
+ Similarly, for datetime-like ``start`` and ``end``, the frequency must be
+ convertible to a DateOffset.
+
+ >>> pd.interval_range(start='2017-01-01', periods=3, freq='MS')
+ IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],
+ (2017-03-01, 2017-04-01]]
+ closed='right', dtype='interval[datetime64[ns]]')
+
+ The ``closed`` parameter specifies which endpoints of the individual
+ intervals within the ``IntervalIndex`` are closed.
+
+ >>> pd.interval_range(end=5, periods=4, closed='both')
+ IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]]
+ closed='both', dtype='interval[int64]')
"""
+ if com._count_not_none(start, end, periods) != 2:
+ raise ValueError('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
+
+ start = com._maybe_box_datetimelike(start)
+ end = com._maybe_box_datetimelike(end)
+ endpoint = next(com._not_none(start, end))
+
+ if not _is_valid_endpoint(start):
+ msg = 'start must be numeric or datetime-like, got {start}'
+ raise ValueError(msg.format(start=start))
+
+ if not _is_valid_endpoint(end):
+ msg = 'end must be numeric or datetime-like, got {end}'
+ raise ValueError(msg.format(end=end))
+
+ if is_float(periods):
+ periods = int(periods)
+ elif not is_integer(periods) and periods is not None:
+ msg = 'periods must be a number, got {periods}'
+ raise TypeError(msg.format(periods=periods))
+
+ freq = freq or (1 if is_number(endpoint) else 'D')
+ if not is_number(freq):
+ try:
+ freq = to_offset(freq)
+ except ValueError:
+ raise ValueError('freq must be numeric or convertible to '
+ 'DateOffset, got {freq}'.format(freq=freq))
+
+ # verify type compatibility
+ if not all([_is_type_compatible(start, end),
+ _is_type_compatible(start, freq),
+ _is_type_compatible(end, freq)]):
+ raise TypeError("start, end, freq need to be type compatible")
- if freq is None:
- freq = 1
+ if is_number(endpoint):
+ if periods is None:
+ periods = int((end - start) // freq)
- if start is None:
- if periods is None or end is None:
- raise ValueError("must specify 2 of start, end, periods")
- start = end - periods * freq
- if end is None:
- if periods is None or start is None:
- raise ValueError("must specify 2 of start, end, periods")
+ if start is None:
+ start = end - periods * freq
+
+ # force end to be consistent with freq (lower if freq skips over end)
end = start + periods * freq
- if periods is None:
- if start is None or end is None:
- raise ValueError("must specify 2 of start, end, periods")
- pass
-
- # must all be same units or None
- arr = np.array([start, end, freq])
- if is_object_dtype(arr):
- raise ValueError("start, end, freq need to be the same type")
-
- return IntervalIndex.from_breaks(np.arange(start, end, freq),
- name=name,
- closed=closed)
+
+ # end + freq for inclusive endpoint
+ breaks = np.arange(start, end + freq, freq)
+ elif isinstance(endpoint, Timestamp):
+ # add one to account for interval endpoints (n breaks = n-1 intervals)
+ if periods is not None:
+ periods += 1
+ breaks = date_range(start=start, end=end, periods=periods, freq=freq)
+ else:
+ # add one to account for interval endpoints (n breaks = n-1 intervals)
+ if periods is not None:
+ periods += 1
+ breaks = timedelta_range(start=start, end=end, periods=periods,
+ freq=freq)
+
+ return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index d7d5b6d128a2c..2f3cd3e3dae81 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -882,15 +882,34 @@ def _get_level_values(self, level):
def get_level_values(self, level):
"""
Return vector of label values for requested level,
- equal to the length of the index
+ equal to the length of the index.
Parameters
----------
- level : int or level name
+ level : int or str
+ ``level`` is either the integer position of the level in the
+ MultiIndex, or the name of the level.
Returns
-------
values : Index
+ ``values`` is a level of this MultiIndex converted to
+ a single :class:`Index` (or subclass thereof).
+
+ Examples
+ --------
+
+ Create a MultiIndex:
+
+ >>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
+ >>> mi.names = ['level_1', 'level_2']
+
+ Get level values by supplying level as either integer or name:
+
+ >>> mi.get_level_values(0)
+ Index(['a', 'b', 'c'], dtype='object', name='level_1')
+ >>> mi.get_level_values('level_2')
+ Index(['d', 'e', 'f'], dtype='object', name='level_2')
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
@@ -1947,6 +1966,19 @@ def get_loc(self, key, method=None):
Returns
-------
loc : int, slice object or boolean mask
+
+ Examples
+ --------
+ >>> mi = pd.MultiIndex.from_arrays([list('abc'), list('def')])
+ >>> mi.get_loc('b')
+ slice(1, 2, None)
+ >>> mi.get_loc(('b', 'e'))
+ 1
+
+ See also
+ --------
+ get_locs : Given a tuple of slices/lists/labels/boolean indexer to a level-wise
+ spec, produce an indexer to extract those locations
"""
if method is not None:
raise NotImplementedError('only the default get_loc method is '
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 0915462d4d421..fb47d1db48610 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -199,8 +199,8 @@ def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
- raise ValueError('Periods must be a number, got %s' %
- str(periods))
+ msg = 'periods must be a number, got {periods}'
+ raise TypeError(msg.format(periods=periods))
if name is None and hasattr(data, 'name'):
name = data.name
@@ -1051,8 +1051,9 @@ def tz_localize(self, tz, infer_dst=False):
def _get_ordinal_range(start, end, periods, freq, mult=1):
- if com._count_not_none(start, end, periods) < 2:
- raise ValueError('Must specify 2 of start, end, periods')
+ if com._count_not_none(start, end, periods) != 2:
+ raise ValueError('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
if freq is not None:
_, mult = _gfc(freq)
@@ -1066,9 +1067,9 @@ def _get_ordinal_range(start, end, periods, freq, mult=1):
is_end_per = isinstance(end, Period)
if is_start_per and is_end_per and start.freq != end.freq:
- raise ValueError('Start and end must have same freq')
+ raise ValueError('start and end must have same freq')
if (start is tslib.NaT or end is tslib.NaT):
- raise ValueError('Start and end must not be NaT')
+ raise ValueError('start and end must not be NaT')
if freq is None:
if is_start_per:
@@ -1157,24 +1158,55 @@ def pnow(freq=None):
def period_range(start=None, end=None, periods=None, freq='D', name=None):
"""
- Return a fixed frequency datetime index, with day (calendar) as the default
+ Return a fixed frequency PeriodIndex, with day (calendar) as the default
frequency
-
Parameters
----------
- start : starting value, period-like, optional
- end : ending value, period-like, optional
- periods : int, default None
- Number of periods in the index
- freq : str/DateOffset, default 'D'
+ start : string or period-like, default None
+ Left bound for generating periods
+ end : string or period-like, default None
+ Right bound for generating periods
+ periods : integer, default None
+ Number of periods to generate
+ freq : string or DateOffset, default 'D' (calendar daily)
Frequency alias
- name : str, default None
- Name for the resulting PeriodIndex
+ name : string, default None
+ Name of the resulting PeriodIndex
+
+ Notes
+ -----
+ Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
+ must be specified.
+
+ To learn more about the frequency strings, please see `this link
+ <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Returns
-------
prng : PeriodIndex
+
+ Examples
+ --------
+
+ >>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
+ PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05',
+ '2017-06', '2017-07', '2017-08', '2017-09', '2017-10',
+ '2017-11', '2017-12', '2018-01'],
+ dtype='period[M]', freq='M')
+
+ If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
+ endpoints for a ``PeriodIndex`` with frequency matching that of the
+ ``period_range`` constructor.
+
+ >>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
+ ... end=pd.Period('2017Q2', freq='Q'), freq='M')
+ PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
+ dtype='period[M]', freq='M')
"""
+ if com._count_not_none(start, end, periods) != 2:
+ raise ValueError('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
+
return PeriodIndex(start=start, end=end, periods=periods,
freq=freq, name=name)
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 82412d3a7ef57..b759abaed4e56 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -324,12 +324,13 @@ def intersection(self, other):
if not len(self) or not len(other):
return RangeIndex._simple_new(None)
+ first = self[::-1] if self._step < 0 else self
+ second = other[::-1] if other._step < 0 else other
+
# check whether intervals intersect
# deals with in- and decreasing ranges
- int_low = max(min(self._start, self._stop + 1),
- min(other._start, other._stop + 1))
- int_high = min(max(self._stop, self._start + 1),
- max(other._stop, other._start + 1))
+ int_low = max(first._start, second._start)
+ int_high = min(first._stop, second._stop)
if int_high <= int_low:
return RangeIndex._simple_new(None)
@@ -337,21 +338,24 @@ def intersection(self, other):
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
- gcd, s, t = self._extended_gcd(self._step, other._step)
+ gcd, s, t = first._extended_gcd(first._step, second._step)
# check whether element sets intersect
- if (self._start - other._start) % gcd:
+ if (first._start - second._start) % gcd:
return RangeIndex._simple_new(None)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
- tmp_start = self._start + (other._start - self._start) * \
- self._step // gcd * s
- new_step = self._step * other._step // gcd
+ tmp_start = first._start + (second._start - first._start) * \
+ first._step // gcd * s
+ new_step = first._step * second._step // gcd
new_index = RangeIndex(tmp_start, int_high, new_step, fastpath=True)
# adjust index to limiting interval
new_index._start = new_index._min_fitting_element(int_low)
+
+ if (self._step < 0 and other._step < 0) is not (new_index._step < 0):
+ new_index = new_index[::-1]
return new_index
def _min_fitting_element(self, lower_limit):
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 2823951c0f348..d7b7d56d74a3a 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -180,8 +180,8 @@ def __new__(cls, data=None, unit=None,
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
- raise ValueError('Periods must be a number, got %s' %
- str(periods))
+ msg = 'periods must be a number, got {periods}'
+ raise TypeError(msg.format(periods=periods))
if data is None and freq is None:
raise ValueError("Must provide freq argument if no data is "
@@ -234,7 +234,8 @@ def __new__(cls, data=None, unit=None,
@classmethod
def _generate(cls, start, end, periods, name, offset, closed=None):
if com._count_not_none(start, end, periods) != 2:
- raise ValueError('Must specify two of start, end, or periods')
+ raise ValueError('Of the three parameters: start, end, and '
+ 'periods, exactly two must be specified')
if start is not None:
start = Timedelta(start)
@@ -960,22 +961,22 @@ def _generate_regular_range(start, end, periods, offset):
def timedelta_range(start=None, end=None, periods=None, freq='D',
name=None, closed=None):
"""
- Return a fixed frequency timedelta index, with day as the default
+ Return a fixed frequency TimedeltaIndex, with day as the default
frequency
Parameters
----------
start : string or timedelta-like, default None
- Left bound for generating dates
- end : string or datetime-like, default None
- Right bound for generating dates
- periods : integer or None, default None
- If None, must specify start and end
+ Left bound for generating timedeltas
+ end : string or timedelta-like, default None
+ Right bound for generating timedeltas
+ periods : integer, default None
+ Number of periods to generate
freq : string or DateOffset, default 'D' (calendar daily)
Frequency strings can have multiples, e.g. '5H'
- name : str, default None
- Name of the resulting index
- closed : string or None, default None
+ name : string, default None
+ Name of the resulting TimedeltaIndex
+ closed : string, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
@@ -985,11 +986,34 @@ def timedelta_range(start=None, end=None, periods=None, freq='D',
Notes
-----
- 2 of start, end, or periods must be specified.
+ Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
+ must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
+
+ Examples
+ --------
+
+ >>> pd.timedelta_range(start='1 day', periods=4)
+ TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
+ dtype='timedelta64[ns]', freq='D')
+
+ The ``closed`` parameter specifies which endpoint is included. The default
+ behavior is to include both endpoints.
+
+ >>> pd.timedelta_range(start='1 day', periods=4, closed='right')
+ TimedeltaIndex(['2 days', '3 days', '4 days'],
+ dtype='timedelta64[ns]', freq='D')
+
+ The ``freq`` parameter specifies the frequency of the TimedeltaIndex.
+ Only fixed frequencies can be passed, non-fixed frequencies such as
+ 'M' (month end) will raise.
+
+ >>> pd.timedelta_range(start='1 day', end='2 days', freq='6H')
+ TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
+ '1 days 18:00:00', '2 days 00:00:00'],
+ dtype='timedelta64[ns]', freq='6H')
"""
return TimedeltaIndex(start=start, end=end, periods=periods,
- freq=freq, name=name,
- closed=closed)
+ freq=freq, name=name, closed=closed)
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 9e180c624963c..4040c65136617 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -72,7 +72,7 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
The keys, levels, and names arguments are all optional.
A walkthrough of how this method fits in with other tools for combining
- panda objects can be found `here
+ pandas objects can be found `here
<http://pandas.pydata.org/pandas-docs/stable/merging.html>`__.
See Also
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 947300a28e510..6bb6988a7442a 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -447,7 +447,7 @@ def merge_asof(left, right, on=None,
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
- We only asof within 2ms betwen the quote time and the trade time
+ We only asof within 2ms between the quote time and the trade time
>>> pd.merge_asof(trades, quotes,
... on='time',
@@ -460,9 +460,9 @@ def merge_asof(left, right, on=None,
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
- We only asof within 10ms betwen the quote time and the trade time
+ We only asof within 10ms between the quote time and the trade time
and we exclude exact matches on time. However *prior* data will
- propogate forward
+ propagate forward
>>> pd.merge_asof(trades, quotes,
... on='time',
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index f07123ca18489..d19de6030d473 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -145,7 +145,7 @@ def _add_margins(table, data, values, rows, cols, aggfunc,
if not isinstance(margins_name, compat.string_types):
raise ValueError('margins_name argument must be a string')
- msg = 'Conflicting name "{name}" in margins'.format(name=margins_name)
+ msg = u'Conflicting name "{name}" in margins'.format(name=margins_name)
for level in table.index.names:
if margins_name in table.index.get_level_values(level):
raise ValueError(msg)
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index b4abba8026b35..7260bc9a8b7a1 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -1110,8 +1110,6 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
Whether the dummy columns should be sparse or not. Returns
SparseDataFrame if `data` is a Series or if all columns are included.
Otherwise returns a DataFrame with some SparseBlocks.
-
- .. versionadded:: 0.16.1
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index 2f5538556fa6d..fda339aa30461 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -359,7 +359,7 @@ def _preprocess_for_cut(x):
"""
handles preprocessing for cut where we convert passed
input to array, strip the index information and store it
- seperately
+ separately
"""
x_is_series = isinstance(x, Series)
series_index = None
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 6905fc1aced74..ac11c5f908fdc 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -19,7 +19,6 @@
is_integer, is_integer_dtype,
is_float_dtype,
is_extension_type, is_datetimetz,
- is_datetimelike,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_list_like,
@@ -1095,14 +1094,6 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True,
with open(buf, 'w') as f:
f.write(result)
- def __iter__(self):
- """ provide iteration over the values of the Series
- box values if necessary """
- if is_datetimelike(self):
- return (_maybe_box_datetimelike(x) for x in self._values)
- else:
- return iter(self._values)
-
def iteritems(self):
"""
Lazily iterate over (index, value) tuples
@@ -1118,10 +1109,6 @@ def keys(self):
"""Alias for index"""
return self.index
- def tolist(self):
- """ Convert Series to a nested list """
- return list(self.asobject)
-
def to_dict(self, into=dict):
"""
Convert Series to {label -> value} dict or dict-like object.
diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py
index 2f830a98db649..f965c91999a03 100644
--- a/pandas/core/sparse/array.py
+++ b/pandas/core/sparse/array.py
@@ -407,8 +407,18 @@ def to_dense(self, fill=None):
return self.values
def __iter__(self):
+ if np.issubdtype(self.dtype, np.floating):
+ boxer = float
+ elif np.issubdtype(self.dtype, np.integer):
+ boxer = int
+ else:
+ boxer = lambda x: x
+
for i in range(len(self)):
- yield self._get_val_at(i)
+ r = self._get_val_at(i)
+
+ # box em
+ yield boxer(r)
def __getitem__(self, key):
"""
diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py
index 99aec2dd11569..2aecb9d7c4ffb 100644
--- a/pandas/core/sparse/series.py
+++ b/pandas/core/sparse/series.py
@@ -732,8 +732,6 @@ def to_coo(self, row_levels=(0, ), column_levels=(1, ), sort_labels=False):
(labels) or numbers of the levels. {row_levels, column_levels} must be
a partition of the MultiIndex level names (or numbers).
- .. versionadded:: 0.16.0
-
Parameters
----------
row_levels : tuple/list
@@ -784,8 +782,6 @@ def from_coo(cls, A, dense_index=False):
"""
Create a SparseSeries from a scipy.sparse.coo_matrix.
- .. versionadded:: 0.16.0
-
Parameters
----------
A : scipy.sparse.coo_matrix
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 48bc2ee05dd68..021f88d1aec00 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -602,8 +602,6 @@ def str_extract(arr, pat, flags=0, expand=None):
For each subject string in the Series, extract groups from the
first match of regular expression pat.
- .. versionadded:: 0.13.0
-
Parameters
----------
pat : string
@@ -1016,7 +1014,6 @@ def str_split(arr, pat=None, n=None):
* If True, return DataFrame/MultiIndex expanding dimensionality.
* If False, return Series/Index.
- .. versionadded:: 0.16.1
return_type : deprecated, use `expand`
Returns
@@ -1047,8 +1044,6 @@ def str_rsplit(arr, pat=None, n=None):
string, starting at the end of the string and working to the front.
Equivalent to :meth:`str.rsplit`.
- .. versionadded:: 0.16.2
-
Parameters
----------
pat : string, default None
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 9ff0275a7c370..9dde26f43ad33 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -605,7 +605,7 @@ def f(value):
if len(excess):
raise ValueError("extra keys have been passed "
"to the datetime assemblage: "
- "[{excess}]".format(','.join(excess=excess)))
+ "[{excess}]".format(excess=','.join(excess)))
def coerce(values):
# we allow coercion to if errors allows
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index 87a4931421d7d..b2bf4ab7ff7f1 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -41,8 +41,7 @@ def to_feather(df, path):
Parameters
----------
df : DataFrame
- path : string
- File path
+ path : string file path, or file-like object
"""
path = _stringify_path(path)
@@ -92,8 +91,7 @@ def read_feather(path, nthreads=1):
Parameters
----------
- path : string
- File path
+ path : string file path, or file-like object
nthreads : int, default 1
Number of CPU threads to use when reading to pandas.DataFrame
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index ab689d196f4b6..51668bb6b0895 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -263,7 +263,7 @@ def build_font(self, props):
else None),
'strike': ('line-through' in decoration) or None,
'color': self.color_to_excel(props.get('color')),
- # shadow if nonzero digit before shadow colour
+ # shadow if nonzero digit before shadow color
'shadow': (bool(re.search('^[^#(]*[1-9]',
props['text-shadow']))
if 'text-shadow' in props else None),
@@ -304,7 +304,7 @@ def color_to_excel(self, val):
try:
return self.NAMED_COLORS[val]
except KeyError:
- warnings.warn('Unhandled colour format: {val!r}'.format(val=val),
+ warnings.warn('Unhandled color format: {val!r}'.format(val=val),
CSSWarning)
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 87d672197be30..d7677e3642c26 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -618,11 +618,53 @@ def applymap(self, func, subset=None, **kwargs):
-------
self : Styler
+ See Also
+ --------
+ Styler.where
+
"""
self._todo.append((lambda instance: getattr(instance, '_applymap'),
(func, subset), kwargs))
return self
+ def where(self, cond, value, other=None, subset=None, **kwargs):
+ """
+ Apply a function elementwise, updating the HTML
+ representation with a style which is selected in
+ accordance with the return value of a function.
+
+ .. versionadded:: 0.21.0
+
+ Parameters
+ ----------
+ cond : callable
+ ``cond`` should take a scalar and return a boolean
+ value : str
+ applied when ``cond`` returns true
+ other : str
+ applied when ``cond`` returns false
+ subset : IndexSlice
+ a valid indexer to limit ``data`` to *before* applying the
+ function. Consider using a pandas.IndexSlice
+ kwargs : dict
+ pass along to ``cond``
+
+ Returns
+ -------
+ self : Styler
+
+ See Also
+ --------
+ Styler.applymap
+
+ """
+
+ if other is None:
+ other = ''
+
+ return self.applymap(lambda val: value if cond(val) else other,
+ subset=subset, **kwargs)
+
def set_precision(self, precision):
"""
Set the precision used to render.
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 09603fd6fdcce..4b507b7f5df6f 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -58,13 +58,23 @@ def __init__(self):
"\nor via pip\n"
"pip install -U pyarrow\n")
+ self._pyarrow_lt_050 = LooseVersion(pyarrow.__version__) < '0.5.0'
+ self._pyarrow_lt_060 = LooseVersion(pyarrow.__version__) < '0.6.0'
self.api = pyarrow
- def write(self, df, path, compression='snappy', **kwargs):
+ def write(self, df, path, compression='snappy',
+ coerce_timestamps='ms', **kwargs):
path, _, _ = get_filepath_or_buffer(path)
- table = self.api.Table.from_pandas(df, timestamps_to_ms=True)
- self.api.parquet.write_table(
- table, path, compression=compression, **kwargs)
+ if self._pyarrow_lt_060:
+ table = self.api.Table.from_pandas(df, timestamps_to_ms=True)
+ self.api.parquet.write_table(
+ table, path, compression=compression, **kwargs)
+
+ else:
+ table = self.api.Table.from_pandas(df)
+ self.api.parquet.write_table(
+ table, path, compression=compression,
+ coerce_timestamps=coerce_timestamps, **kwargs)
def read(self, path):
path, _, _ = get_filepath_or_buffer(path)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 8b1a921536a1d..d9e83176d0d6e 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1716,6 +1716,7 @@ def _set_noconvert_columns(self):
# A set of integers will be converted to a list in
# the correct order every single time.
usecols = list(self.usecols)
+ usecols.sort()
elif (callable(self.usecols) or
self.usecols_dtype not in ('empty', None)):
# The names attribute should have the correct columns
@@ -2835,7 +2836,9 @@ def _rows_to_cols(self, content):
for row_num, actual_len in bad_lines:
msg = ('Expected %d fields in line %d, saw %d' %
(col_len, row_num + 1, actual_len))
- if len(self.delimiter) > 1 and self.quoting != csv.QUOTE_NONE:
+ if (self.delimiter and
+ len(self.delimiter) > 1 and
+ self.quoting != csv.QUOTE_NONE):
# see gh-13374
reason = ('Error could possibly be due to quotes being '
'ignored when a multi-char delimiter is used.')
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 712e9e9903f0a..9f819a4463bed 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -605,7 +605,7 @@ def open(self, mode='a', **kwargs):
except (Exception) as e:
- # trying to read from a non-existant file causes an error which
+ # trying to read from a non-existent file causes an error which
# is not part of IOError, make it one
if self._mode == 'r' and 'Unable to open/create file' in str(e):
raise IOError(str(e))
@@ -1621,7 +1621,7 @@ def __iter__(self):
def maybe_set_size(self, min_itemsize=None, **kwargs):
""" maybe set a string col itemsize:
- min_itemsize can be an interger or a dict with this columns name
+ min_itemsize can be an integer or a dict with this columns name
with an integer size """
if _ensure_decoded(self.kind) == u('string'):
@@ -1712,11 +1712,11 @@ def set_info(self, info):
self.__dict__.update(idx)
def get_attr(self):
- """ set the kind for this colummn """
+ """ set the kind for this column """
self.kind = getattr(self.attrs, self.kind_attr, None)
def set_attr(self):
- """ set the kind for this colummn """
+ """ set the kind for this column """
setattr(self.attrs, self.kind_attr, self.kind)
def read_metadata(self, handler):
@@ -2160,14 +2160,14 @@ def convert(self, values, nan_rep, encoding):
return self
def get_attr(self):
- """ get the data for this colummn """
+ """ get the data for this column """
self.values = getattr(self.attrs, self.kind_attr, None)
self.dtype = getattr(self.attrs, self.dtype_attr, None)
self.meta = getattr(self.attrs, self.meta_attr, None)
self.set_kind()
def set_attr(self):
- """ set the data for this colummn """
+ """ set the data for this column """
setattr(self.attrs, self.kind_attr, self.values)
setattr(self.attrs, self.meta_attr, self.meta)
if self.dtype is not None:
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 253ed03c25db9..afc1631a947c8 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -9,31 +9,30 @@
You can find more information on http://presbrey.mit.edu/PyDTA and
http://www.statsmodels.org/devel/
"""
-import numpy as np
-import sys
+import datetime
import struct
-from dateutil.relativedelta import relativedelta
+import sys
-from pandas.core.dtypes.common import (
- is_categorical_dtype, is_datetime64_dtype,
- _ensure_object)
+import numpy as np
+from dateutil.relativedelta import relativedelta
+from pandas._libs.lib import max_len_string_array, infer_dtype
+from pandas._libs.tslib import NaT, Timestamp
+import pandas as pd
+from pandas import compat, to_timedelta, to_datetime, isna, DatetimeIndex
+from pandas.compat import (lrange, lmap, lzip, text_type, string_types, range,
+ zip, BytesIO)
from pandas.core.base import StringMixin
from pandas.core.categorical import Categorical
+from pandas.core.dtypes.common import (is_categorical_dtype, _ensure_object,
+ is_datetime64_dtype)
from pandas.core.frame import DataFrame
from pandas.core.series import Series
-import datetime
-from pandas import compat, to_timedelta, to_datetime, isna, DatetimeIndex
-from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \
- zip, BytesIO
-from pandas.util._decorators import Appender
-import pandas as pd
-
from pandas.io.common import (get_filepath_or_buffer, BaseIterator,
_stringify_path)
-from pandas._libs.lib import max_len_string_array, infer_dtype
-from pandas._libs.tslib import NaT, Timestamp
+from pandas.util._decorators import Appender
+from pandas.util._decorators import deprecate_kwarg
VALID_ENCODINGS = ('ascii', 'us-ascii', 'latin-1', 'latin_1', 'iso-8859-1',
'iso8859-1', '8859', 'cp819', 'latin', 'latin1', 'L1')
@@ -53,11 +52,11 @@
Encoding used to parse the files. None defaults to latin-1."""
_statafile_processing_params2 = """\
-index : identifier of index column
- identifier of column that should be used as index of the DataFrame
+index_col : string, optional, default: None
+ Column to set as index
convert_missing : boolean, defaults to False
Flag indicating whether to convert missing values to their Stata
- representations. If False, missing values are replaced with nans.
+ representations. If False, missing values are replaced with nan.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
@@ -159,15 +158,16 @@
@Appender(_read_stata_doc)
+@deprecate_kwarg(old_arg_name='index', new_arg_name='index_col')
def read_stata(filepath_or_buffer, convert_dates=True,
- convert_categoricals=True, encoding=None, index=None,
+ convert_categoricals=True, encoding=None, index_col=None,
convert_missing=False, preserve_dtypes=True, columns=None,
order_categoricals=True, chunksize=None, iterator=False):
reader = StataReader(filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
- index=index, convert_missing=convert_missing,
+ index_col=index_col, convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
@@ -248,8 +248,9 @@ def _stata_elapsed_date_to_datetime_vec(dates, fmt):
def convert_year_month_safe(year, month):
"""
Convert year and month to datetimes, using pandas vectorized versions
- when the date range falls within the range supported by pandas. Other
- wise it falls back to a slower but more robust method using datetime.
+ when the date range falls within the range supported by pandas.
+ Otherwise it falls back to a slower but more robust method
+ using datetime.
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format='%Y%m')
@@ -510,8 +511,8 @@ def _cast_to_stata_types(data):
this range. If the int64 values are outside of the range of those
perfectly representable as float64 values, a warning is raised.
- bool columns are cast to int8. uint colums are converted to int of the
- same size if there is no loss in precision, other wise are upcast to a
+ bool columns are cast to int8. uint columns are converted to int of the
+ same size if there is no loss in precision, otherwise are upcast to a
larger type. uint64 is currently not supported since it is concerted to
object in a DataFrame.
"""
@@ -944,8 +945,9 @@ def __init__(self, encoding):
class StataReader(StataParser, BaseIterator):
__doc__ = _stata_reader_doc
+ @deprecate_kwarg(old_arg_name='index', new_arg_name='index_col')
def __init__(self, path_or_buf, convert_dates=True,
- convert_categoricals=True, index=None,
+ convert_categoricals=True, index_col=None,
convert_missing=False, preserve_dtypes=True,
columns=None, order_categoricals=True,
encoding='latin-1', chunksize=None):
@@ -956,7 +958,7 @@ def __init__(self, path_or_buf, convert_dates=True,
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
- self._index = index
+ self._index_col = index_col
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
@@ -1460,8 +1462,9 @@ def get_chunk(self, size=None):
return self.read(nrows=size)
@Appender(_read_method_doc)
+ @deprecate_kwarg(old_arg_name='index', new_arg_name='index_col')
def read(self, nrows=None, convert_dates=None,
- convert_categoricals=None, index=None,
+ convert_categoricals=None, index_col=None,
convert_missing=None, preserve_dtypes=None,
columns=None, order_categoricals=None):
# Handle empty file or chunk. If reading incrementally raise
@@ -1486,6 +1489,8 @@ def read(self, nrows=None, convert_dates=None,
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
+ if index_col is None:
+ index_col = self._index_col
if nrows is None:
nrows = self.nobs
@@ -1524,14 +1529,14 @@ def read(self, nrows=None, convert_dates=None,
self._read_value_labels()
if len(data) == 0:
- data = DataFrame(columns=self.varlist, index=index)
+ data = DataFrame(columns=self.varlist)
else:
- data = DataFrame.from_records(data, index=index)
+ data = DataFrame.from_records(data)
data.columns = self.varlist
# If index is not specified, use actual row number rather than
# restarting at 0 for each chunk.
- if index is None:
+ if index_col is None:
ix = np.arange(self._lines_read - read_lines, self._lines_read)
data = data.set_index(ix)
@@ -1553,7 +1558,7 @@ def read(self, nrows=None, convert_dates=None,
cols_ = np.where(self.dtyplist)[0]
# Convert columns (if needed) to match input type
- index = data.index
+ ix = data.index
requires_type_conversion = False
data_formatted = []
for i in cols_:
@@ -1563,7 +1568,7 @@ def read(self, nrows=None, convert_dates=None,
if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
requires_type_conversion = True
data_formatted.append(
- (col, Series(data[col], index, self.dtyplist[i])))
+ (col, Series(data[col], ix, self.dtyplist[i])))
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
@@ -1606,6 +1611,9 @@ def read(self, nrows=None, convert_dates=None,
if convert:
data = DataFrame.from_items(retyped_data)
+ if index_col is not None:
+ data = data.set_index(data.pop(index_col))
+
return data
def _do_convert_missing(self, data, convert_missing):
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index e5b9497993172..a0b7e93efd05c 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -342,7 +342,13 @@ def _compute_plot_data(self):
label = 'None'
data = data.to_frame(name=label)
- numeric_data = data._convert(datetime=True)._get_numeric_data()
+ # GH16953, _convert is needed as fallback, for ``Series``
+ # with ``dtype == object``
+ data = data._convert(datetime=True, timedelta=True)
+ numeric_data = data.select_dtypes(include=[np.number,
+ "datetime",
+ "datetimetz",
+ "timedelta"])
try:
is_empty = numeric_data.empty
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index db2211fb55135..54f87febdc214 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -413,7 +413,7 @@ def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
axvlines_kwds: keywords, optional
Options to be passed to axvline method for vertical lines
sort_labels: bool, False
- Sort class_column labels, useful when assigning colours
+ Sort class_column labels, useful when assigning colors
.. versionadded:: 0.20.0
diff --git a/pandas/plotting/_tools.py b/pandas/plotting/_tools.py
index 389e238ccb96e..c734855bdc09a 100644
--- a/pandas/plotting/_tools.py
+++ b/pandas/plotting/_tools.py
@@ -141,7 +141,7 @@ def _subplots(naxes=None, sharex=False, sharey=False, squeeze=True,
array of Axis objects are returned as numpy 1-d arrays.
- for NxM subplots with N>1 and M>1 are returned as a 2d array.
- If False, no squeezing at all is done: the returned axis object is always
+ If False, no squeezing is done: the returned axis object is always
a 2-d array containing Axis instances, even if it ends up being 1x1.
subplot_kw : dict
@@ -329,7 +329,7 @@ def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
if ncols > 1:
for ax in axarr:
# only the first column should get y labels -> set all other to
- # off as we only have labels in teh first column and we always
+ # off as we only have labels in the first column and we always
# have a subplot there, we can skip the layout test
if ax.is_first_col():
continue
diff --git a/pandas/tests/dtypes/test_generic.py b/pandas/tests/dtypes/test_generic.py
index 82444d6c94157..bd365f9c3281f 100644
--- a/pandas/tests/dtypes/test_generic.py
+++ b/pandas/tests/dtypes/test_generic.py
@@ -48,7 +48,6 @@ def test_abc_types(self):
def test_setattr_warnings():
- # GH5904 - Suggestion: Warning for DataFrame colname-methodname clash
# GH7175 - GOTCHA: You can't use dot notation to add a column...
d = {'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
'two': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
@@ -78,7 +77,3 @@ def test_setattr_warnings():
# warn when setting column to nonexistent name
df.four = df.two + 2
assert df.four.sum() > df.two.sum()
-
- with tm.assert_produces_warning(UserWarning):
- # warn when column has same name as method
- df['sum'] = df.two
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index a62fcb506a34b..b3209da6449d6 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -9,7 +9,7 @@
import sys
from distutils.version import LooseVersion
-from pandas.compat import range, lrange
+from pandas.compat import range, lrange, long
from pandas import compat
from numpy.random import randn
@@ -205,15 +205,18 @@ def test_itertuples(self):
'ints': lrange(5)}, columns=['floats', 'ints'])
for tup in df.itertuples(index=False):
- assert isinstance(tup[1], np.integer)
+ assert isinstance(tup[1], (int, long))
df = self.klass(data={"a": [1, 2, 3], "b": [4, 5, 6]})
dfaa = df[['a', 'a']]
assert (list(dfaa.itertuples()) ==
[(0, 1, 1), (1, 2, 2), (2, 3, 3)])
- assert (repr(list(df.itertuples(name=None))) ==
- '[(0, 1, 4), (1, 2, 5), (2, 3, 6)]')
+
+ # repr with be int/long on windows
+ if not compat.is_platform_windows():
+ assert (repr(list(df.itertuples(name=None))) ==
+ '[(0, 1, 4), (1, 2, 5), (2, 3, 6)]')
tup = next(df.itertuples(name='TestName'))
diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py
index 629c695b702fe..5bdb76494f4c8 100644
--- a/pandas/tests/frame/test_convert_to.py
+++ b/pandas/tests/frame/test_convert_to.py
@@ -5,6 +5,7 @@
import numpy as np
from pandas import compat
+from pandas.compat import long
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
@@ -135,11 +136,11 @@ def test_to_records_with_unicode_index(self):
def test_to_records_with_unicode_column_names(self):
# xref issue: https://github.com/numpy/numpy/issues/2407
# Issue #11879. to_records used to raise an exception when used
- # with column names containing non ascii caracters in Python 2
+ # with column names containing non-ascii characters in Python 2
result = DataFrame(data={u"accented_name_é": [1.0]}).to_records()
# Note that numpy allows for unicode field names but dtypes need
- # to be specified using dictionnary intsead of list of tuples.
+ # to be specified using dictionary instead of list of tuples.
expected = np.rec.array(
[(0, 1.0)],
dtype={"names": ["index", u"accented_name_é"],
@@ -236,3 +237,15 @@ def test_to_records_datetimeindex_with_tz(self, tz):
# both converted to UTC, so they are equal
tm.assert_numpy_array_equal(result, expected)
+
+ def test_to_dict_box_scalars(self):
+ # 14216
+ # make sure that we are boxing properly
+ d = {'a': [1], 'b': ['b']}
+
+ result = DataFrame(d).to_dict()
+ assert isinstance(list(result['a'])[0], (int, long))
+ assert isinstance(list(result['b'])[0], (int, long))
+
+ result = DataFrame(d).to_dict(orient='records')
+ assert isinstance(result[0]['a'], (int, long))
diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py
index 28392537be3c6..ffbede0eb208f 100644
--- a/pandas/tests/groupby/test_nth.py
+++ b/pandas/tests/groupby/test_nth.py
@@ -2,7 +2,10 @@
import pandas as pd
from pandas import DataFrame, MultiIndex, Index, Series, isna
from pandas.compat import lrange
-from pandas.util.testing import assert_frame_equal, assert_series_equal
+from pandas.util.testing import (
+ assert_frame_equal,
+ assert_produces_warning,
+ assert_series_equal)
from .common import MixIn
@@ -171,7 +174,10 @@ def test_nth(self):
# doc example
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
- result = g.B.nth(0, dropna=True)
+ # PR 17493, related to issue 11038
+ # test Series.nth with True for dropna produces DeprecationWarning
+ with assert_produces_warning(FutureWarning):
+ result = g.B.nth(0, dropna=True)
expected = g.B.first()
assert_series_equal(result, expected)
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py
index 98839a17d6e0c..267b67972c640 100644
--- a/pandas/tests/groupby/test_transform.py
+++ b/pandas/tests/groupby/test_transform.py
@@ -533,7 +533,7 @@ def test_cython_transform(self):
for (op, args), targop in ops:
if op != 'shift' and 'int' not in gb_target:
# numeric apply fastpath promotes dtype so have
- # to apply seperately and concat
+ # to apply separately and concat
i = gb[['int']].apply(targop)
f = gb[['float', 'float_missing']].apply(targop)
expected = pd.concat([f, i], axis=1)
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 1fdc08d68eb26..90618cd6e235f 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -11,6 +11,7 @@
RangeIndex, MultiIndex, CategoricalIndex, DatetimeIndex,
TimedeltaIndex, PeriodIndex, IntervalIndex,
notna, isna)
+from pandas.core.indexes.base import InvalidIndexError
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas.core.dtypes.common import needs_i8_conversion
from pandas._libs.tslib import iNaT
@@ -138,9 +139,14 @@ def test_get_indexer_consistency(self):
if isinstance(index, IntervalIndex):
continue
- indexer = index.get_indexer(index[0:2])
- assert isinstance(indexer, np.ndarray)
- assert indexer.dtype == np.intp
+ if index.is_unique or isinstance(index, CategoricalIndex):
+ indexer = index.get_indexer(index[0:2])
+ assert isinstance(indexer, np.ndarray)
+ assert indexer.dtype == np.intp
+ else:
+ e = "Reindexing only valid with uniquely valued Index objects"
+ with tm.assert_raises_regex(InvalidIndexError, e):
+ indexer = index.get_indexer(index[0:2])
indexer, _ = index.get_indexer_non_unique(index[0:2])
assert isinstance(indexer, np.ndarray)
@@ -632,7 +638,8 @@ def test_difference_base(self):
pass
elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
assert result.__class__ == answer.__class__
- tm.assert_numpy_array_equal(result.asi8, answer.asi8)
+ tm.assert_numpy_array_equal(result.sort_values().asi8,
+ answer.sort_values().asi8)
else:
result = first.difference(case)
assert tm.equalContents(result, answer)
@@ -954,3 +961,47 @@ def test_join_self_unique(self, how):
if index.is_unique:
joined = index.join(index, how=how)
assert (index == joined).all()
+
+ def test_searchsorted_monotonic(self):
+ # GH17271
+ for index in self.indices.values():
+ # not implemented for tuple searches in MultiIndex
+ # or Intervals searches in IntervalIndex
+ if isinstance(index, (MultiIndex, IntervalIndex)):
+ continue
+
+ # nothing to test if the index is empty
+ if index.empty:
+ continue
+ value = index[0]
+
+ # determine the expected results (handle dupes for 'right')
+ expected_left, expected_right = 0, (index == value).argmin()
+ if expected_right == 0:
+ # all values are the same, expected_right should be length
+ expected_right = len(index)
+
+ # test _searchsorted_monotonic in all cases
+ # test searchsorted only for increasing
+ if index.is_monotonic_increasing:
+ ssm_left = index._searchsorted_monotonic(value, side='left')
+ assert expected_left == ssm_left
+
+ ssm_right = index._searchsorted_monotonic(value, side='right')
+ assert expected_right == ssm_right
+
+ ss_left = index.searchsorted(value, side='left')
+ assert expected_left == ss_left
+
+ ss_right = index.searchsorted(value, side='right')
+ assert expected_right == ss_right
+ elif index.is_monotonic_decreasing:
+ ssm_left = index._searchsorted_monotonic(value, side='left')
+ assert expected_left == ssm_left
+
+ ssm_right = index._searchsorted_monotonic(value, side='right')
+ assert expected_right == ssm_right
+ else:
+ # non-monotonic should raise.
+ with pytest.raises(ValueError):
+ index._searchsorted_monotonic(value, side='left')
diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py
index cf896b06130a2..a4706dd8a3767 100644
--- a/pandas/tests/indexes/datetimes/test_construction.py
+++ b/pandas/tests/indexes/datetimes/test_construction.py
@@ -307,8 +307,9 @@ def test_constructor_coverage(self):
exp = date_range('1/1/2000', periods=10)
tm.assert_index_equal(rng, exp)
- pytest.raises(ValueError, DatetimeIndex, start='1/1/2000',
- periods='foo', freq='D')
+ msg = 'periods must be a number, got foo'
+ with tm.assert_raises_regex(TypeError, msg):
+ DatetimeIndex(start='1/1/2000', periods='foo', freq='D')
pytest.raises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index da4ca83c10dda..8d86bebdd4d5e 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -107,8 +107,10 @@ def test_date_range_ambiguous_arguments(self):
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
- pytest.raises(ValueError, date_range, start, end, freq='s',
- periods=10)
+ msg = ('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
+ with tm.assert_raises_regex(ValueError, msg):
+ date_range(start, end, periods=10, freq='s')
def test_date_range_businesshour(self):
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00',
@@ -146,14 +148,29 @@ def test_date_range_businesshour(self):
def test_range_misspecified(self):
# GH #1095
+ msg = ('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ date_range(start='1/1/2000')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ date_range(end='1/1/2000')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ date_range(periods=10)
+
+ with tm.assert_raises_regex(ValueError, msg):
+ date_range(start='1/1/2000', freq='H')
- pytest.raises(ValueError, date_range, '1/1/2000')
- pytest.raises(ValueError, date_range, end='1/1/2000')
- pytest.raises(ValueError, date_range, periods=10)
+ with tm.assert_raises_regex(ValueError, msg):
+ date_range(end='1/1/2000', freq='H')
- pytest.raises(ValueError, date_range, '1/1/2000', freq='H')
- pytest.raises(ValueError, date_range, end='1/1/2000', freq='H')
- pytest.raises(ValueError, date_range, periods=10, freq='H')
+ with tm.assert_raises_regex(ValueError, msg):
+ date_range(periods=10, freq='H')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ date_range()
def test_compat_replace(self):
# https://github.com/statsmodels/statsmodels/issues/3349
@@ -231,8 +248,13 @@ def test_constructor(self):
bdate_range(START, END, freq=BDay())
bdate_range(START, periods=20, freq=BDay())
bdate_range(end=START, periods=20, freq=BDay())
- pytest.raises(ValueError, date_range, '2011-1-1', '2012-1-1', 'B')
- pytest.raises(ValueError, bdate_range, '2011-1-1', '2012-1-1', 'B')
+
+ msg = 'periods must be a number, got B'
+ with tm.assert_raises_regex(TypeError, msg):
+ date_range('2011-1-1', '2012-1-1', 'B')
+
+ with tm.assert_raises_regex(TypeError, msg):
+ bdate_range('2011-1-1', '2012-1-1', 'B')
def test_naive_aware_conflicts(self):
naive = bdate_range(START, END, freq=BDay(), tz=None)
@@ -510,8 +532,13 @@ def test_constructor(self):
cdate_range(START, END, freq=CDay())
cdate_range(START, periods=20, freq=CDay())
cdate_range(end=START, periods=20, freq=CDay())
- pytest.raises(ValueError, date_range, '2011-1-1', '2012-1-1', 'C')
- pytest.raises(ValueError, cdate_range, '2011-1-1', '2012-1-1', 'C')
+
+ msg = 'periods must be a number, got C'
+ with tm.assert_raises_regex(TypeError, msg):
+ date_range('2011-1-1', '2012-1-1', 'C')
+
+ with tm.assert_raises_regex(TypeError, msg):
+ cdate_range('2011-1-1', '2012-1-1', 'C')
def test_cached_range(self):
DatetimeIndex._cached_range(START, END, offset=CDay())
diff --git a/pandas/tests/indexes/datetimes/test_datetimelike.py b/pandas/tests/indexes/datetimes/test_datetimelike.py
index 3b970ee382521..538e10e6011ec 100644
--- a/pandas/tests/indexes/datetimes/test_datetimelike.py
+++ b/pandas/tests/indexes/datetimes/test_datetimelike.py
@@ -12,7 +12,9 @@ class TestDatetimeIndex(DatetimeLike):
_holder = DatetimeIndex
def setup_method(self, method):
- self.indices = dict(index=tm.makeDateIndex(10))
+ self.indices = dict(index=tm.makeDateIndex(10),
+ index_dec=date_range('20130110', periods=10,
+ freq='-1D'))
self.setup_indices()
def create_index(self):
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index 089d74a1d69b8..be27334384f6b 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -25,7 +25,7 @@
compat)
-class TimeConversionFormats(object):
+class TestTimeConversionFormats(object):
def test_to_datetime_format(self):
values = ['1/1/2000', '1/2/2000', '1/3/2000']
@@ -372,7 +372,7 @@ def test_datetime_invalid_datatype(self):
pd.to_datetime(pd.to_datetime)
-class ToDatetimeUnit(object):
+class TestToDatetimeUnit(object):
def test_unit(self):
# GH 11758
@@ -566,7 +566,10 @@ def test_dataframe(self):
df2 = DataFrame({'year': [2015, 2016],
'month': [2, 20],
'day': [4, 5]})
- with pytest.raises(ValueError):
+
+ msg = ("cannot assemble the datetimes: time data .+ does not "
+ "match format '%Y%m%d' \(match\)")
+ with tm.assert_raises_regex(ValueError, msg):
to_datetime(df2)
result = to_datetime(df2, errors='coerce')
expected = Series([Timestamp('20150204 00:00:00'),
@@ -574,26 +577,31 @@ def test_dataframe(self):
assert_series_equal(result, expected)
# extra columns
- with pytest.raises(ValueError):
+ msg = ("extra keys have been passed to the datetime assemblage: "
+ "\[foo\]")
+ with tm.assert_raises_regex(ValueError, msg):
df2 = df.copy()
df2['foo'] = 1
to_datetime(df2)
# not enough
+ msg = ('to assemble mappings requires at least that \[year, month, '
+ 'day\] be specified: \[.+\] is missing')
for c in [['year'],
['year', 'month'],
['year', 'month', 'second'],
['month', 'day'],
['year', 'day', 'second']]:
- with pytest.raises(ValueError):
+ with tm.assert_raises_regex(ValueError, msg):
to_datetime(df[c])
# duplicates
+ msg = 'cannot assemble with duplicate keys'
df2 = DataFrame({'year': [2015, 2016],
'month': [2, 20],
'day': [4, 5]})
df2.columns = ['year', 'year', 'day']
- with pytest.raises(ValueError):
+ with tm.assert_raises_regex(ValueError, msg):
to_datetime(df2)
df2 = DataFrame({'year': [2015, 2016],
@@ -601,7 +609,7 @@ def test_dataframe(self):
'day': [4, 5],
'hour': [4, 5]})
df2.columns = ['year', 'month', 'day', 'day']
- with pytest.raises(ValueError):
+ with tm.assert_raises_regex(ValueError, msg):
to_datetime(df2)
def test_dataframe_dtypes(self):
@@ -632,7 +640,7 @@ def test_dataframe_dtypes(self):
to_datetime(df)
-class ToDatetimeMisc(object):
+class TestToDatetimeMisc(object):
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
@@ -1136,7 +1144,7 @@ def test_parsers(self):
exp = DatetimeIndex([pd.Timestamp(expected)])
tm.assert_index_equal(res, exp)
- # these really need to have yearfist, but we don't support
+ # these really need to have yearfirst, but we don't support
if not yearfirst:
result5 = Timestamp(date_str)
assert result5 == expected
diff --git a/pandas/tests/indexes/period/test_construction.py b/pandas/tests/indexes/period/test_construction.py
index e5b889e100307..639a9272c3808 100644
--- a/pandas/tests/indexes/period/test_construction.py
+++ b/pandas/tests/indexes/period/test_construction.py
@@ -436,11 +436,12 @@ def test_constructor_error(self):
start = Period('02-Apr-2005', 'B')
end_intv = Period('2006-12-31', ('w', 1))
- msg = 'Start and end must have same freq'
+ msg = 'start and end must have same freq'
with tm.assert_raises_regex(ValueError, msg):
PeriodIndex(start=start, end=end_intv)
- msg = 'Must specify 2 of start, end, periods'
+ msg = ('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
with tm.assert_raises_regex(ValueError, msg):
PeriodIndex(start=start)
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index e24e2ad936e2c..51f7d13cb0638 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -18,7 +18,9 @@ class TestPeriodIndex(DatetimeLike):
_multiprocess_can_split_ = True
def setup_method(self, method):
- self.indices = dict(index=tm.makePeriodIndex(10))
+ self.indices = dict(index=tm.makePeriodIndex(10),
+ index_dec=period_range('20130101', periods=10,
+ freq='D')[::-1])
self.setup_indices()
def create_index(self):
diff --git a/pandas/tests/indexes/period/test_period_range.py b/pandas/tests/indexes/period/test_period_range.py
new file mode 100644
index 0000000000000..640f24f67f72f
--- /dev/null
+++ b/pandas/tests/indexes/period/test_period_range.py
@@ -0,0 +1,94 @@
+import pytest
+import pandas.util.testing as tm
+from pandas import date_range, NaT, period_range, Period, PeriodIndex
+
+
+class TestPeriodRange(object):
+
+ @pytest.mark.parametrize('freq', ['D', 'W', 'M', 'Q', 'A'])
+ def test_construction_from_string(self, freq):
+ # non-empty
+ expected = date_range(start='2017-01-01', periods=5,
+ freq=freq, name='foo').to_period()
+ start, end = str(expected[0]), str(expected[-1])
+
+ result = period_range(start=start, end=end, freq=freq, name='foo')
+ tm.assert_index_equal(result, expected)
+
+ result = period_range(start=start, periods=5, freq=freq, name='foo')
+ tm.assert_index_equal(result, expected)
+
+ result = period_range(end=end, periods=5, freq=freq, name='foo')
+ tm.assert_index_equal(result, expected)
+
+ # empty
+ expected = PeriodIndex([], freq=freq, name='foo')
+
+ result = period_range(start=start, periods=0, freq=freq, name='foo')
+ tm.assert_index_equal(result, expected)
+
+ result = period_range(end=end, periods=0, freq=freq, name='foo')
+ tm.assert_index_equal(result, expected)
+
+ result = period_range(start=end, end=start, freq=freq, name='foo')
+ tm.assert_index_equal(result, expected)
+
+ def test_construction_from_period(self):
+ # upsampling
+ start, end = Period('2017Q1', freq='Q'), Period('2018Q1', freq='Q')
+ expected = date_range(start='2017-03-31', end='2018-03-31', freq='M',
+ name='foo').to_period()
+ result = period_range(start=start, end=end, freq='M', name='foo')
+ tm.assert_index_equal(result, expected)
+
+ # downsampling
+ start, end = Period('2017-1', freq='M'), Period('2019-12', freq='M')
+ expected = date_range(start='2017-01-31', end='2019-12-31', freq='Q',
+ name='foo').to_period()
+ result = period_range(start=start, end=end, freq='Q', name='foo')
+ tm.assert_index_equal(result, expected)
+
+ # empty
+ expected = PeriodIndex([], freq='W', name='foo')
+
+ result = period_range(start=start, periods=0, freq='W', name='foo')
+ tm.assert_index_equal(result, expected)
+
+ result = period_range(end=end, periods=0, freq='W', name='foo')
+ tm.assert_index_equal(result, expected)
+
+ result = period_range(start=end, end=start, freq='W', name='foo')
+ tm.assert_index_equal(result, expected)
+
+ def test_errors(self):
+ # not enough params
+ msg = ('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
+ with tm.assert_raises_regex(ValueError, msg):
+ period_range(start='2017Q1')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ period_range(end='2017Q1')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ period_range(periods=5)
+
+ with tm.assert_raises_regex(ValueError, msg):
+ period_range()
+
+ # too many params
+ with tm.assert_raises_regex(ValueError, msg):
+ period_range(start='2017Q1', end='2018Q1', periods=8, freq='Q')
+
+ # start/end NaT
+ msg = 'start and end must not be NaT'
+ with tm.assert_raises_regex(ValueError, msg):
+ period_range(start=NaT, end='2018Q1')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ period_range(start='2017Q1', end=NaT)
+
+ # invalid periods param
+ msg = 'periods must be a number, got foo'
+ with tm.assert_raises_regex(TypeError, msg):
+ period_range(start='2017Q1', periods='foo')
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index aa32e75ba0d58..fa73c9fc7b722 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -9,7 +9,7 @@
from pandas.tests.indexes.common import Base
from pandas.compat import (range, lrange, lzip, u,
- text_type, zip, PY3, PY36)
+ text_type, zip, PY3, PY36, PYPY)
import operator
import numpy as np
@@ -46,7 +46,8 @@ def setup_method(self, method):
catIndex=tm.makeCategoricalIndex(100),
empty=Index([]),
tuples=MultiIndex.from_tuples(lzip(
- ['foo', 'bar', 'baz'], [1, 2, 3])))
+ ['foo', 'bar', 'baz'], [1, 2, 3])),
+ repeats=Index([0, 0, 1, 1, 2, 2]))
self.setup_indices()
def create_index(self):
@@ -1369,13 +1370,21 @@ def test_isin(self):
assert len(result) == 0
assert result.dtype == np.bool_
- def test_isin_nan(self):
+ @pytest.mark.skipif(PYPY, reason="np.nan is float('nan') on PyPy")
+ def test_isin_nan_not_pypy(self):
+ tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([float('nan')]),
+ np.array([False, False]))
+
+ @pytest.mark.skipif(not PYPY, reason="np.nan is float('nan') on PyPy")
+ def test_isin_nan_pypy(self):
+ tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([float('nan')]),
+ np.array([False, True]))
+
+ def test_isin_nan_common(self):
tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([np.nan]),
np.array([False, True]))
tm.assert_numpy_array_equal(Index(['a', pd.NaT]).isin([pd.NaT]),
np.array([False, True]))
- tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([float('nan')]),
- np.array([False, False]))
tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([pd.NaT]),
np.array([False, False]))
@@ -1438,6 +1447,12 @@ def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
tm.assert_index_equal(result, self.strIndex)
+ # test for name (GH 17414)
+ index_with_name = self.strIndex.copy()
+ index_with_name.name = 'a'
+ result = index_with_name.get_level_values('a')
+ tm.assert_index_equal(result, index_with_name)
+
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
assert idx.name == idx[1:].name
diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py
index 05d31af57b36c..cf365465763fa 100644
--- a/pandas/tests/indexes/test_category.py
+++ b/pandas/tests/indexes/test_category.py
@@ -125,6 +125,16 @@ def test_construction_with_dtype(self):
result = CategoricalIndex(idx, categories=idx, ordered=True)
tm.assert_index_equal(result, expected, exact=True)
+ def test_create_categorical(self):
+ # https://github.com/pandas-dev/pandas/pull/17513
+ # The public CI constructor doesn't hit this code path with
+ # instances of CategoricalIndex, but we still want to test the code
+ ci = CategoricalIndex(['a', 'b', 'c'])
+ # First ci is self, second ci is data.
+ result = CategoricalIndex._create_categorical(ci, ci)
+ expected = Categorical(['a', 'b', 'c'])
+ tm.assert_categorical_equal(result, expected)
+
def test_disallow_set_ops(self):
# GH 10039
@@ -576,12 +586,13 @@ def test_isin(self):
ci.isin(['c', 'a', 'b', np.nan]), np.array([True] * 6))
# mismatched categorical -> coerced to ndarray so doesn't matter
- tm.assert_numpy_array_equal(
- ci.isin(ci.set_categories(list('abcdefghi'))), np.array([True] *
- 6))
- tm.assert_numpy_array_equal(
- ci.isin(ci.set_categories(list('defghi'))),
- np.array([False] * 5 + [True]))
+ result = ci.isin(ci.set_categories(list('abcdefghi')))
+ expected = np.array([True] * 6)
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = ci.isin(ci.set_categories(list('defghi')))
+ expected = np.array([False] * 5 + [True])
+ tm.assert_numpy_array_equal(result, expected)
def test_identical(self):
diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py
index 18eefc3fbdca6..13c3b35e4d85d 100644
--- a/pandas/tests/indexes/test_interval.py
+++ b/pandas/tests/indexes/test_interval.py
@@ -2,10 +2,11 @@
import pytest
import numpy as np
-
+from datetime import timedelta
from pandas import (Interval, IntervalIndex, Index, isna,
interval_range, Timestamp, Timedelta,
- compat)
+ compat, date_range, timedelta_range, DateOffset)
+from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
@@ -721,40 +722,278 @@ def test_is_non_overlapping_monotonic(self):
class TestIntervalRange(object):
- def test_construction(self):
- result = interval_range(0, 5, name='foo', closed='both')
+ @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both'])
+ def test_construction_from_numeric(self, closed):
+ # combinations of start/end/periods without freq
expected = IntervalIndex.from_breaks(
- np.arange(0, 5), name='foo', closed='both')
+ np.arange(0, 6), name='foo', closed=closed)
+
+ result = interval_range(start=0, end=5, name='foo', closed=closed)
tm.assert_index_equal(result, expected)
- def test_errors(self):
+ result = interval_range(start=0, periods=5, name='foo', closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(end=5, periods=5, name='foo', closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ # combinations of start/end/periods with freq
+ expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)],
+ name='foo', closed=closed)
+
+ result = interval_range(start=0, end=6, freq=2, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(start=0, periods=3, freq=2, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(end=6, periods=3, freq=2, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ # output truncates early if freq causes end to be skipped.
+ expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)],
+ name='foo', closed=closed)
+ result = interval_range(start=0, end=4, freq=1.5, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both'])
+ def test_construction_from_timestamp(self, closed):
+ # combinations of start/end/periods without freq
+ start, end = Timestamp('2017-01-01'), Timestamp('2017-01-06')
+ breaks = date_range(start=start, end=end)
+ expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
+
+ result = interval_range(start=start, end=end, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(start=start, periods=5, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(end=end, periods=5, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ # combinations of start/end/periods with fixed freq
+ freq = '2D'
+ start, end = Timestamp('2017-01-01'), Timestamp('2017-01-07')
+ breaks = date_range(start=start, end=end, freq=freq)
+ expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
+
+ result = interval_range(start=start, end=end, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(start=start, periods=3, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(end=end, periods=3, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ # output truncates early if freq causes end to be skipped.
+ end = Timestamp('2017-01-08')
+ result = interval_range(start=start, end=end, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ # combinations of start/end/periods with non-fixed freq
+ freq = 'M'
+ start, end = Timestamp('2017-01-01'), Timestamp('2017-12-31')
+ breaks = date_range(start=start, end=end, freq=freq)
+ expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
+
+ result = interval_range(start=start, end=end, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(start=start, periods=11, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(end=end, periods=11, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ # output truncates early if freq causes end to be skipped.
+ end = Timestamp('2018-01-15')
+ result = interval_range(start=start, end=end, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both'])
+ def test_construction_from_timedelta(self, closed):
+ # combinations of start/end/periods without freq
+ start, end = Timedelta('1 day'), Timedelta('6 days')
+ breaks = timedelta_range(start=start, end=end)
+ expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
+
+ result = interval_range(start=start, end=end, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(start=start, periods=5, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(end=end, periods=5, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ # combinations of start/end/periods with fixed freq
+ freq = '2D'
+ start, end = Timedelta('1 day'), Timedelta('7 days')
+ breaks = timedelta_range(start=start, end=end, freq=freq)
+ expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
+
+ result = interval_range(start=start, end=end, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(start=start, periods=3, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ result = interval_range(end=end, periods=3, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ # output truncates early if freq causes end to be skipped.
+ end = Timedelta('7 days 1 hour')
+ result = interval_range(start=start, end=end, freq=freq, name='foo',
+ closed=closed)
+ tm.assert_index_equal(result, expected)
+
+ def test_constructor_coverage(self):
+ # float value for periods
+ expected = pd.interval_range(start=0, periods=10)
+ result = pd.interval_range(start=0, periods=10.5)
+ tm.assert_index_equal(result, expected)
+
+ # equivalent timestamp-like start/end
+ start, end = Timestamp('2017-01-01'), Timestamp('2017-01-15')
+ expected = pd.interval_range(start=start, end=end)
+
+ result = pd.interval_range(start=start.to_pydatetime(),
+ end=end.to_pydatetime())
+ tm.assert_index_equal(result, expected)
+
+ result = pd.interval_range(start=start.tz_localize('UTC'),
+ end=end.tz_localize('UTC'))
+ tm.assert_index_equal(result, expected)
+
+ result = pd.interval_range(start=start.asm8, end=end.asm8)
+ tm.assert_index_equal(result, expected)
+
+ # equivalent freq with timestamp
+ equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1),
+ DateOffset(days=1)]
+ for freq in equiv_freq:
+ result = pd.interval_range(start=start, end=end, freq=freq)
+ tm.assert_index_equal(result, expected)
+
+ # equivalent timedelta-like start/end
+ start, end = Timedelta(days=1), Timedelta(days=10)
+ expected = pd.interval_range(start=start, end=end)
+
+ result = pd.interval_range(start=start.to_pytimedelta(),
+ end=end.to_pytimedelta())
+ tm.assert_index_equal(result, expected)
+
+ result = pd.interval_range(start=start.asm8, end=end.asm8)
+ tm.assert_index_equal(result, expected)
+
+ # equivalent freq with timedelta
+ equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1)]
+ for freq in equiv_freq:
+ result = pd.interval_range(start=start, end=end, freq=freq)
+ tm.assert_index_equal(result, expected)
+ def test_errors(self):
# not enough params
- def f():
- interval_range(0)
+ msg = ('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
- pytest.raises(ValueError, f)
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(start=0)
- def f():
- interval_range(periods=2)
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(end=5)
- pytest.raises(ValueError, f)
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(periods=2)
- def f():
+ with tm.assert_raises_regex(ValueError, msg):
interval_range()
- pytest.raises(ValueError, f)
+ # too many params
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(start=0, end=5, periods=6)
# mixed units
- def f():
- interval_range(0, Timestamp('20130101'), freq=2)
+ msg = 'start, end, freq need to be type compatible'
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=0, end=Timestamp('20130101'), freq=2)
- pytest.raises(ValueError, f)
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=0, end=Timedelta('1 day'), freq=2)
- def f():
- interval_range(0, 10, freq=Timedelta('1day'))
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=0, end=10, freq='D')
- pytest.raises(ValueError, f)
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=Timestamp('20130101'), end=10, freq='D')
+
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=Timestamp('20130101'),
+ end=Timedelta('1 day'), freq='D')
+
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=Timestamp('20130101'),
+ end=Timestamp('20130110'), freq=2)
+
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=Timedelta('1 day'), end=10, freq='D')
+
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=Timedelta('1 day'),
+ end=Timestamp('20130110'), freq='D')
+
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=Timedelta('1 day'),
+ end=Timedelta('10 days'), freq=2)
+
+ # invalid periods
+ msg = 'periods must be a number, got foo'
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=0, periods='foo')
+
+ # invalid start
+ msg = 'start must be numeric or datetime-like, got foo'
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(start='foo', periods=10)
+
+ # invalid end
+ msg = 'end must be numeric or datetime-like, got \(0, 1\]'
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(end=Interval(0, 1), periods=10)
+
+ # invalid freq for datetime-like
+ msg = 'freq must be numeric or convertible to DateOffset, got foo'
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(start=0, end=10, freq='foo')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(start=Timestamp('20130101'), periods=10, freq='foo')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ interval_range(end=Timedelta('1 day'), periods=10, freq='foo')
class TestIntervalTree(object):
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index 798d244468961..86308192c9166 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -14,7 +14,7 @@
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
-from pandas.compat import PY3, long, lrange, lzip, range, u
+from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.indexes.base import InvalidIndexError
from pandas._libs import lib
@@ -2571,13 +2571,22 @@ def test_isin(self):
assert len(result) == 0
assert result.dtype == np.bool_
- def test_isin_nan(self):
+ @pytest.mark.skipif(PYPY, reason="tuples cmp recursively on PyPy")
+ def test_isin_nan_not_pypy(self):
idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]])
tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),
np.array([False, False]))
tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),
np.array([False, False]))
+ @pytest.mark.skipif(not PYPY, reason="tuples cmp recursively on PyPy")
+ def test_isin_nan_pypy(self):
+ idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]])
+ tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),
+ np.array([False, True]))
+ tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),
+ np.array([False, True]))
+
def test_isin_level_kwarg(self):
idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange(
4)])
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index 1a0a38c173284..7e7e10e4aeabe 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -181,7 +181,9 @@ class TestFloat64Index(Numeric):
def setup_method(self, method):
self.indices = dict(mixed=Float64Index([1.5, 2, 3, 4, 5]),
- float=Float64Index(np.arange(5) * 2.5))
+ float=Float64Index(np.arange(5) * 2.5),
+ mixed_dec=Float64Index([5, 4, 3, 2, 1.5]),
+ float_dec=Float64Index(np.arange(4, -1, -1) * 2.5))
self.setup_indices()
def create_index(self):
@@ -654,7 +656,8 @@ class TestInt64Index(NumericInt):
_holder = Int64Index
def setup_method(self, method):
- self.indices = dict(index=Int64Index(np.arange(0, 20, 2)))
+ self.indices = dict(index=Int64Index(np.arange(0, 20, 2)),
+ index_dec=Int64Index(np.arange(19, -1, -1)))
self.setup_indices()
def create_index(self):
@@ -949,8 +952,9 @@ class TestUInt64Index(NumericInt):
_holder = UInt64Index
def setup_method(self, method):
- self.indices = dict(index=UInt64Index([2**63, 2**63 + 10, 2**63 + 15,
- 2**63 + 20, 2**63 + 25]))
+ vals = [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25]
+ self.indices = dict(index=UInt64Index(vals),
+ index_dec=UInt64Index(reversed(vals)))
self.setup_indices()
def create_index(self):
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index 5ecf467b57fc5..d206c36ee51c9 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -25,7 +25,8 @@ class TestRangeIndex(Numeric):
_compat_props = ['shape', 'ndim', 'size', 'itemsize']
def setup_method(self, method):
- self.indices = dict(index=RangeIndex(0, 20, 2, name='foo'))
+ self.indices = dict(index=RangeIndex(0, 20, 2, name='foo'),
+ index_dec=RangeIndex(18, -1, -2, name='bar'))
self.setup_indices()
def create_index(self):
@@ -610,6 +611,21 @@ def test_intersection(self):
other.values)))
tm.assert_index_equal(result, expected)
+ # reversed (GH 17296)
+ result = other.intersection(self.index)
+ tm.assert_index_equal(result, expected)
+
+ # GH 17296: intersect two decreasing RangeIndexes
+ first = RangeIndex(10, -2, -2)
+ other = RangeIndex(5, -4, -1)
+ expected = first.astype(int).intersection(other.astype(int))
+ result = first.intersection(other).astype(int)
+ tm.assert_index_equal(result, expected)
+
+ # reversed
+ result = other.intersection(first).astype(int)
+ tm.assert_index_equal(result, expected)
+
index = RangeIndex(5)
# intersect of non-overlapping indices
diff --git a/pandas/tests/indexes/timedeltas/test_construction.py b/pandas/tests/indexes/timedeltas/test_construction.py
index dd25e2cca2e55..70aadd9f57174 100644
--- a/pandas/tests/indexes/timedeltas/test_construction.py
+++ b/pandas/tests/indexes/timedeltas/test_construction.py
@@ -50,8 +50,9 @@ def test_constructor_coverage(self):
exp = timedelta_range('1 days', periods=10)
tm.assert_index_equal(rng, exp)
- pytest.raises(ValueError, TimedeltaIndex, start='1 days',
- periods='foo', freq='D')
+ msg = 'periods must be a number, got foo'
+ with tm.assert_raises_regex(TypeError, msg):
+ TimedeltaIndex(start='1 days', periods='foo', freq='D')
pytest.raises(ValueError, TimedeltaIndex, start='1 days',
end='10 days')
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta_range.py b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
index 4732a0ce110de..7624e1f79af15 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta_range.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
@@ -1,5 +1,4 @@
import numpy as np
-
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.offsets import Day, Second
@@ -49,3 +48,23 @@ def test_timedelta_range(self):
expected = df.loc[pd.Timedelta('0s'):, :]
result = df.loc['0s':, :]
assert_frame_equal(expected, result)
+
+ def test_errors(self):
+ # not enough params
+ msg = ('Of the three parameters: start, end, and periods, '
+ 'exactly two must be specified')
+ with tm.assert_raises_regex(ValueError, msg):
+ timedelta_range(start='0 days')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ timedelta_range(end='5 days')
+
+ with tm.assert_raises_regex(ValueError, msg):
+ timedelta_range(periods=2)
+
+ with tm.assert_raises_regex(ValueError, msg):
+ timedelta_range()
+
+ # too many params
+ with tm.assert_raises_regex(ValueError, msg):
+ timedelta_range(start='0 days', end='5 days', periods=10)
diff --git a/pandas/tests/indexing/test_interval.py b/pandas/tests/indexing/test_interval.py
index be6e5e1cffb2e..31a94abcd99a5 100644
--- a/pandas/tests/indexing/test_interval.py
+++ b/pandas/tests/indexing/test_interval.py
@@ -3,6 +3,7 @@
import pandas as pd
from pandas import Series, DataFrame, IntervalIndex, Interval
+from pandas.compat import product
import pandas.util.testing as tm
@@ -14,16 +15,6 @@ def setup_method(self, method):
def test_loc_with_scalar(self):
s = self.s
- expected = 0
-
- result = s.loc[0.5]
- assert result == expected
-
- result = s.loc[1]
- assert result == expected
-
- with pytest.raises(KeyError):
- s.loc[0]
expected = s.iloc[:3]
tm.assert_series_equal(expected, s.loc[:3])
@@ -42,16 +33,6 @@ def test_loc_with_scalar(self):
def test_getitem_with_scalar(self):
s = self.s
- expected = 0
-
- result = s[0.5]
- assert result == expected
-
- result = s[1]
- assert result == expected
-
- with pytest.raises(KeyError):
- s[0]
expected = s.iloc[:3]
tm.assert_series_equal(expected, s[:3])
@@ -67,6 +48,41 @@ def test_getitem_with_scalar(self):
expected = s.iloc[2:5]
tm.assert_series_equal(expected, s[s >= 2])
+ @pytest.mark.parametrize('direction, closed',
+ product(('increasing', 'decreasing'),
+ ('left', 'right', 'neither', 'both')))
+ def test_nonoverlapping_monotonic(self, direction, closed):
+ tpls = [(0, 1), (2, 3), (4, 5)]
+ if direction == 'decreasing':
+ tpls = reversed(tpls)
+
+ idx = IntervalIndex.from_tuples(tpls, closed=closed)
+ s = Series(list('abc'), idx)
+
+ for key, expected in zip(idx.left, s):
+ if idx.closed_left:
+ assert s[key] == expected
+ assert s.loc[key] == expected
+ else:
+ with pytest.raises(KeyError):
+ s[key]
+ with pytest.raises(KeyError):
+ s.loc[key]
+
+ for key, expected in zip(idx.right, s):
+ if idx.closed_right:
+ assert s[key] == expected
+ assert s.loc[key] == expected
+ else:
+ with pytest.raises(KeyError):
+ s[key]
+ with pytest.raises(KeyError):
+ s.loc[key]
+
+ for key, expected in zip(idx.mid, s):
+ assert s[key] == expected
+ assert s.loc[key] == expected
+
def test_with_interval(self):
s = self.s
diff --git a/pandas/tests/io/data/legacy_hdf/legacy_0.10.h5 b/pandas/tests/io/data/legacy_hdf/legacy_0.10.h5
deleted file mode 100644
index b1439ef16361a..0000000000000
Binary files a/pandas/tests/io/data/legacy_hdf/legacy_0.10.h5 and /dev/null differ
diff --git a/pandas/tests/io/data/legacy_hdf/legacy_table_0.11.h5 b/pandas/tests/io/data/legacy_hdf/legacy_table_0.11.h5
deleted file mode 100644
index 958effc2ce6f8..0000000000000
Binary files a/pandas/tests/io/data/legacy_hdf/legacy_table_0.11.h5 and /dev/null differ
diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py
index 59d9f938734ab..811381e4cbd2a 100644
--- a/pandas/tests/io/formats/test_style.py
+++ b/pandas/tests/io/formats/test_style.py
@@ -265,6 +265,64 @@ def f(x):
col in self.df.loc[slice_].columns)
assert result == expected
+ def test_where_with_one_style(self):
+ # GH 17474
+ def f(x):
+ return x > 0.5
+
+ style1 = 'foo: bar'
+
+ result = self.df.style.where(f, style1)._compute().ctx
+ expected = dict(((r, c),
+ [style1 if f(self.df.loc[row, col]) else ''])
+ for r, row in enumerate(self.df.index)
+ for c, col in enumerate(self.df.columns))
+ assert result == expected
+
+ def test_where_subset(self):
+ # GH 17474
+ def f(x):
+ return x > 0.5
+
+ style1 = 'foo: bar'
+ style2 = 'baz: foo'
+
+ slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
+ pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
+ pd.IndexSlice[:2, ['A', 'B']]]
+
+ for slice_ in slices:
+ result = self.df.style.where(f, style1, style2,
+ subset=slice_)._compute().ctx
+ expected = dict(((r, c),
+ [style1 if f(self.df.loc[row, col]) else style2])
+ for r, row in enumerate(self.df.index)
+ for c, col in enumerate(self.df.columns)
+ if row in self.df.loc[slice_].index and
+ col in self.df.loc[slice_].columns)
+ assert result == expected
+
+ def test_where_subset_compare_with_applymap(self):
+ # GH 17474
+ def f(x):
+ return x > 0.5
+
+ style1 = 'foo: bar'
+ style2 = 'baz: foo'
+
+ def g(x):
+ return style1 if f(x) else style2
+
+ slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
+ pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
+ pd.IndexSlice[:2, ['A', 'B']]]
+
+ for slice_ in slices:
+ result = self.df.style.where(f, style1, style2,
+ subset=slice_)._compute().ctx
+ expected = self.df.style.applymap(g, subset=slice_)._compute().ctx
+ assert result == expected
+
def test_empty(self):
df = pd.DataFrame({'A': [1, 0]})
s = df.style
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index e447a74b2b462..e097194674cf6 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -52,7 +52,7 @@ def test_series(self):
result = build_table_schema(s)
assert 'pandas_version' in result
- def tets_series_unnamed(self):
+ def test_series_unnamed(self):
result = build_table_schema(pd.Series([1, 2, 3]), version=False)
expected = {'fields': [{'name': 'index', 'type': 'integer'},
{'name': 'values', 'type': 'integer'}],
diff --git a/pandas/tests/io/parser/data/tips.csv.bz2 b/pandas/tests/io/parser/data/tips.csv.bz2
new file mode 100644
index 0000000000000..1452896b05e9d
Binary files /dev/null and b/pandas/tests/io/parser/data/tips.csv.bz2 differ
diff --git a/pandas/tests/io/parser/data/tips.csv.gz b/pandas/tests/io/parser/data/tips.csv.gz
new file mode 100644
index 0000000000000..3a131068b2a38
Binary files /dev/null and b/pandas/tests/io/parser/data/tips.csv.gz differ
diff --git a/pandas/tests/io/parser/python_parser_only.py b/pandas/tests/io/parser/python_parser_only.py
index a0784d3aeae2d..c3dc91b3f188c 100644
--- a/pandas/tests/io/parser/python_parser_only.py
+++ b/pandas/tests/io/parser/python_parser_only.py
@@ -218,6 +218,25 @@ def test_multi_char_sep_quotes(self):
self.read_csv(StringIO(data), sep=',,',
quoting=csv.QUOTE_NONE)
+ def test_none_delimiter(self):
+ # see gh-13374 and gh-17465
+
+ data = "a,b,c\n0,1,2\n3,4,5,6\n7,8,9"
+ expected = DataFrame({'a': [0, 7],
+ 'b': [1, 8],
+ 'c': [2, 9]})
+
+ # We expect the third line in the data to be
+ # skipped because it is malformed,
+ # but we do not expect any errors to occur.
+ result = self.read_csv(StringIO(data), header=0,
+ sep=None,
+ error_bad_lines=False,
+ warn_bad_lines=True,
+ engine='python',
+ tupleize_cols=True)
+ tm.assert_frame_equal(result, expected)
+
def test_skipfooter_bad_row(self):
# see gh-13879
# see gh-15910
diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py
index 3344243f8137a..27cc708889fa2 100644
--- a/pandas/tests/io/parser/test_network.py
+++ b/pandas/tests/io/parser/test_network.py
@@ -4,13 +4,20 @@
Tests parsers ability to read and parse non-local files
and hence require a network connection to be read.
"""
-
import os
+
import pytest
+import moto
import pandas.util.testing as tm
from pandas import DataFrame
from pandas.io.parsers import read_csv, read_table
+from pandas.compat import BytesIO
+
+
+@pytest.fixture(scope='module')
+def tips_file():
+ return os.path.join(tm.get_data_path(), 'tips.csv')
@pytest.fixture(scope='module')
@@ -19,6 +26,40 @@ def salaries_table():
return read_table(path)
+@pytest.fixture(scope='module')
+def s3_resource(tips_file):
+ pytest.importorskip('s3fs')
+ moto.mock_s3().start()
+
+ test_s3_files = [
+ ('tips.csv', tips_file),
+ ('tips.csv.gz', tips_file + '.gz'),
+ ('tips.csv.bz2', tips_file + '.bz2'),
+ ]
+
+ def add_tips_files(bucket_name):
+ for s3_key, file_name in test_s3_files:
+ with open(file_name, 'rb') as f:
+ conn.Bucket(bucket_name).put_object(
+ Key=s3_key,
+ Body=f)
+
+ boto3 = pytest.importorskip('boto3')
+ # see gh-16135
+ bucket = 'pandas-test'
+
+ conn = boto3.resource("s3", region_name="us-east-1")
+ conn.create_bucket(Bucket=bucket)
+ add_tips_files(bucket)
+
+ conn.create_bucket(Bucket='cant_get_it', ACL='private')
+ add_tips_files('cant_get_it')
+
+ yield conn
+
+ moto.mock_s3().stop()
+
+
@pytest.mark.network
@pytest.mark.parametrize(
"compression,extension",
@@ -51,15 +92,11 @@ def check_compressed_urls(salaries_table, compression, extension, mode,
class TestS3(object):
-
- def setup_method(self, method):
- try:
- import s3fs # noqa
- except ImportError:
- pytest.skip("s3fs not installed")
-
@tm.network
def test_parse_public_s3_bucket(self):
+ pytest.importorskip('s3fs')
+ # more of an integration test due to the not-public contents portion
+ # can probably mock this though.
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' +
ext, compression=comp)
@@ -74,8 +111,8 @@ def test_parse_public_s3_bucket(self):
assert not df.empty
tm.assert_frame_equal(read_csv(tm.get_data_path('tips.csv')), df)
- @tm.network
- def test_parse_public_s3n_bucket(self):
+ def test_parse_public_s3n_bucket(self, s3_resource):
+
# Read from AWS s3 as "s3n" URL
df = read_csv('s3n://pandas-test/tips.csv', nrows=10)
assert isinstance(df, DataFrame)
@@ -83,8 +120,7 @@ def test_parse_public_s3n_bucket(self):
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
- @tm.network
- def test_parse_public_s3a_bucket(self):
+ def test_parse_public_s3a_bucket(self, s3_resource):
# Read from AWS s3 as "s3a" URL
df = read_csv('s3a://pandas-test/tips.csv', nrows=10)
assert isinstance(df, DataFrame)
@@ -92,8 +128,7 @@ def test_parse_public_s3a_bucket(self):
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
- @tm.network
- def test_parse_public_s3_bucket_nrows(self):
+ def test_parse_public_s3_bucket_nrows(self, s3_resource):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' +
ext, nrows=10, compression=comp)
@@ -102,8 +137,7 @@ def test_parse_public_s3_bucket_nrows(self):
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
- @tm.network
- def test_parse_public_s3_bucket_chunked(self):
+ def test_parse_public_s3_bucket_chunked(self, s3_resource):
# Read with a chunksize
chunksize = 5
local_tips = read_csv(tm.get_data_path('tips.csv'))
@@ -121,8 +155,7 @@ def test_parse_public_s3_bucket_chunked(self):
chunksize * i_chunk: chunksize * (i_chunk + 1)]
tm.assert_frame_equal(true_df, df)
- @tm.network
- def test_parse_public_s3_bucket_chunked_python(self):
+ def test_parse_public_s3_bucket_chunked_python(self, s3_resource):
# Read with a chunksize using the Python parser
chunksize = 5
local_tips = read_csv(tm.get_data_path('tips.csv'))
@@ -140,8 +173,7 @@ def test_parse_public_s3_bucket_chunked_python(self):
chunksize * i_chunk: chunksize * (i_chunk + 1)]
tm.assert_frame_equal(true_df, df)
- @tm.network
- def test_parse_public_s3_bucket_python(self):
+ def test_parse_public_s3_bucket_python(self, s3_resource):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
compression=comp)
@@ -150,8 +182,7 @@ def test_parse_public_s3_bucket_python(self):
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')), df)
- @tm.network
- def test_infer_s3_compression(self):
+ def test_infer_s3_compression(self, s3_resource):
for ext in ['', '.gz', '.bz2']:
df = read_csv('s3://pandas-test/tips.csv' + ext,
engine='python', compression='infer')
@@ -160,8 +191,7 @@ def test_infer_s3_compression(self):
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')), df)
- @tm.network
- def test_parse_public_s3_bucket_nrows_python(self):
+ def test_parse_public_s3_bucket_nrows_python(self, s3_resource):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
nrows=10, compression=comp)
@@ -170,8 +200,7 @@ def test_parse_public_s3_bucket_nrows_python(self):
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
- @tm.network
- def test_s3_fails(self):
+ def test_s3_fails(self, s3_resource):
with pytest.raises(IOError):
read_csv('s3://nyqpug/asdf.csv')
@@ -180,21 +209,18 @@ def test_s3_fails(self):
with pytest.raises(IOError):
read_csv('s3://cant_get_it/')
- @tm.network
- def boto3_client_s3(self):
+ def test_read_csv_handles_boto_s3_object(self,
+ s3_resource,
+ tips_file):
# see gh-16135
- # boto3 is a dependency of s3fs
- import boto3
- client = boto3.client("s3")
-
- key = "/tips.csv"
- bucket = "pandas-test"
- s3_object = client.get_object(Bucket=bucket, Key=key)
+ s3_object = s3_resource.meta.client.get_object(
+ Bucket='pandas-test',
+ Key='tips.csv')
- result = read_csv(s3_object["Body"])
+ result = read_csv(BytesIO(s3_object["Body"].read()), encoding='utf8')
assert isinstance(result, DataFrame)
assert not result.empty
- expected = read_csv(tm.get_data_path('tips.csv'))
+ expected = read_csv(tips_file)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/parser/test_parsers.py b/pandas/tests/io/parser/test_parsers.py
index 2fee2451c5e36..0ea4757b10e94 100644
--- a/pandas/tests/io/parser/test_parsers.py
+++ b/pandas/tests/io/parser/test_parsers.py
@@ -3,8 +3,10 @@
import os
import pandas.util.testing as tm
-from pandas import read_csv, read_table
+from pandas import read_csv, read_table, DataFrame
from pandas.core.common import AbstractMethodError
+from pandas._libs.lib import Timestamp
+from pandas.compat import StringIO
from .common import ParserTests
from .header import HeaderTests
@@ -100,3 +102,51 @@ def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = self.engine
return read_table(*args, **kwds)
+
+
+class TestUnsortedUsecols(object):
+ def test_override__set_noconvert_columns(self):
+ # GH 17351 - usecols needs to be sorted in _setnoconvert_columns
+ # based on the test_usecols_with_parse_dates test from usecols.py
+ from pandas.io.parsers import CParserWrapper, TextFileReader
+
+ s = """a,b,c,d,e
+ 0,1,20140101,0900,4
+ 0,1,20140102,1000,4"""
+
+ parse_dates = [[1, 2]]
+ cols = {
+ 'a': [0, 0],
+ 'c_d': [
+ Timestamp('2014-01-01 09:00:00'),
+ Timestamp('2014-01-02 10:00:00')
+ ]
+ }
+ expected = DataFrame(cols, columns=['c_d', 'a'])
+
+ class MyTextFileReader(TextFileReader):
+ def __init__(self):
+ self._currow = 0
+ self.squeeze = False
+
+ class MyCParserWrapper(CParserWrapper):
+ def _set_noconvert_columns(self):
+ if self.usecols_dtype == 'integer':
+ # self.usecols is a set, which is documented as unordered
+ # but in practice, a CPython set of integers is sorted.
+ # In other implementations this assumption does not hold.
+ # The following code simulates a different order, which
+ # before GH 17351 would cause the wrong columns to be
+ # converted via the parse_dates parameter
+ self.usecols = list(self.usecols)
+ self.usecols.reverse()
+ return CParserWrapper._set_noconvert_columns(self)
+
+ parser = MyTextFileReader()
+ parser.options = {'usecols': [0, 2, 3],
+ 'parse_dates': parse_dates,
+ 'delimiter': ','}
+ parser._engine = MyCParserWrapper(StringIO(s), **parser.options)
+ df = parser.read()
+
+ tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index ec1d1a2a51cdc..a60f2b5a4c946 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -291,7 +291,7 @@ def test_full_file_with_spaces(self):
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
- # File with spaces and missing values in columsn
+ # File with spaces and missing values in columns
test = """
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index 92147b46097b8..6a399f41975e5 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -1,33 +1,32 @@
# pylint: disable=E1101
-
-from pandas.compat import u, range, map, openpyxl_compat, BytesIO, iteritems
-from datetime import datetime, date, time
-import sys
+import functools
+import operator
import os
+import sys
+import warnings
+from datetime import datetime, date, time
from distutils.version import LooseVersion
from functools import partial
-
-import warnings
from warnings import catch_warnings
-import operator
-import functools
-import pytest
-from numpy import nan
import numpy as np
+import pytest
+from numpy import nan
+import moto
import pandas as pd
+import pandas.util.testing as tm
from pandas import DataFrame, Index, MultiIndex
-from pandas.io.formats.excel import ExcelFormatter
-from pandas.io.parsers import read_csv
+from pandas.compat import u, range, map, openpyxl_compat, BytesIO, iteritems
+from pandas.core.config import set_option, get_option
+from pandas.io.common import URLError
from pandas.io.excel import (
ExcelFile, ExcelWriter, read_excel, _XlwtWriter, _Openpyxl1Writer,
_Openpyxl20Writer, _Openpyxl22Writer, register_writer, _XlsxWriter
)
-from pandas.io.common import URLError
+from pandas.io.formats.excel import ExcelFormatter
+from pandas.io.parsers import read_csv
from pandas.util.testing import ensure_clean, makeCustomDataframe as mkdf
-from pandas.core.config import set_option, get_option
-import pandas.util.testing as tm
def _skip_if_no_xlrd():
@@ -67,13 +66,6 @@ def _skip_if_no_excelsuite():
_skip_if_no_openpyxl()
-def _skip_if_no_s3fs():
- try:
- import s3fs # noqa
- except ImportError:
- pytest.skip('s3fs not installed, skipping')
-
-
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)[:10]
@@ -605,14 +597,22 @@ def test_read_from_http_url(self):
local_table = self.get_exceldf('test1')
tm.assert_frame_equal(url_table, local_table)
- @tm.network(check_before_test=True)
def test_read_from_s3_url(self):
- _skip_if_no_s3fs()
-
- url = ('s3://pandas-test/test1' + self.ext)
- url_table = read_excel(url)
- local_table = self.get_exceldf('test1')
- tm.assert_frame_equal(url_table, local_table)
+ boto3 = pytest.importorskip('boto3')
+ pytest.importorskip('s3fs')
+
+ with moto.mock_s3():
+ conn = boto3.resource("s3", region_name="us-east-1")
+ conn.create_bucket(Bucket="pandas-test")
+ file_name = os.path.join(self.dirpath, 'test1' + self.ext)
+ with open(file_name, 'rb') as f:
+ conn.Bucket("pandas-test").put_object(Key="test1" + self.ext,
+ Body=f)
+
+ url = ('s3://pandas-test/test1' + self.ext)
+ url_table = read_excel(url)
+ local_table = self.get_exceldf('test1')
+ tm.assert_frame_equal(url_table, local_table)
@pytest.mark.slow
def test_read_from_file_url(self):
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index f33ba7627101e..f331378b654be 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -1370,7 +1370,7 @@ def check_indexers(key, indexers):
labels=['l1'], items=['ItemA'], minor_axis=['B'])
assert_panel4d_equal(result, expected)
- # non-existant partial selection
+ # non-existent partial selection
result = store.select(
'p4d', "labels='l1' and items='Item1' and minor_axis='B'")
expected = p4d.reindex(labels=['l1'], items=[],
@@ -1980,11 +1980,11 @@ def test_append_misc(self):
with catch_warnings(record=True):
- # unsuported data types for non-tables
+ # unsupported data types for non-tables
p4d = tm.makePanel4D()
pytest.raises(TypeError, store.put, 'p4d', p4d)
- # unsuported data types
+ # unsupported data types
pytest.raises(TypeError, store.put, 'abc', None)
pytest.raises(TypeError, store.put, 'abc', '123')
pytest.raises(TypeError, store.put, 'abc', 123)
@@ -2011,7 +2011,7 @@ def check(obj, comparator):
df['string'] = 'foo'
df['float322'] = 1.
df['float322'] = df['float322'].astype('float32')
- df['boolean'] = df['float322'] > 0
+ df['bool'] = df['float322'] > 0
df['time1'] = Timestamp('20130101')
df['time2'] = Timestamp('20130102')
check(df, tm.assert_frame_equal)
@@ -2141,7 +2141,7 @@ def test_table_values_dtypes_roundtrip(self):
df1['string'] = 'foo'
df1['float322'] = 1.
df1['float322'] = df1['float322'].astype('float32')
- df1['boolean'] = df1['float32'] > 0
+ df1['bool'] = df1['float32'] > 0
df1['time1'] = Timestamp('20130101')
df1['time2'] = Timestamp('20130102')
@@ -4599,41 +4599,13 @@ def test_legacy_table_read(self):
expected = df2[df2.index > df2.index[2]]
assert_frame_equal(expected, result)
- def test_legacy_0_10_read(self):
- # legacy from 0.10
- with catch_warnings(record=True):
- path = tm.get_data_path('legacy_hdf/legacy_0.10.h5')
- with ensure_clean_store(path, mode='r') as store:
- str(store)
- for k in store.keys():
- store.select(k)
-
- def test_legacy_0_11_read(self):
- # legacy from 0.11
- path = os.path.join('legacy_hdf', 'legacy_table_0.11.h5')
- with ensure_clean_store(tm.get_data_path(path), mode='r') as store:
- str(store)
- assert 'df' in store
- assert 'df1' in store
- assert 'mi' in store
- df = store.select('df')
- df1 = store.select('df1')
- mi = store.select('mi')
- assert isinstance(df, DataFrame)
- assert isinstance(df1, DataFrame)
- assert isinstance(mi, DataFrame)
-
def test_copy(self):
with catch_warnings(record=True):
- def do_copy(f=None, new_f=None, keys=None,
+ def do_copy(f, new_f=None, keys=None,
propindexes=True, **kwargs):
try:
- if f is None:
- f = tm.get_data_path(os.path.join('legacy_hdf',
- 'legacy_0.10.h5'))
-
store = HDFStore(f, 'r')
if new_f is None:
@@ -4671,10 +4643,6 @@ def do_copy(f=None, new_f=None, keys=None,
pass
safe_remove(new_f)
- do_copy()
- do_copy(keys=['/a', '/b', '/df1_mixed'])
- do_copy(propindexes=False)
-
# new table
df = tm.makeDataFrame()
@@ -4997,7 +4965,7 @@ def test_preserve_timedeltaindex_type(self):
store['df'] = df
assert_frame_equal(store['df'], df)
- def test_colums_multiindex_modified(self):
+ def test_columns_multiindex_modified(self):
# BUG: 7212
# read_hdf store.select modified the passed columns parameters
# when multi-indexed.
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index a414928d318c4..94a0ac31e093e 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -476,7 +476,7 @@ def test_read_write_reread_dta15(self):
tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
- original = DataFrame([(1,)], columns=['var'])
+ original = DataFrame([(1,)], columns=['variable'])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
@@ -1309,3 +1309,12 @@ def test_value_labels_iterator(self, write_index):
dta_iter = pd.read_stata(path, iterator=True)
value_labels = dta_iter.value_labels()
assert value_labels == {'A': {0: 'A', 1: 'B', 2: 'C', 3: 'E'}}
+
+ def test_set_index(self):
+ # GH 17328
+ df = tm.makeDataFrame()
+ df.index.name = 'index'
+ with tm.ensure_clean() as path:
+ df.to_stata(path)
+ reread = pd.read_stata(path, index_col='index')
+ tm.assert_frame_equal(df, reread)
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index cff0c1c0b424e..eb10e70f4189b 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -347,7 +347,7 @@ def _test(ax):
assert int(result[0]) == expected[0].ordinal
assert int(result[1]) == expected[1].ordinal
- # datetim
+ # datetime
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1))
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 67098529a0111..f3b287a8889c3 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -380,6 +380,82 @@ def test_subplots_timeseries(self):
self._check_ticks_props(ax, xlabelsize=7, xrot=45,
ylabelsize=7)
+ def test_subplots_timeseries_y_axis(self):
+ # GH16953
+ data = {"numeric": np.array([1, 2, 5]),
+ "timedelta": [pd.Timedelta(-10, unit="s"),
+ pd.Timedelta(10, unit="m"),
+ pd.Timedelta(10, unit="h")],
+ "datetime_no_tz": [pd.to_datetime("2017-08-01 00:00:00"),
+ pd.to_datetime("2017-08-01 02:00:00"),
+ pd.to_datetime("2017-08-02 00:00:00")],
+ "datetime_all_tz": [pd.to_datetime("2017-08-01 00:00:00",
+ utc=True),
+ pd.to_datetime("2017-08-01 02:00:00",
+ utc=True),
+ pd.to_datetime("2017-08-02 00:00:00",
+ utc=True)],
+ "text": ["This", "should", "fail"]}
+ testdata = DataFrame(data)
+
+ ax_numeric = testdata.plot(y="numeric")
+ assert (ax_numeric.get_lines()[0].get_data()[1] ==
+ testdata["numeric"].values).all()
+ ax_timedelta = testdata.plot(y="timedelta")
+ assert (ax_timedelta.get_lines()[0].get_data()[1] ==
+ testdata["timedelta"].values).all()
+ ax_datetime_no_tz = testdata.plot(y="datetime_no_tz")
+ assert (ax_datetime_no_tz.get_lines()[0].get_data()[1] ==
+ testdata["datetime_no_tz"].values).all()
+ ax_datetime_all_tz = testdata.plot(y="datetime_all_tz")
+ assert (ax_datetime_all_tz.get_lines()[0].get_data()[1] ==
+ testdata["datetime_all_tz"].values).all()
+ with pytest.raises(TypeError):
+ testdata.plot(y="text")
+
+ @pytest.mark.xfail(reason='not support for period, categorical, '
+ 'datetime_mixed_tz')
+ def test_subplots_timeseries_y_axis_not_supported(self):
+ """
+ This test will fail for:
+ period:
+ since period isn't yet implemented in ``select_dtypes``
+ and because it will need a custom value converter +
+ tick formater (as was done for x-axis plots)
+
+ categorical:
+ because it will need a custom value converter +
+ tick formater (also doesn't work for x-axis, as of now)
+
+ datetime_mixed_tz:
+ because of the way how pandas handels ``Series`` of
+ ``datetime`` objects with different timezone,
+ generally converting ``datetime`` objects in a tz-aware
+ form could help with this problem
+ """
+ data = {"numeric": np.array([1, 2, 5]),
+ "period": [pd.Period('2017-08-01 00:00:00', freq='H'),
+ pd.Period('2017-08-01 02:00', freq='H'),
+ pd.Period('2017-08-02 00:00:00', freq='H')],
+ "categorical": pd.Categorical(["c", "b", "a"],
+ categories=["a", "b", "c"],
+ ordered=False),
+ "datetime_mixed_tz": [pd.to_datetime("2017-08-01 00:00:00",
+ utc=True),
+ pd.to_datetime("2017-08-01 02:00:00"),
+ pd.to_datetime("2017-08-02 00:00:00")]}
+ testdata = pd.DataFrame(data)
+ ax_period = testdata.plot(x="numeric", y="period")
+ assert (ax_period.get_lines()[0].get_data()[1] ==
+ testdata["period"].values).all()
+ ax_categorical = testdata.plot(x="numeric", y="categorical")
+ assert (ax_categorical.get_lines()[0].get_data()[1] ==
+ testdata["categorical"].values).all()
+ ax_datetime_mixed_tz = testdata.plot(x="numeric",
+ y="datetime_mixed_tz")
+ assert (ax_datetime_mixed_tz.get_lines()[0].get_data()[1] ==
+ testdata["datetime_mixed_tz"].values).all()
+
@pytest.mark.slow
def test_subplots_layout(self):
# GH 6667
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 879ac96680fbb..bd8a999ce2330 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -1625,3 +1625,13 @@ def test_isleapyear_deprecate(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert isleapyear(2004)
+
+ def test_pivot_margins_name_unicode(self):
+ # issue #13292
+ greek = u'\u0394\u03bf\u03ba\u03b9\u03bc\u03ae'
+ frame = pd.DataFrame({'foo': [1, 2, 3]})
+ table = pd.pivot_table(frame, index=['foo'], aggfunc=len, margins=True,
+ margins_name=greek)
+ index = pd.Index([1, 2, 3, greek], dtype='object', name='foo')
+ expected = pd.DataFrame(index=index)
+ tm.assert_frame_equal(table, expected)
diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py
index 7cd1a7db0f9fe..8d47ce4802ac6 100644
--- a/pandas/tests/scalar/test_timestamp.py
+++ b/pandas/tests/scalar/test_timestamp.py
@@ -555,6 +555,14 @@ def check(value, equal):
for end in ends:
assert getattr(ts, end)
+ @pytest.mark.parametrize('data, expected',
+ [(Timestamp('2017-08-28 23:00:00'), 'Monday'),
+ (Timestamp('2017-08-28 23:00:00', tz='EST'),
+ 'Monday')])
+ def test_weekday_name(self, data, expected):
+ # GH 17354
+ assert data.weekday_name == expected
+
def test_pprint(self):
# GH12622
import pprint
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index b7fbe803f8d3b..d0805e2bb54d2 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -245,43 +245,6 @@ def test_iter(self):
for i, val in enumerate(self.ts):
assert val == self.ts[i]
- def test_iter_box(self):
- vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
- s = pd.Series(vals)
- assert s.dtype == 'datetime64[ns]'
- for res, exp in zip(s, vals):
- assert isinstance(res, pd.Timestamp)
- assert res.tz is None
- assert res == exp
-
- vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
- pd.Timestamp('2011-01-02', tz='US/Eastern')]
- s = pd.Series(vals)
-
- assert s.dtype == 'datetime64[ns, US/Eastern]'
- for res, exp in zip(s, vals):
- assert isinstance(res, pd.Timestamp)
- assert res.tz == exp.tz
- assert res == exp
-
- # timedelta
- vals = [pd.Timedelta('1 days'), pd.Timedelta('2 days')]
- s = pd.Series(vals)
- assert s.dtype == 'timedelta64[ns]'
- for res, exp in zip(s, vals):
- assert isinstance(res, pd.Timedelta)
- assert res == exp
-
- # period (object dtype, not boxed)
- vals = [pd.Period('2011-01-01', freq='M'),
- pd.Period('2011-01-02', freq='M')]
- s = pd.Series(vals)
- assert s.dtype == 'object'
- for res, exp in zip(s, vals):
- assert isinstance(res, pd.Period)
- assert res.freq == 'M'
- assert res == exp
-
def test_keys(self):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index c214280ee8386..fa9feb016726e 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -279,7 +279,7 @@ def test_infer_objects_series(self):
expected = Series([1., 2., 3., np.nan])
tm.assert_series_equal(actual, expected)
- # only soft conversions, uncovertable pass thru unchanged
+ # only soft conversions, unconvertable pass thru unchanged
actual = (Series(np.array([1, 2, 3, None, 'a'], dtype='O'))
.infer_objects())
expected = Series([1, 2, 3, None, 'a'])
diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py
index 503185de427f1..5b7fd1ec94a90 100644
--- a/pandas/tests/series/test_io.py
+++ b/pandas/tests/series/test_io.py
@@ -10,7 +10,7 @@
from pandas import Series, DataFrame
-from pandas.compat import StringIO, u, long
+from pandas.compat import StringIO, u
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, ensure_clean)
import pandas.util.testing as tm
@@ -178,37 +178,3 @@ def test_to_dict(self, mapping):
from_method = Series(ts.to_dict(collections.Counter))
from_constructor = Series(collections.Counter(ts.iteritems()))
tm.assert_series_equal(from_method, from_constructor)
-
-
-class TestSeriesToList(TestData):
-
- def test_tolist(self):
- rs = self.ts.tolist()
- xp = self.ts.values.tolist()
- assert_almost_equal(rs, xp)
-
- # datetime64
- s = Series(self.ts.index)
- rs = s.tolist()
- assert self.ts.index[0] == rs[0]
-
- def test_tolist_np_int(self):
- # GH10904
- for t in ['int8', 'int16', 'int32', 'int64']:
- s = pd.Series([1], dtype=t)
- assert isinstance(s.tolist()[0], (int, long))
-
- def test_tolist_np_uint(self):
- # GH10904
- for t in ['uint8', 'uint16']:
- s = pd.Series([1], dtype=t)
- assert isinstance(s.tolist()[0], int)
- for t in ['uint32', 'uint64']:
- s = pd.Series([1], dtype=t)
- assert isinstance(s.tolist()[0], long)
-
- def test_tolist_np_float(self):
- # GH10904
- for t in ['float16', 'float32', 'float64']:
- s = pd.Series([1], dtype=t)
- assert isinstance(s.tolist()[0], float)
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 9e92c7cf1a9b8..38d78b12b31aa 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -13,9 +13,10 @@
is_object_dtype, is_datetimetz,
needs_i8_conversion)
import pandas.util.testing as tm
-from pandas import (Series, Index, DatetimeIndex, TimedeltaIndex, PeriodIndex,
- Timedelta, IntervalIndex, Interval)
-from pandas.compat import StringIO, PYPY
+from pandas import (Series, Index, DatetimeIndex, TimedeltaIndex,
+ PeriodIndex, Timedelta, IntervalIndex, Interval,
+ CategoricalIndex, Timestamp)
+from pandas.compat import StringIO, PYPY, long
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.base import PandasDelegate, NoNewAttributesMixin
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
@@ -433,7 +434,7 @@ def test_value_counts_unique_nunique(self):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
- assert isinstance(r, pd.Timestamp)
+ assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(result,
orig._values.asobject.values)
else:
@@ -1031,3 +1032,144 @@ def f():
pytest.raises(AttributeError, f)
assert not hasattr(t, "b")
+
+
+class TestToIterable(object):
+ # test that we convert an iterable to python types
+
+ dtypes = [
+ ('int8', (int, long)),
+ ('int16', (int, long)),
+ ('int32', (int, long)),
+ ('int64', (int, long)),
+ ('uint8', (int, long)),
+ ('uint16', (int, long)),
+ ('uint32', (int, long)),
+ ('uint64', (int, long)),
+ ('float16', float),
+ ('float32', float),
+ ('float64', float),
+ ('datetime64[ns]', Timestamp),
+ ('datetime64[ns, US/Eastern]', Timestamp),
+ ('timedelta64[ns]', Timedelta)]
+
+ @pytest.mark.parametrize(
+ 'dtype, rdtype', dtypes)
+ @pytest.mark.parametrize(
+ 'method',
+ [
+ lambda x: x.tolist(),
+ lambda x: list(x),
+ lambda x: list(x.__iter__()),
+ ], ids=['tolist', 'list', 'iter'])
+ @pytest.mark.parametrize('typ', [Series, Index])
+ def test_iterable(self, typ, method, dtype, rdtype):
+ # gh-10904
+ # gh-13258
+ # coerce iteration to underlying python / pandas types
+ s = typ([1], dtype=dtype)
+ result = method(s)[0]
+ assert isinstance(result, rdtype)
+
+ @pytest.mark.parametrize(
+ 'dtype, rdtype, obj',
+ [
+ ('object', object, 'a'),
+ ('object', (int, long), 1),
+ ('category', object, 'a'),
+ ('category', (int, long), 1)])
+ @pytest.mark.parametrize(
+ 'method',
+ [
+ lambda x: x.tolist(),
+ lambda x: list(x),
+ lambda x: list(x.__iter__()),
+ ], ids=['tolist', 'list', 'iter'])
+ @pytest.mark.parametrize('typ', [Series, Index])
+ def test_iterable_object_and_category(self, typ, method,
+ dtype, rdtype, obj):
+ # gh-10904
+ # gh-13258
+ # coerce iteration to underlying python / pandas types
+ s = typ([obj], dtype=dtype)
+ result = method(s)[0]
+ assert isinstance(result, rdtype)
+
+ @pytest.mark.parametrize(
+ 'dtype, rdtype', dtypes)
+ def test_iterable_items(self, dtype, rdtype):
+ # gh-13258
+ # test items / iteritems yields the correct boxed scalars
+ # this only applies to series
+ s = Series([1], dtype=dtype)
+ _, result = list(s.items())[0]
+ assert isinstance(result, rdtype)
+
+ _, result = list(s.iteritems())[0]
+ assert isinstance(result, rdtype)
+
+ @pytest.mark.parametrize(
+ 'dtype, rdtype',
+ dtypes + [
+ ('object', (int, long)),
+ ('category', (int, long))])
+ @pytest.mark.parametrize('typ', [Series, Index])
+ def test_iterable_map(self, typ, dtype, rdtype):
+ # gh-13236
+ # coerce iteration to underlying python / pandas types
+ s = typ([1], dtype=dtype)
+ result = s.map(type)[0]
+ if not isinstance(rdtype, tuple):
+ rdtype = tuple([rdtype])
+ assert result in rdtype
+
+ @pytest.mark.parametrize(
+ 'method',
+ [
+ lambda x: x.tolist(),
+ lambda x: list(x),
+ lambda x: list(x.__iter__()),
+ ], ids=['tolist', 'list', 'iter'])
+ def test_categorial_datetimelike(self, method):
+ i = CategoricalIndex([Timestamp('1999-12-31'),
+ Timestamp('2000-12-31')])
+
+ result = method(i)[0]
+ assert isinstance(result, Timestamp)
+
+ def test_iter_box(self):
+ vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
+ s = pd.Series(vals)
+ assert s.dtype == 'datetime64[ns]'
+ for res, exp in zip(s, vals):
+ assert isinstance(res, pd.Timestamp)
+ assert res.tz is None
+ assert res == exp
+
+ vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
+ pd.Timestamp('2011-01-02', tz='US/Eastern')]
+ s = pd.Series(vals)
+
+ assert s.dtype == 'datetime64[ns, US/Eastern]'
+ for res, exp in zip(s, vals):
+ assert isinstance(res, pd.Timestamp)
+ assert res.tz == exp.tz
+ assert res == exp
+
+ # timedelta
+ vals = [pd.Timedelta('1 days'), pd.Timedelta('2 days')]
+ s = pd.Series(vals)
+ assert s.dtype == 'timedelta64[ns]'
+ for res, exp in zip(s, vals):
+ assert isinstance(res, pd.Timedelta)
+ assert res == exp
+
+ # period (object dtype, not boxed)
+ vals = [pd.Period('2011-01-01', freq='M'),
+ pd.Period('2011-01-02', freq='M')]
+ s = pd.Series(vals)
+ assert s.dtype == 'object'
+ for res, exp in zip(s, vals):
+ assert isinstance(res, pd.Period)
+ assert res.freq == 'M'
+ assert res == exp
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 7bbe220378993..1fa3c84dc0260 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -26,6 +26,7 @@
Interval, IntervalIndex)
from pandas.compat import range, lrange, u, PY3, PYPY
from pandas.core.config import option_context
+from pandas.core.categorical import _recode_for_categories
class TestCategorical(object):
@@ -963,6 +964,67 @@ def test_rename_categories(self):
with pytest.raises(ValueError):
cat.rename_categories([1, 2])
+ @pytest.mark.parametrize('codes, old, new, expected', [
+ ([0, 1], ['a', 'b'], ['a', 'b'], [0, 1]),
+ ([0, 1], ['b', 'a'], ['b', 'a'], [0, 1]),
+ ([0, 1], ['a', 'b'], ['b', 'a'], [1, 0]),
+ ([0, 1], ['b', 'a'], ['a', 'b'], [1, 0]),
+ ([0, 1, 0, 1], ['a', 'b'], ['a', 'b', 'c'], [0, 1, 0, 1]),
+ ([0, 1, 2, 2], ['a', 'b', 'c'], ['a', 'b'], [0, 1, -1, -1]),
+ ([0, 1, -1], ['a', 'b', 'c'], ['a', 'b', 'c'], [0, 1, -1]),
+ ([0, 1, -1], ['a', 'b', 'c'], ['b'], [-1, 0, -1]),
+ ([0, 1, -1], ['a', 'b', 'c'], ['d'], [-1, -1, -1]),
+ ([0, 1, -1], ['a', 'b', 'c'], [], [-1, -1, -1]),
+ ([-1, -1], [], ['a', 'b'], [-1, -1]),
+ ([1, 0], ['b', 'a'], ['a', 'b'], [0, 1]),
+ ])
+ def test_recode_to_categories(self, codes, old, new, expected):
+ codes = np.asanyarray(codes, dtype=np.int8)
+ expected = np.asanyarray(expected, dtype=np.int8)
+ old = Index(old)
+ new = Index(new)
+ result = _recode_for_categories(codes, old, new)
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_recode_to_categories_large(self):
+ N = 1000
+ codes = np.arange(N)
+ old = Index(codes)
+ expected = np.arange(N - 1, -1, -1, dtype=np.int16)
+ new = Index(expected)
+ result = _recode_for_categories(codes, old, new)
+ tm.assert_numpy_array_equal(result, expected)
+
+ @pytest.mark.parametrize('values, categories, new_categories', [
+ # No NaNs, same cats, same order
+ (['a', 'b', 'a'], ['a', 'b'], ['a', 'b'],),
+ # No NaNs, same cats, different order
+ (['a', 'b', 'a'], ['a', 'b'], ['b', 'a'],),
+ # Same, unsorted
+ (['b', 'a', 'a'], ['a', 'b'], ['a', 'b'],),
+ # No NaNs, same cats, different order
+ (['b', 'a', 'a'], ['a', 'b'], ['b', 'a'],),
+ # NaNs
+ (['a', 'b', 'c'], ['a', 'b'], ['a', 'b']),
+ (['a', 'b', 'c'], ['a', 'b'], ['b', 'a']),
+ (['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
+ (['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
+ # Introduce NaNs
+ (['a', 'b', 'c'], ['a', 'b'], ['a']),
+ (['a', 'b', 'c'], ['a', 'b'], ['b']),
+ (['b', 'a', 'c'], ['a', 'b'], ['a']),
+ (['b', 'a', 'c'], ['a', 'b'], ['a']),
+ # No overlap
+ (['a', 'b', 'c'], ['a', 'b'], ['d', 'e']),
+ ])
+ @pytest.mark.parametrize('ordered', [True, False])
+ def test_set_categories_many(self, values, categories, new_categories,
+ ordered):
+ c = Categorical(values, categories)
+ expected = Categorical(values, new_categories, ordered)
+ result = c.set_categories(new_categories, ordered=ordered)
+ tm.assert_categorical_equal(result, expected)
+
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
@@ -4002,7 +4064,7 @@ def test_merge(self):
expected = df.copy()
# object-cat
- # note that we propogate the category
+ # note that we propagate the category
# because we don't have any matching rows
cright = right.copy()
cright['d'] = cright['d'].astype('category')
diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py
index e58042961129d..a5b12bbf9608a 100644
--- a/pandas/tests/test_sorting.py
+++ b/pandas/tests/test_sorting.py
@@ -408,7 +408,7 @@ def test_mixed_integer(self):
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
- def test_mixed_interger_from_list(self):
+ def test_mixed_integer_from_list(self):
values = ['b', 1, 0, 'a', 0, 'b']
result = safe_sort(values)
expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object)
diff --git a/pandas/tests/tseries/test_offsets.py b/pandas/tests/tseries/test_offsets.py
index e03b3e0a85e5e..cd2c29ffe3ac6 100644
--- a/pandas/tests/tseries/test_offsets.py
+++ b/pandas/tests/tseries/test_offsets.py
@@ -111,7 +111,10 @@ def offset_types(self):
def _get_offset(self, klass, value=1, normalize=False):
# create instance from offset class
- if klass is FY5253 or klass is FY5253Quarter:
+ if klass is FY5253:
+ klass = klass(n=value, startingMonth=1, weekday=1,
+ variation='last', normalize=normalize)
+ elif klass is FY5253Quarter:
klass = klass(n=value, startingMonth=1, weekday=1,
qtr_with_extra_week=1, variation='last',
normalize=normalize)
@@ -1952,6 +1955,11 @@ def _check_roundtrip(obj):
_check_roundtrip(self._object(2))
_check_roundtrip(self._object() * 2)
+ def test_copy(self):
+ # GH 17452
+ off = self._object(weekmask='Mon Wed Fri')
+ assert off == off.copy()
+
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_object = CBMonthEnd
@@ -2629,7 +2637,7 @@ def test_offset(self):
def test_day_of_month(self):
dt = datetime(2007, 1, 1)
- offset = MonthEnd(day=20)
+ offset = MonthEnd()
result = dt + offset
assert result == Timestamp(2007, 1, 31)
@@ -3678,7 +3686,7 @@ def test_onOffset(self):
1, startingMonth=8, weekday=WeekDay.THU,
qtr_with_extra_week=4)
offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
- variation="nearest", qtr_with_extra_week=4)
+ variation="nearest")
tests = [
# From Wikipedia
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 7f34bcaf52926..085a3a784557b 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -8,7 +8,6 @@
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.common import (
- is_integer,
is_period_arraylike,
is_timedelta64_dtype,
is_datetime64_dtype)
@@ -21,6 +20,10 @@
from pandas._libs import lib, tslib
from pandas._libs.tslib import Timedelta
+from pandas._libs.tslibs.frequencies import ( # noqa
+ get_freq_code, _base_and_stride, _period_str_to_code,
+ _INVALID_FREQ_ERROR, opattern, _lite_rule_alias, _dont_uppercase,
+ _period_code_map, _reverse_period_code_map)
from pytz import AmbiguousTimeError
@@ -298,58 +301,6 @@ def get_freq(freq):
return freq
-def get_freq_code(freqstr):
- """
- Return freq str or tuple to freq code and stride (mult)
-
- Parameters
- ----------
- freqstr : str or tuple
-
- Returns
- -------
- return : tuple of base frequency code and stride (mult)
-
- Example
- -------
- >>> get_freq_code('3D')
- (6000, 3)
-
- >>> get_freq_code('D')
- (6000, 1)
-
- >>> get_freq_code(('D', 3))
- (6000, 3)
- """
- if isinstance(freqstr, DateOffset):
- freqstr = (freqstr.rule_code, freqstr.n)
-
- if isinstance(freqstr, tuple):
- if (is_integer(freqstr[0]) and
- is_integer(freqstr[1])):
- # e.g., freqstr = (2000, 1)
- return freqstr
- else:
- # e.g., freqstr = ('T', 5)
- try:
- code = _period_str_to_code(freqstr[0])
- stride = freqstr[1]
- except:
- if is_integer(freqstr[1]):
- raise
- code = _period_str_to_code(freqstr[1])
- stride = freqstr[0]
- return code, stride
-
- if is_integer(freqstr):
- return (freqstr, 1)
-
- base, stride = _base_and_stride(freqstr)
- code = _period_str_to_code(base)
-
- return code, stride
-
-
def _get_freq_str(base, mult=1):
code = _reverse_period_code_map.get(base)
if mult == 1:
@@ -427,27 +378,6 @@ def get_period_alias(offset_str):
return _offset_to_period_map.get(offset_str, None)
-_lite_rule_alias = {
- 'W': 'W-SUN',
- 'Q': 'Q-DEC',
-
- 'A': 'A-DEC', # YearEnd(month=12),
- 'Y': 'A-DEC',
- 'AS': 'AS-JAN', # YearBegin(month=1),
- 'YS': 'AS-JAN',
- 'BA': 'BA-DEC', # BYearEnd(month=12),
- 'BY': 'BA-DEC',
- 'BAS': 'BAS-JAN', # BYearBegin(month=1),
- 'BYS': 'BAS-JAN',
-
- 'Min': 'T',
- 'min': 'T',
- 'ms': 'L',
- 'us': 'U',
- 'ns': 'N'
-}
-
-
_name_to_offset_map = {'days': Day(1),
'hours': Hour(1),
'minutes': Minute(1),
@@ -457,9 +387,6 @@ def get_period_alias(offset_str):
'nanoseconds': Nano(1)}
-_INVALID_FREQ_ERROR = "Invalid frequency: {0}"
-
-
@deprecate_kwarg(old_arg_name='freqstr', new_arg_name='freq')
def to_offset(freq):
"""
@@ -571,37 +498,6 @@ def to_offset(freq):
return delta
-# hack to handle WOM-1MON
-opattern = re.compile(
- r'([\-]?\d*|[\-]?\d*\.\d*)\s*([A-Za-z]+([\-][\dA-Za-z\-]+)?)'
-)
-
-
-def _base_and_stride(freqstr):
- """
- Return base freq and stride info from string representation
-
- Examples
- --------
- _freq_and_stride('5Min') -> 'Min', 5
- """
- groups = opattern.match(freqstr)
-
- if not groups:
- raise ValueError("Could not evaluate {freq}".format(freq=freqstr))
-
- stride = groups.group(1)
-
- if len(stride):
- stride = int(stride)
- else:
- stride = 1
-
- base = groups.group(2)
-
- return (base, stride)
-
-
def get_base_alias(freqstr):
"""
Returns the base frequency alias, e.g., '5D' -> 'D'
@@ -609,9 +505,6 @@ def get_base_alias(freqstr):
return _base_and_stride(freqstr)[0]
-_dont_uppercase = set(('MS', 'ms'))
-
-
def get_offset(name):
"""
Return DateOffset object associated with rule name
@@ -660,96 +553,6 @@ def get_standard_freq(freq):
# ---------------------------------------------------------------------
# Period codes
-# period frequency constants corresponding to scikits timeseries
-# originals
-_period_code_map = {
- # Annual freqs with various fiscal year ends.
- # eg, 2005 for A-FEB runs Mar 1, 2004 to Feb 28, 2005
- "A-DEC": 1000, # Annual - December year end
- "A-JAN": 1001, # Annual - January year end
- "A-FEB": 1002, # Annual - February year end
- "A-MAR": 1003, # Annual - March year end
- "A-APR": 1004, # Annual - April year end
- "A-MAY": 1005, # Annual - May year end
- "A-JUN": 1006, # Annual - June year end
- "A-JUL": 1007, # Annual - July year end
- "A-AUG": 1008, # Annual - August year end
- "A-SEP": 1009, # Annual - September year end
- "A-OCT": 1010, # Annual - October year end
- "A-NOV": 1011, # Annual - November year end
-
- # Quarterly frequencies with various fiscal year ends.
- # eg, Q42005 for Q-OCT runs Aug 1, 2005 to Oct 31, 2005
- "Q-DEC": 2000, # Quarterly - December year end
- "Q-JAN": 2001, # Quarterly - January year end
- "Q-FEB": 2002, # Quarterly - February year end
- "Q-MAR": 2003, # Quarterly - March year end
- "Q-APR": 2004, # Quarterly - April year end
- "Q-MAY": 2005, # Quarterly - May year end
- "Q-JUN": 2006, # Quarterly - June year end
- "Q-JUL": 2007, # Quarterly - July year end
- "Q-AUG": 2008, # Quarterly - August year end
- "Q-SEP": 2009, # Quarterly - September year end
- "Q-OCT": 2010, # Quarterly - October year end
- "Q-NOV": 2011, # Quarterly - November year end
-
- "M": 3000, # Monthly
-
- "W-SUN": 4000, # Weekly - Sunday end of week
- "W-MON": 4001, # Weekly - Monday end of week
- "W-TUE": 4002, # Weekly - Tuesday end of week
- "W-WED": 4003, # Weekly - Wednesday end of week
- "W-THU": 4004, # Weekly - Thursday end of week
- "W-FRI": 4005, # Weekly - Friday end of week
- "W-SAT": 4006, # Weekly - Saturday end of week
-
- "B": 5000, # Business days
- "D": 6000, # Daily
- "H": 7000, # Hourly
- "T": 8000, # Minutely
- "S": 9000, # Secondly
- "L": 10000, # Millisecondly
- "U": 11000, # Microsecondly
- "N": 12000, # Nanosecondly
-}
-
-_reverse_period_code_map = {}
-for _k, _v in compat.iteritems(_period_code_map):
- _reverse_period_code_map[_v] = _k
-
-# Yearly aliases
-year_aliases = {}
-
-for k, v in compat.iteritems(_period_code_map):
- if k.startswith("A-"):
- alias = "Y" + k[1:]
- year_aliases[alias] = v
-
-_period_code_map.update(**year_aliases)
-del year_aliases
-
-_period_code_map.update({
- "Q": 2000, # Quarterly - December year end (default quarterly)
- "A": 1000, # Annual
- "W": 4000, # Weekly
- "C": 5000, # Custom Business Day
-})
-
-
-def _period_str_to_code(freqstr):
- freqstr = _lite_rule_alias.get(freqstr, freqstr)
-
- if freqstr not in _dont_uppercase:
- lower = freqstr.lower()
- freqstr = _lite_rule_alias.get(lower, freqstr)
-
- if freqstr not in _dont_uppercase:
- freqstr = freqstr.upper()
- try:
- return _period_code_map[freqstr]
- except KeyError:
- raise ValueError(_INVALID_FREQ_ERROR.format(freqstr))
-
def infer_freq(index, warn=True):
"""
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 7ccecaa84e6d6..d82a3a209af6b 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -11,6 +11,7 @@
from dateutil.relativedelta import relativedelta, weekday
from dateutil.easter import easter
from pandas._libs import tslib, Timestamp, OutOfBoundsDatetime, Timedelta
+from pandas.util._decorators import cache_readonly
import functools
import operator
@@ -573,9 +574,9 @@ def __setstate__(self, state):
"""Reconstruct an instance from a pickled state"""
self.__dict__ = state
if 'weekmask' in state and 'holidays' in state:
- calendar, holidays = self.get_calendar(weekmask=self.weekmask,
- holidays=self.holidays,
- calendar=None)
+ calendar, holidays = _get_calendar(weekmask=self.weekmask,
+ holidays=self.holidays,
+ calendar=None)
self.kwds['calendar'] = self.calendar = calendar
self.kwds['holidays'] = self.holidays = holidays
self.kwds['weekmask'] = state['weekmask']
@@ -978,9 +979,9 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
- calendar, holidays = self.get_calendar(weekmask=weekmask,
- holidays=holidays,
- calendar=calendar)
+ calendar, holidays = _get_calendar(weekmask=weekmask,
+ holidays=holidays,
+ calendar=calendar)
# CustomBusinessDay instances are identified by the
# following two attributes. See DateOffset._params()
# holidays, weekmask
@@ -989,36 +990,6 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.kwds['holidays'] = self.holidays = holidays
self.kwds['calendar'] = self.calendar = calendar
- def get_calendar(self, weekmask, holidays, calendar):
- """Generate busdaycalendar"""
- if isinstance(calendar, np.busdaycalendar):
- if not holidays:
- holidays = tuple(calendar.holidays)
- elif not isinstance(holidays, tuple):
- holidays = tuple(holidays)
- else:
- # trust that calendar.holidays and holidays are
- # consistent
- pass
- return calendar, holidays
-
- if holidays is None:
- holidays = []
- try:
- holidays = holidays + calendar.holidays().tolist()
- except AttributeError:
- pass
- holidays = [self._to_dt64(dt, dtype='datetime64[D]') for dt in
- holidays]
- holidays = tuple(sorted(holidays))
-
- kwargs = {'weekmask': weekmask}
- if holidays:
- kwargs['holidays'] = holidays
-
- busdaycalendar = np.busdaycalendar(**kwargs)
- return busdaycalendar, holidays
-
@apply_wraps
def apply(self, other):
if self.n <= 0:
@@ -1050,25 +1021,10 @@ def apply(self, other):
def apply_index(self, i):
raise NotImplementedError
- @staticmethod
- def _to_dt64(dt, dtype='datetime64'):
- # Currently
- # > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]')
- # numpy.datetime64('2013-05-01T02:00:00.000000+0200')
- # Thus astype is needed to cast datetime to datetime64[D]
- if getattr(dt, 'tzinfo', None) is not None:
- i8 = tslib.pydt_to_i8(dt)
- dt = tslib.tz_convert_single(i8, 'UTC', dt.tzinfo)
- dt = Timestamp(dt)
- dt = np.datetime64(dt)
- if dt.dtype.name != dtype:
- dt = dt.astype(dtype)
- return dt
-
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
- day64 = self._to_dt64(dt, 'datetime64[D]')
+ day64 = _to_dt64(dt, 'datetime64[D]')
return np.is_busday(day64, busdaycal=self.calendar)
@@ -1087,19 +1043,25 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.n = int(n)
self.normalize = normalize
super(CustomBusinessHour, self).__init__(**kwds)
+
+ calendar, holidays = _get_calendar(weekmask=weekmask,
+ holidays=holidays,
+ calendar=calendar)
+ self.kwds['weekmask'] = self.weekmask = weekmask
+ self.kwds['holidays'] = self.holidays = holidays
+ self.kwds['calendar'] = self.calendar = calendar
+
+ @cache_readonly
+ def next_bday(self):
# used for moving to next businessday
if self.n >= 0:
nb_offset = 1
else:
nb_offset = -1
- self.next_bday = CustomBusinessDay(n=nb_offset,
- weekmask=weekmask,
- holidays=holidays,
- calendar=calendar)
-
- self.kwds['weekmask'] = self.next_bday.weekmask
- self.kwds['holidays'] = self.next_bday.holidays
- self.kwds['calendar'] = self.next_bday.calendar
+ return CustomBusinessDay(n=nb_offset,
+ weekmask=self.weekmask,
+ holidays=self.holidays,
+ calendar=self.calendar)
class MonthOffset(SingleConstructorOffset):
@@ -1471,11 +1433,25 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
- self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
- weekmask=weekmask, holidays=holidays,
- calendar=calendar, **kwds)
- self.m_offset = MonthEnd(n=1, normalize=normalize, **kwds)
- self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar
+
+ calendar, holidays = _get_calendar(weekmask=weekmask,
+ holidays=holidays,
+ calendar=calendar)
+ self.kwds['weekmask'] = self.weekmask = weekmask
+ self.kwds['holidays'] = self.holidays = holidays
+ self.kwds['calendar'] = self.calendar = calendar
+
+ @cache_readonly
+ def cbday(self):
+ kwds = self.kwds
+ return CustomBusinessDay(n=self.n, normalize=self.normalize, **kwds)
+
+ @cache_readonly
+ def m_offset(self):
+ kwds = self.kwds
+ kwds = {key: kwds[key] for key in kwds
+ if key not in ['calendar', 'weekmask', 'holidays']}
+ return MonthEnd(n=1, normalize=self.normalize, **kwds)
@apply_wraps
def apply(self, other):
@@ -1531,11 +1507,27 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
- self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
- weekmask=weekmask, holidays=holidays,
- calendar=calendar, **kwds)
- self.m_offset = MonthBegin(n=1, normalize=normalize, **kwds)
- self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar
+
+ # _get_calendar does validation and possible transformation
+ # of calendar and holidays.
+ calendar, holidays = _get_calendar(weekmask=weekmask,
+ holidays=holidays,
+ calendar=calendar)
+ kwds['calendar'] = self.calendar = calendar
+ kwds['weekmask'] = self.weekmask = weekmask
+ kwds['holidays'] = self.holidays = holidays
+
+ @cache_readonly
+ def cbday(self):
+ kwds = self.kwds
+ return CustomBusinessDay(n=self.n, normalize=self.normalize, **kwds)
+
+ @cache_readonly
+ def m_offset(self):
+ kwds = self.kwds
+ kwds = {key: kwds[key] for key in kwds
+ if key not in ['calendar', 'weekmask', 'holidays']}
+ return MonthBegin(n=1, normalize=self.normalize, **kwds)
@apply_wraps
def apply(self, other):
@@ -2861,6 +2853,54 @@ class Nano(Tick):
CBMonthBegin = CustomBusinessMonthBegin
CDay = CustomBusinessDay
+# ---------------------------------------------------------------------
+# Business Calendar helpers
+
+
+def _get_calendar(weekmask, holidays, calendar):
+ """Generate busdaycalendar"""
+ if isinstance(calendar, np.busdaycalendar):
+ if not holidays:
+ holidays = tuple(calendar.holidays)
+ elif not isinstance(holidays, tuple):
+ holidays = tuple(holidays)
+ else:
+ # trust that calendar.holidays and holidays are
+ # consistent
+ pass
+ return calendar, holidays
+
+ if holidays is None:
+ holidays = []
+ try:
+ holidays = holidays + calendar.holidays().tolist()
+ except AttributeError:
+ pass
+ holidays = [_to_dt64(dt, dtype='datetime64[D]') for dt in holidays]
+ holidays = tuple(sorted(holidays))
+
+ kwargs = {'weekmask': weekmask}
+ if holidays:
+ kwargs['holidays'] = holidays
+
+ busdaycalendar = np.busdaycalendar(**kwargs)
+ return busdaycalendar, holidays
+
+
+def _to_dt64(dt, dtype='datetime64'):
+ # Currently
+ # > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]')
+ # numpy.datetime64('2013-05-01T02:00:00.000000+0200')
+ # Thus astype is needed to cast datetime to datetime64[D]
+ if getattr(dt, 'tzinfo', None) is not None:
+ i8 = tslib.pydt_to_i8(dt)
+ dt = tslib.tz_convert_single(i8, 'UTC', dt.tzinfo)
+ dt = Timestamp(dt)
+ dt = np.datetime64(dt)
+ if dt.dtype.name != dtype:
+ dt = dt.astype(dtype)
+ return dt
+
def _get_firstbday(wkday):
"""
diff --git a/pandas/tseries/util.py b/pandas/tseries/util.py
index 5934f5843736c..dc8a41215139d 100644
--- a/pandas/tseries/util.py
+++ b/pandas/tseries/util.py
@@ -16,7 +16,7 @@ def pivot_annual(series, freq=None):
The output has as many rows as distinct years in the original series,
and as many columns as the length of a leap year in the units corresponding
to the original frequency (366 for daily frequency, 366*24 for hourly...).
- The fist column of the output corresponds to Jan. 1st, 00:00:00,
+ The first column of the output corresponds to Jan. 1st, 00:00:00,
while the last column corresponds to Dec, 31st, 23:59:59.
Entries corresponding to Feb. 29th are masked for non-leap years.
diff --git a/pandas/util/_doctools.py b/pandas/util/_doctools.py
index cbc9518b96416..d654c78b8b13f 100644
--- a/pandas/util/_doctools.py
+++ b/pandas/util/_doctools.py
@@ -15,12 +15,18 @@ def __init__(self, cell_width=0.37, cell_height=0.25, font_size=7.5):
self.font_size = font_size
def _shape(self, df):
- """Calcurate table chape considering index levels"""
+ """
+ Calculate table chape considering index levels.
+ """
+
row, col = df.shape
return row + df.columns.nlevels, col + df.index.nlevels
def _get_cells(self, left, right, vertical):
- """Calcurate appropriate figure size based on left and right data"""
+ """
+ Calculate appropriate figure size based on left and right data.
+ """
+
if vertical:
# calcurate required number of cells
vcells = max(sum([self._shape(l)[0] for l in left]),
diff --git a/setup.py b/setup.py
index 444db5bc4d275..664478cc35845 100755
--- a/setup.py
+++ b/setup.py
@@ -341,19 +341,13 @@ class CheckSDist(sdist_class):
'pandas/_libs/window.pyx',
'pandas/_libs/sparse.pyx',
'pandas/_libs/parsers.pyx',
+ 'pandas/_libs/tslibs/timezones.pyx',
+ 'pandas/_libs/tslibs/frequencies.pyx',
'pandas/io/sas/sas.pyx']
def initialize_options(self):
sdist_class.initialize_options(self)
- '''
- self._pyxfiles = []
- for root, dirs, files in os.walk('pandas'):
- for f in files:
- if f.endswith('.pyx'):
- self._pyxfiles.append(pjoin(root, f))
- '''
-
def run(self):
if 'cython' in cmdclass:
self.run_command('cython')
@@ -467,7 +461,6 @@ def pxd(name):
tseries_depends = ['pandas/_libs/src/datetime/np_datetime.h',
'pandas/_libs/src/datetime/np_datetime_strings.h',
- 'pandas/_libs/src/period_helper.h',
'pandas/_libs/src/datetime.pxd']
@@ -478,38 +471,39 @@ def pxd(name):
'_libs.lib': {'pyxfile': '_libs/lib',
'depends': lib_depends + tseries_depends},
'_libs.hashtable': {'pyxfile': '_libs/hashtable',
- 'pxdfiles': ['_libs/hashtable'],
'depends': (['pandas/_libs/src/klib/khash_python.h']
+ _pxi_dep['hashtable'])},
'_libs.tslib': {'pyxfile': '_libs/tslib',
- 'pxdfiles': ['_libs/src/util', '_libs/lib'],
+ 'pxdfiles': ['_libs/src/util'],
'depends': tseries_depends,
'sources': ['pandas/_libs/src/datetime/np_datetime.c',
- 'pandas/_libs/src/datetime/np_datetime_strings.c',
- 'pandas/_libs/src/period_helper.c']},
+ 'pandas/_libs/src/datetime/np_datetime_strings.c']},
+ '_libs.tslibs.timezones': {'pyxfile': '_libs/tslibs/timezones'},
'_libs.period': {'pyxfile': '_libs/period',
- 'depends': tseries_depends,
+ 'depends': (tseries_depends +
+ ['pandas/_libs/src/period_helper.h']),
'sources': ['pandas/_libs/src/datetime/np_datetime.c',
'pandas/_libs/src/datetime/np_datetime_strings.c',
'pandas/_libs/src/period_helper.c']},
+ '_libs.tslibs.frequencies': {'pyxfile': '_libs/tslibs/frequencies',
+ 'pxdfiles': ['_libs/src/util']},
'_libs.index': {'pyxfile': '_libs/index',
'sources': ['pandas/_libs/src/datetime/np_datetime.c',
'pandas/_libs/src/datetime/np_datetime_strings.c'],
- 'pxdfiles': ['_libs/src/util', '_libs/hashtable'],
+ 'pxdfiles': ['_libs/src/util'],
'depends': _pxi_dep['index']},
'_libs.algos': {'pyxfile': '_libs/algos',
- 'pxdfiles': ['_libs/src/util', '_libs/algos', '_libs/hashtable'],
+ 'pxdfiles': ['_libs/src/util'],
'depends': _pxi_dep['algos']},
'_libs.groupby': {'pyxfile': '_libs/groupby',
- 'pxdfiles': ['_libs/src/util', '_libs/algos'],
- 'depends': _pxi_dep['groupby']},
+ 'pxdfiles': ['_libs/src/util'],
+ 'depends': _pxi_dep['groupby']},
'_libs.join': {'pyxfile': '_libs/join',
- 'pxdfiles': ['_libs/src/util', '_libs/hashtable'],
+ 'pxdfiles': ['_libs/src/util'],
'depends': _pxi_dep['join']},
'_libs.reshape': {'pyxfile': '_libs/reshape',
'depends': _pxi_dep['reshape']},
'_libs.interval': {'pyxfile': '_libs/interval',
- 'pxdfiles': ['_libs/hashtable'],
'depends': _pxi_dep['interval']},
'_libs.window': {'pyxfile': '_libs/window',
'pxdfiles': ['_libs/src/skiplist', '_libs/src/util'],
@@ -522,12 +516,9 @@ def pxd(name):
'sources': ['pandas/_libs/src/parser/tokenizer.c',
'pandas/_libs/src/parser/io.c']},
'_libs.sparse': {'pyxfile': '_libs/sparse',
- 'depends': (['pandas/_libs/sparse.pyx'] +
- _pxi_dep['sparse'])},
- '_libs.testing': {'pyxfile': '_libs/testing',
- 'depends': ['pandas/_libs/testing.pyx']},
- '_libs.hashing': {'pyxfile': '_libs/hashing',
- 'depends': ['pandas/_libs/hashing.pyx']},
+ 'depends': _pxi_dep['sparse']},
+ '_libs.testing': {'pyxfile': '_libs/testing'},
+ '_libs.hashing': {'pyxfile': '_libs/hashing'},
'io.sas._sas': {'pyxfile': 'io/sas/sas'},
}
@@ -653,6 +644,7 @@ def pxd(name):
'pandas.io.formats',
'pandas.io.clipboard',
'pandas._libs',
+ 'pandas._libs.tslibs',
'pandas.plotting',
'pandas.stats',
'pandas.types',
diff --git a/tox.ini b/tox.ini
index 45ad7fc451e76..f055251581a93 100644
--- a/tox.ini
+++ b/tox.ini
@@ -19,6 +19,7 @@ deps =
xlrd
six
sqlalchemy
+ moto
# cd to anything but the default {toxinidir} which
# contains the pandas subdirectory and confuses
| This pull request adds some examples to ``Index.get_loc`` (actually to ``Index._index_shared_docs['get_loc']``) and clarifies the return value.
| https://api.github.com/repos/pandas-dev/pandas/pulls/17380 | 2017-08-30T15:54:11Z | 2017-09-17T16:26:56Z | null | 2023-05-11T01:16:18Z |
BUG: Try to sort result of Index.union rather than guessing sortability | diff --git a/appveyor.yml b/appveyor.yml
index ba001208864a8..4269784fee034 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -73,7 +73,7 @@ install:
- cmd: conda info -a
# create our env
- - cmd: conda create -n pandas python=%PYTHON_VERSION% cython pytest>=3.1.0 pytest-xdist
+ - cmd: conda create -n pandas python=%PYTHON_VERSION% cython pytest>=3.1.0
- cmd: activate pandas
- cmd: pip install moto
- SET REQ=ci\requirements-%PYTHON_VERSION%_WIN.run
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 302f8043f3ba7..5a2a878f1e160 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -920,6 +920,7 @@ Numeric
Indexing
^^^^^^^^
+- Bug in the order of the result of ``Index.union()`` when indexes contain tuples (:issue:`17376`)
- Bug in :class:`Index` construction from list of mixed type tuples (:issue:`18505`)
- Bug in :func:`Index.drop` when passing a list of both tuples and non-tuples (:issue:`18304`)
- Bug in :meth:`~DataFrame.drop`, :meth:`~Panel.drop`, :meth:`~Series.drop`, :meth:`~Index.drop` where no ``KeyError`` is raised when dropping a non-existent element from an axis that contains duplicates (:issue:`19186`)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 69a07a91838e1..7e7f99031a877 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2406,35 +2406,21 @@ def union(self, other):
value_set = set(lvals)
result.extend([x for x in rvals if x not in value_set])
else:
- indexer = self.get_indexer(other)
- indexer, = (indexer == -1).nonzero()
-
+ indexer = np.where(self.get_indexer(other) == -1)[0]
if len(indexer) > 0:
other_diff = algos.take_nd(rvals, indexer,
allow_fill=False)
result = _concat._concat_compat((lvals, other_diff))
- try:
- lvals[0] < other_diff[0]
- except TypeError as e:
- warnings.warn("%s, sort order is undefined for "
- "incomparable objects" % e, RuntimeWarning,
- stacklevel=3)
- else:
- types = frozenset((self.inferred_type,
- other.inferred_type))
- if not types & _unsortable_types:
- result.sort()
-
else:
result = lvals
- try:
- result = np.sort(result)
- except TypeError as e:
- warnings.warn("%s, sort order is undefined for "
- "incomparable objects" % e, RuntimeWarning,
- stacklevel=3)
+ try:
+ result = sorting.safe_sort(result)
+ except TypeError as e:
+ warnings.warn("%s, sort order is undefined for "
+ "incomparable objects" % e, RuntimeWarning,
+ stacklevel=3)
# for subclasses
return self._wrap_union_result(other, result)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 964a6b14d2b1e..619fbd2f5a802 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -784,8 +784,7 @@ def test_union(self):
expected = Index(list('ab'), name='A')
tm.assert_index_equal(union, expected)
- with tm.assert_produces_warning(RuntimeWarning):
- firstCat = self.strIndex.union(self.dateIndex)
+ firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
@@ -1453,28 +1452,25 @@ def test_drop_tuple(self, values, to_drop):
pytest.raises(KeyError, removed.drop, drop_me)
def test_tuple_union_bug(self):
- import pandas
- import numpy as np
-
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'),
(2, 'B'), (1, 'C'), (2, 'C')],
dtype=[('num', int), ('let', 'a1')])
- idx1 = pandas.Index(aidx1)
- idx2 = pandas.Index(aidx2)
+ idx1 = Index(aidx1)
+ idx2 = Index(aidx2)
- # intersection broken?
+ # intersection
int_idx = idx1.intersection(idx2)
+ expected = idx1 # pandas.Index(sorted(set(idx1) & set(idx2)))
# needs to be 1d like idx1 and idx2
- expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
assert int_idx.ndim == 1
tm.assert_index_equal(int_idx, expected)
- # union broken
+ # GH 17376 (union)
union_idx = idx1.union(idx2)
- expected = idx2
+ expected = idx2.sort_values()
assert union_idx.ndim == 1
tm.assert_index_equal(union_idx, expected)
@@ -1664,13 +1660,19 @@ def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
- with tm.assert_produces_warning(RuntimeWarning):
+ if PY3:
+ with tm.assert_produces_warning(RuntimeWarning):
+ joined = left_idx.join(right_idx, how='outer')
+ else:
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
- with tm.assert_produces_warning(RuntimeWarning):
- expected = right_idx.astype(object).union(left_idx.astype(object))
+ if PY3:
+ with tm.assert_produces_warning(RuntimeWarning):
+ expected = right_idx.astype(object).union(left_idx)
+ else:
+ expected = right_idx.astype(object).union(left_idx)
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
@@ -2059,10 +2061,7 @@ def test_copy_name(self):
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
- warning_type = RuntimeWarning if PY3 else None
- with tm.assert_produces_warning(warning_type):
- # Python 3: Unorderable types
- s3 = s1 * s2
+ s3 = s1 * s2
assert s3.index.name == 'mario'
@@ -2095,27 +2094,14 @@ def test_union_base(self):
first = idx[3:]
second = idx[:5]
- if PY3:
- with tm.assert_produces_warning(RuntimeWarning):
- # unorderable types
- result = first.union(second)
- expected = Index(['b', 2, 'c', 0, 'a', 1])
- tm.assert_index_equal(result, expected)
- else:
- result = first.union(second)
- expected = Index(['b', 2, 'c', 0, 'a', 1])
- tm.assert_index_equal(result, expected)
+ expected = Index([0, 1, 2, 'a', 'b', 'c'])
+ result = first.union(second)
+ tm.assert_index_equal(result, expected)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
- if PY3:
- with tm.assert_produces_warning(RuntimeWarning):
- # unorderable types
- result = first.union(case)
- assert tm.equalContents(result, idx)
- else:
result = first.union(case)
assert tm.equalContents(result, idx)
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index f90fcce973f00..4163ccfa1b31a 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -431,11 +431,7 @@ def test_comparison_label_based(self):
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
- if compat.PY3:
- with tm.assert_produces_warning(RuntimeWarning):
- result = a[a | e]
- else:
- result = a[a | e]
+ result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
@@ -1472,24 +1468,12 @@ def test_operators_bitwise(self):
pytest.raises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
# s_0123 will be all false now because of reindexing like s_tft
- if compat.PY3:
- # unable to sort incompatible object via .union.
- exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
- with tm.assert_produces_warning(RuntimeWarning):
- assert_series_equal(s_tft & s_0123, exp)
- else:
- exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
- assert_series_equal(s_tft & s_0123, exp)
+ exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
+ assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
- if compat.PY3:
- # unable to sort incompatible object via .union.
- exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
- with tm.assert_produces_warning(RuntimeWarning):
- assert_series_equal(s_0123 & s_tft, exp)
- else:
- exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
- assert_series_equal(s_0123 & s_tft, exp)
+ exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
+ assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
diff --git a/test.bat b/test.bat
index e07c84f257a69..358f0feb24a30 100644
--- a/test.bat
+++ b/test.bat
@@ -1,3 +1,3 @@
:: test on windows
-pytest --skip-slow --skip-network pandas -n 2 -r sxX --strict %*
+pytest -v --skip-slow --skip-network pandas -r sxX --strict %*
| - [x] closes #17376
- [x] tests added / passed
- [x] passes `git diff master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
The lines of code I removed seemed written with the intention of guaranteeing the same result under Python 2 and 3, but the docs say "Form the union of two Index objects and sorts _if possible._": so if it is possible in Python 2 we should do it, regardless of whether it is in Python 3. After all, this is how ``.sort_values()`` works. And more practically, trying to guess whether the result of an union can be sorted is hard. | https://api.github.com/repos/pandas-dev/pandas/pulls/17378 | 2017-08-30T06:30:30Z | 2018-10-11T01:49:29Z | null | 2018-10-11T01:49:29Z |
BUG: Return local Timestamp.weekday_name attribute (#17354) | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 81e52266f972e..14e07978c7adc 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -399,6 +399,7 @@ Conversion
- Fixed the return type of ``IntervalIndex.is_non_overlapping_monotonic`` to be a Python ``bool`` for consistency with similar attributes/methods. Previously returned a ``numpy.bool_``. (:issue:`17237`)
- Bug in ``IntervalIndex.is_non_overlapping_monotonic`` when intervals are closed on both sides and overlap at a point (:issue:`16560`)
- Bug in :func:`Series.fillna` returns frame when ``inplace=True`` and ``value`` is dict (:issue:`16156`)
+- Bug in :attr:`Timestamp.weekday_name` returning a UTC-based weekday name when localized to a timezone (:issue:`17354`)
Indexing
^^^^^^^^
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 50e0b77c6d3a0..40b473e6ff85f 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -532,9 +532,7 @@ class Timestamp(_Timestamp):
@property
def weekday_name(self):
- out = get_date_name_field(
- np.array([self.value], dtype=np.int64), 'weekday_name')
- return out[0]
+ return self._get_named_field('weekday_name')
@property
def dayofyear(self):
@@ -1268,13 +1266,29 @@ cdef class _Timestamp(datetime):
# same timezone if specified)
return datetime.__sub__(self, other)
- cpdef _get_field(self, field):
+ cdef int64_t _maybe_convert_value_to_local(self):
+ """Convert UTC i8 value to local i8 value if tz exists"""
+ cdef:
+ int64_t val
val = self.value
if self.tz is not None and not _is_utc(self.tz):
val = tz_convert_single(self.value, 'UTC', self.tz)
+ return val
+
+ cpdef _get_field(self, field):
+ cdef:
+ int64_t val
+ val = self._maybe_convert_value_to_local()
out = get_date_field(np.array([val], dtype=np.int64), field)
return int(out[0])
+ cpdef _get_named_field(self, field):
+ cdef:
+ int64_t val
+ val = self._maybe_convert_value_to_local()
+ out = get_date_name_field(np.array([val], dtype=np.int64), field)
+ return out[0]
+
cpdef _get_start_end_field(self, field):
month_kw = self.freq.kwds.get(
'startingMonth', self.freq.kwds.get(
diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py
index 7cd1a7db0f9fe..8d47ce4802ac6 100644
--- a/pandas/tests/scalar/test_timestamp.py
+++ b/pandas/tests/scalar/test_timestamp.py
@@ -555,6 +555,14 @@ def check(value, equal):
for end in ends:
assert getattr(ts, end)
+ @pytest.mark.parametrize('data, expected',
+ [(Timestamp('2017-08-28 23:00:00'), 'Monday'),
+ (Timestamp('2017-08-28 23:00:00', tz='EST'),
+ 'Monday')])
+ def test_weekday_name(self, data, expected):
+ # GH 17354
+ assert data.weekday_name == expected
+
def test_pprint(self):
# GH12622
import pprint
| - [x] closes #17354
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
@jreback created a new method `_get_named_field` based on your [comment](https://github.com/pandas-dev/pandas/issues/17354#issuecomment-325319229) in the issue thread. | https://api.github.com/repos/pandas-dev/pandas/pulls/17377 | 2017-08-30T06:28:00Z | 2017-09-07T00:46:51Z | 2017-09-07T00:46:51Z | 2017-12-20T02:04:37Z |
DOC: Cleaned references to pandas <v0.12 in docs | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index fe20a7eb2b786..35eb14eda238f 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -251,8 +251,8 @@ replace NaN with some other value using ``fillna`` if you wish).
Flexible Comparisons
~~~~~~~~~~~~~~~~~~~~
-Starting in v0.8, pandas introduced binary comparison methods eq, ne, lt, gt,
-le, and ge to Series and DataFrame whose behavior is analogous to the binary
+Series and DataFrame have the binary comparison methods ``eq``, ``ne``, ``lt``, ``gt``,
+``le``, and ``ge`` whose behavior is analogous to the binary
arithmetic operations described above:
.. ipython:: python
@@ -1908,7 +1908,7 @@ each type in a ``DataFrame``:
dft.get_dtype_counts()
-Numeric dtypes will propagate and can coexist in DataFrames (starting in v0.11.0).
+Numeric dtypes will propagate and can coexist in DataFrames.
If a dtype is passed (either directly via the ``dtype`` keyword, a passed ``ndarray``,
or a passed ``Series``, then it will be preserved in DataFrame operations. Furthermore,
different numeric dtypes will **NOT** be combined. The following example will give you a taste.
@@ -2137,7 +2137,7 @@ gotchas
~~~~~~~
Performing selection operations on ``integer`` type data can easily upcast the data to ``floating``.
-The dtype of the input data will be preserved in cases where ``nans`` are not introduced (starting in 0.11.0)
+The dtype of the input data will be preserved in cases where ``nans`` are not introduced.
See also :ref:`Support for integer NA <gotchas.intna>`
.. ipython:: python
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index 3c6572229802d..4652ccbf0ad34 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -73,7 +73,7 @@ index is passed, one will be created having values ``[0, ..., len(data) - 1]``.
.. note::
- Starting in v0.8.0, pandas supports non-unique index values. If an operation
+ pandas supports non-unique index values. If an operation
that does not support duplicate index values is attempted, an exception
will be raised at that time. The reason for being lazy is nearly all performance-based
(there are many instances in computations, like parts of GroupBy, where the index
@@ -698,7 +698,7 @@ DataFrame in tabular form, though it won't always fit the console width:
print(baseball.iloc[-20:, :12].to_string())
-New since 0.10.0, wide DataFrames will now be printed across multiple rows by
+Wide DataFrames will be printed across multiple rows by
default:
.. ipython:: python
@@ -845,19 +845,16 @@ DataFrame objects with mixed-type columns, all of the data will get upcasted to
.. note::
- Unfortunately Panel, being less commonly used than Series and DataFrame,
+ Panel, being less commonly used than Series and DataFrame,
has been slightly neglected feature-wise. A number of methods and options
- available in DataFrame are not available in Panel. This will get worked
- on, of course, in future releases. And faster if you join me in working on
- the codebase.
+ available in DataFrame are not available in Panel.
.. _dsintro.to_panel:
From DataFrame using ``to_panel`` method
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This method was introduced in v0.7 to replace ``LongPanel.to_long``, and converts
-a DataFrame with a two-level index to a Panel.
+``to_panel`` converts a DataFrame with a two-level index to a Panel.
.. ipython:: python
:okwarning:
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index 937d682d238b3..53c0b771555f8 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -140,7 +140,7 @@ columns:
In [5]: grouped = df.groupby(get_letter_type, axis=1)
-Starting with 0.8, pandas Index objects now support duplicate values. If a
+pandas Index objects support duplicate values. If a
non-unique index is used as the group key in a groupby operation, all values
for the same index value will be considered to be in one group and thus the
output of aggregation functions will only contain unique index values:
@@ -288,8 +288,6 @@ chosen level:
s.sum(level='second')
-.. versionadded:: 0.6
-
Grouping with multiple levels is supported.
.. ipython:: python
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 53a259ad6eb15..4687e46490562 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -66,8 +66,6 @@ See the :ref:`cookbook<cookbook.selection>` for some advanced strategies
Different Choices for Indexing
------------------------------
-.. versionadded:: 0.11.0
-
Object selection has had a number of user-requested additions in order to
support more explicit location based indexing. Pandas now supports three types
of multi-axis indexing.
diff --git a/doc/source/io.rst b/doc/source/io.rst
index e338407361705..74ef6ea917ae7 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -364,7 +364,7 @@ warn_bad_lines : boolean, default ``True``
Specifying column data types
''''''''''''''''''''''''''''
-Starting with v0.10, you can indicate the data type for the whole DataFrame or
+You can indicate the data type for the whole DataFrame or
individual columns:
.. ipython:: python
@@ -3346,7 +3346,7 @@ Read/Write API
''''''''''''''
``HDFStore`` supports an top-level API using ``read_hdf`` for reading and ``to_hdf`` for writing,
-similar to how ``read_csv`` and ``to_csv`` work. (new in 0.11.0)
+similar to how ``read_csv`` and ``to_csv`` work.
.. ipython:: python
@@ -3791,7 +3791,7 @@ indexed dimension as the ``where``.
.. note::
- Indexes are automagically created (starting ``0.10.1``) on the indexables
+ Indexes are automagically created on the indexables
and any data columns you specify. This behavior can be turned off by passing
``index=False`` to ``append``.
@@ -3878,7 +3878,7 @@ create a new table!)
Iterator
++++++++
-Starting in ``0.11.0``, you can pass, ``iterator=True`` or ``chunksize=number_in_a_chunk``
+You can pass ``iterator=True`` or ``chunksize=number_in_a_chunk``
to ``select`` and ``select_as_multiple`` to return an iterator on the results.
The default is 50,000 rows returned in a chunk.
@@ -3986,8 +3986,8 @@ of rows in an object.
Multiple Table Queries
++++++++++++++++++++++
-New in 0.10.1 are the methods ``append_to_multiple`` and
-``select_as_multiple``, that can perform appending/selecting from
+The methods ``append_to_multiple`` and
+``select_as_multiple`` can perform appending/selecting from
multiple tables at once. The idea is to have one table (call it the
selector table) that you index most/all of the columns, and perform your
queries. The other table(s) are data tables with an index matching the
@@ -4291,7 +4291,7 @@ Pass ``min_itemsize`` on the first table creation to a-priori specify the minimu
``min_itemsize`` can be an integer, or a dict mapping a column name to an integer. You can pass ``values`` as a key to
allow all *indexables* or *data_columns* to have this min_itemsize.
-Starting in 0.11.0, passing a ``min_itemsize`` dict will cause all passed columns to be created as *data_columns* automatically.
+Passing a ``min_itemsize`` dict will cause all passed columns to be created as *data_columns* automatically.
.. note::
diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index d54288baa389b..64a321d67a825 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -67,9 +67,8 @@ arise and we wish to also consider that "missing" or "not available" or "NA".
.. note::
- Prior to version v0.10.0 ``inf`` and ``-inf`` were also
- considered to be "NA" in computations. This is no longer the case by
- default; use the ``mode.use_inf_as_na`` option to recover it.
+ If you want to consider ``inf`` and ``-inf`` to be "NA" in computations,
+ you can set ``pandas.options.mode.use_inf_as_na = True``.
.. _missing.isna:
@@ -485,8 +484,8 @@ respectively:
Replacing Generic Values
~~~~~~~~~~~~~~~~~~~~~~~~
-Often times we want to replace arbitrary values with other values. New in v0.8
-is the ``replace`` method in Series/DataFrame that provides an efficient yet
+Often times we want to replace arbitrary values with other values. The
+``replace`` method in Series/DataFrame provides an efficient yet
flexible way to perform such replacements.
For a Series, you can replace a single value or a list of values by another
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index ce4a920ad77b5..aded5e4402df2 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -1069,8 +1069,7 @@ Offset Aliases
~~~~~~~~~~~~~~
A number of string aliases are given to useful common time series
-frequencies. We will refer to these aliases as *offset aliases*
-(referred to as *time rules* prior to v0.8.0).
+frequencies. We will refer to these aliases as *offset aliases*.
.. csv-table::
:header: "Alias", "Description"
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index fb799c642131d..c637246537ca1 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -306,8 +306,6 @@ subplots:
df.diff().hist(color='k', alpha=0.5, bins=50)
-.. versionadded:: 0.10.0
-
The ``by`` keyword can be specified to plot grouped histograms:
.. ipython:: python
@@ -831,8 +829,6 @@ and take a :class:`Series` or :class:`DataFrame` as an argument.
Scatter Matrix Plot
~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 0.7.3
-
You can create a scatter plot matrix using the
``scatter_matrix`` method in ``pandas.plotting``:
@@ -859,8 +855,6 @@ You can create a scatter plot matrix using the
Density Plot
~~~~~~~~~~~~
-.. versionadded:: 0.8.0
-
You can create density plots using the :meth:`Series.plot.kde` and :meth:`DataFrame.plot.kde` methods.
.. ipython:: python
There are a lot of references in the docs to when exactly some change occurred. For newer changes this is great, but there comes a time when such references only disturb the reader rather than help them, as the versions referenced become so old that they become noise rather than help.
I've cleaned up references up to and including v0.11.
IMO I could have gone higher (v.015?), but can do that in some later round.
Some issues I would be glad for input on:
* In ``gotchas.rst``, there is a sentence "As of pandas 0.11, pandas is not 100% thread safe." I haven't altered this, but I presume this still is correct in the newest version of pandas? Then IMO it should be changed to reference a newer version or simply to "pandas is currently not 100% thread safe."
* In ``io.rst`` there is a sentence "0.10.1 of ``HDFStore`` can read tables created in a prior version of pandas, ...". I'm not even sure the "0.10.1" references the version of pandas or a HDF library and I left it alone. The paragraph also discusses backwards compatibility, which makes it somewhat relevant to keep around, even if it's an old change.
BUG: intersection of decreasing RangeIndexes | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index b24a6f067cee4..b73d106f09dbc 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -416,6 +416,7 @@ Indexing
- Bug in ``.iloc`` when used with inplace addition or assignment and an int indexer on a ``MultiIndex`` causing the wrong indexes to be read from and written to (:issue:`17148`)
- Bug in ``.isin()`` in which checking membership in empty ``Series`` objects raised an error (:issue:`16991`)
- Bug in ``CategoricalIndex`` reindexing in which specified indices containing duplicates were not being respected (:issue:`17323`)
+- Bug in intersection of ``RangeIndex`` with negative step (:issue:`17296`)
I/O
^^^
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 82412d3a7ef57..b759abaed4e56 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -324,12 +324,13 @@ def intersection(self, other):
if not len(self) or not len(other):
return RangeIndex._simple_new(None)
+ first = self[::-1] if self._step < 0 else self
+ second = other[::-1] if other._step < 0 else other
+
# check whether intervals intersect
# deals with in- and decreasing ranges
- int_low = max(min(self._start, self._stop + 1),
- min(other._start, other._stop + 1))
- int_high = min(max(self._stop, self._start + 1),
- max(other._stop, other._start + 1))
+ int_low = max(first._start, second._start)
+ int_high = min(first._stop, second._stop)
if int_high <= int_low:
return RangeIndex._simple_new(None)
@@ -337,21 +338,24 @@ def intersection(self, other):
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
- gcd, s, t = self._extended_gcd(self._step, other._step)
+ gcd, s, t = first._extended_gcd(first._step, second._step)
# check whether element sets intersect
- if (self._start - other._start) % gcd:
+ if (first._start - second._start) % gcd:
return RangeIndex._simple_new(None)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
- tmp_start = self._start + (other._start - self._start) * \
- self._step // gcd * s
- new_step = self._step * other._step // gcd
+ tmp_start = first._start + (second._start - first._start) * \
+ first._step // gcd * s
+ new_step = first._step * second._step // gcd
new_index = RangeIndex(tmp_start, int_high, new_step, fastpath=True)
# adjust index to limiting interval
new_index._start = new_index._min_fitting_element(int_low)
+
+ if (self._step < 0 and other._step < 0) is not (new_index._step < 0):
+ new_index = new_index[::-1]
return new_index
def _min_fitting_element(self, lower_limit):
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index 5ecf467b57fc5..06c8f0ee392c7 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -610,6 +610,21 @@ def test_intersection(self):
other.values)))
tm.assert_index_equal(result, expected)
+ # reversed (GH 17296)
+ result = other.intersection(self.index)
+ tm.assert_index_equal(result, expected)
+
+ # GH 17296: intersect two decreasing RangeIndexes
+ first = RangeIndex(10, -2, -2)
+ other = RangeIndex(5, -4, -1)
+ expected = first.astype(int).intersection(other.astype(int))
+ result = first.intersection(other).astype(int)
+ tm.assert_index_equal(result, expected)
+
+ # reversed
+ result = other.intersection(first).astype(int)
+ tm.assert_index_equal(result, expected)
+
index = RangeIndex(5)
# intersect of non-overlapping indices
| - [x] closes #17296
- [x] tests added / passed
- [x] passes `git diff master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17374 | 2017-08-29T21:13:49Z | 2017-09-07T00:47:44Z | 2017-09-07T00:47:43Z | 2017-09-07T05:06:38Z |
DOC: Update Overview page in documentation | diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index 92caeec319169..00a71603e1261 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -6,7 +6,11 @@
Package overview
****************
-:mod:`pandas` consists of the following things
+:mod:`pandas` is an open source, BSD-licensed library providing high-performance,
+easy-to-use data structures and data analysis tools for the `Python <https://www.python.org/>`__
+programming language.
+
+:mod:`pandas` consists of the following elements
* A set of labeled array data structures, the primary of which are
Series and DataFrame
@@ -21,27 +25,23 @@ Package overview
* Memory-efficient "sparse" versions of the standard data structures for storing
data that is mostly missing or mostly constant (some fixed value)
* Moving window statistics (rolling mean, rolling standard deviation, etc.)
- * Static and moving window linear and `panel regression
- <http://en.wikipedia.org/wiki/Panel_data>`__
-Data structures at a glance
----------------------------
+Data Structures
+---------------
.. csv-table::
:header: "Dimensions", "Name", "Description"
:widths: 15, 20, 50
- 1, Series, "1D labeled homogeneously-typed array"
- 2, DataFrame, "General 2D labeled, size-mutable tabular structure with
- potentially heterogeneously-typed columns"
- 3, Panel, "General 3D labeled, also size-mutable array"
+ 1, "Series", "1D labeled homogeneously-typed array"
+ 2, "DataFrame", "General 2D labeled, size-mutable tabular structure with potentially heterogeneously-typed column"
-Why more than 1 data structure?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Why more than one data structure?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The best way to think about the pandas data structures is as flexible
containers for lower dimensional data. For example, DataFrame is a container
-for Series, and Panel is a container for DataFrame objects. We would like to be
+for Series, and Series is a container for scalars. We would like to be
able to insert and remove objects from these containers in a dictionary-like
fashion.
@@ -85,36 +85,41 @@ The first stop for pandas issues and ideas is the `Github Issue Tracker
pandas community experts can answer through `Stack Overflow
<http://stackoverflow.com/questions/tagged/pandas>`__.
-Longer discussions occur on the `developer mailing list
-<http://groups.google.com/group/pystatsmodels>`__, and commercial support
-inquiries for Lambda Foundry should be sent to: support@lambdafoundry.com
+Community
+---------
-Credits
--------
+pandas is actively supported today by a community of like-minded individuals around
+the world who contribute their valuable time and energy to help make open source
+pandas possible. Thanks to `all of our contributors <https://github.com/pandas-dev/pandas/graphs/contributors>`__.
+
+If you're interested in contributing, please
+visit `Contributing to pandas webpage <http://pandas.pydata.org/pandas-docs/stable/contributing.html>`__.
-pandas development began at `AQR Capital Management <http://www.aqr.com>`__ in
-April 2008. It was open-sourced at the end of 2009. AQR continued to provide
-resources for development through the end of 2011, and continues to contribute
-bug reports today.
+pandas is a `NUMFocus <https://www.numfocus.org/open-source-projects/>`__ sponsored project.
+This will help ensure the success of development of pandas as a world-class open-source
+project, and makes it possible to `donate <https://pandas.pydata.org/donate.html>`__ to the project.
-Since January 2012, `Lambda Foundry <http://www.lambdafoundry.com>`__, has
-been providing development resources, as well as commercial support,
-training, and consulting for pandas.
+Project Governance
+------------------
-pandas is only made possible by a group of people around the world like you
-who have contributed new code, bug reports, fixes, comments and ideas. A
-complete list can be found `on Github <http://www.github.com/pandas-dev/pandas/contributors>`__.
+The governance process that pandas project has used informally since its inception in 2008 is formalized in `Project Governance documents <https://github.com/pandas-dev/pandas-governance>`__ .
+The documents clarify how decisions are made and how the various elements of our community interact, including the relationship between open source collaborative development and work that may be funded by for-profit or non-profit entities.
+
+Wes McKinney is the Benevolent Dictator for Life (BDFL).
Development Team
-----------------
+-----------------
+
+The list of the Core Team members and more detailed information can be found on the `people’s page <https://github.com/pandas-dev/pandas-governance/blob/master/people.md>`__ of the governance repo.
+
-pandas is a part of the PyData project. The PyData Development Team is a
-collection of developers focused on the improvement of Python's data
-libraries. The core team that coordinates development can be found on `Github
-<http://github.com/pydata>`__. If you're interested in contributing, please
-visit the `project website <http://pandas.pydata.org>`__.
+Institutional Partners
+----------------------
+
+The information about current institutional partners can be found on `pandas website page <https://pandas.pydata.org/about.html>`__
License
-------
.. literalinclude:: ../../LICENSE
+
| Updated overview page in documentation to match https://pandas.pydata.org/about.html
closes #17335 | https://api.github.com/repos/pandas-dev/pandas/pulls/17368 | 2017-08-29T11:40:26Z | 2017-08-30T16:39:46Z | 2017-08-30T16:39:46Z | 2017-08-30T16:40:08Z |
ENH: tolerance now takes list-like argument for reindex and get_indexer. | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 117e7c9d11259..c28e2684ea5f5 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -234,6 +234,7 @@ Other Enhancements
- :meth:`DataFrame.assign` will preserve the original order of ``**kwargs`` for Python 3.6+ users instead of sorting the column names. (:issue:`14207`)
- Improved the import time of pandas by about 2.25x. (:issue:`16764`)
- :func:`read_json` and :func:`to_json` now accept a ``compression`` argument which allows them to transparently handle compressed files. (:issue:`17798`)
+- :func:`Series.reindex`, :func:`DataFrame.reindex`, :func:`Index.get_indexer` now support list-like argument for ``tolerance``. (:issue:`17367`)
.. _whatsnew_0210.api_breaking:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index fc3982dba93ce..5a311afc27c9a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2470,9 +2470,10 @@ def reindex_like(self, other, method=None, copy=True, limit=None,
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between labels of the other object and this
- object for inexact matches.
+ object for inexact matches. Can be list-like.
.. versionadded:: 0.17.0
+ .. versionadded:: 0.21.0 (list-like tolerance)
Notes
-----
@@ -2860,7 +2861,14 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
matches. The values of the index at the matching locations most
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
+ Tolerance may be a scalar value, which applies the same tolerance
+ to all values, or list-like, which applies variable tolerance per
+ element. List-like includes list, tuple, array, Series, and must be
+ the same size as the index and its dtype must exactly match the
+ index's type.
+
.. versionadded:: 0.17.0
+ .. versionadded:: 0.21.0 (list-like tolerance)
Examples
--------
@@ -3120,7 +3128,14 @@ def _reindex_multi(self, axes, copy, fill_value):
matches. The values of the index at the matching locations most
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
+ Tolerance may be a scalar value, which applies the same tolerance
+ to all values, or list-like, which applies variable tolerance per
+ element. List-like includes list, tuple, array, Series, and must be
+ the same size as the index and its dtype must exactly match the
+ index's type.
+
.. versionadded:: 0.17.0
+ .. versionadded:: 0.21.0 (list-like tolerance)
Examples
--------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index c3343f149005c..a995fc10a6674 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2484,7 +2484,14 @@ def _get_unique_index(self, dropna=False):
the index at the matching location most satisfy the equation
``abs(index[loc] - key) <= tolerance``.
+ Tolerance may be a scalar
+ value, which applies the same tolerance to all values, or
+ list-like, which applies variable tolerance per element. List-like
+ includes list, tuple, array, Series, and must be the same size as
+ the index and its dtype must exactly match the index's type.
+
.. versionadded:: 0.17.0
+ .. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
@@ -2627,7 +2634,14 @@ def _get_level_values(self, level):
matches. The values of the index at the matching locations most
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
+ Tolerance may be a scalar value, which applies the same tolerance
+ to all values, or list-like, which applies variable tolerance per
+ element. List-like includes list, tuple, array, Series, and must be
+ the same size as the index and its dtype must exactly match the
+ index's type.
+
.. versionadded:: 0.17.0
+ .. versionadded:: 0.21.0 (list-like tolerance)
Examples
--------
@@ -2647,7 +2661,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = _ensure_index(target)
if tolerance is not None:
- tolerance = self._convert_tolerance(tolerance)
+ tolerance = self._convert_tolerance(tolerance, target)
# Treat boolean labels passed to a numeric index as not found. Without
# this fix False and True would be treated as 0 and 1 respectively.
@@ -2683,10 +2697,15 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
'backfill or nearest reindexing')
indexer = self._engine.get_indexer(target._values)
+
return _ensure_platform_int(indexer)
- def _convert_tolerance(self, tolerance):
+ def _convert_tolerance(self, tolerance, target):
# override this method on subclasses
+ tolerance = np.asarray(tolerance)
+ if target.size != tolerance.size and tolerance.size > 1:
+ raise ValueError('list-like tolerance size must match '
+ 'target index size')
return tolerance
def _get_fill_indexer(self, target, method, limit=None, tolerance=None):
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index d5b4525e8a1eb..5d40975586e73 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -7,6 +7,7 @@
from pandas import compat
from pandas.compat.numpy import function as nv
+from pandas.core.tools.timedeltas import to_timedelta
import numpy as np
from pandas.core.dtypes.common import (
@@ -431,13 +432,12 @@ def asobject(self):
from pandas.core.index import Index
return Index(self._box_values(self.asi8), name=self.name, dtype=object)
- def _convert_tolerance(self, tolerance):
- try:
- return Timedelta(tolerance).to_timedelta64()
- except ValueError:
- raise ValueError('tolerance argument for %s must be convertible '
- 'to Timedelta: %r'
- % (type(self).__name__, tolerance))
+ def _convert_tolerance(self, tolerance, target):
+ tolerance = np.asarray(to_timedelta(tolerance, box=False))
+ if target.size != tolerance.size and tolerance.size > 1:
+ raise ValueError('list-like tolerance size must match '
+ 'target index size')
+ return tolerance
def _maybe_mask_results(self, result, fill_value=None, convert=None):
"""
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 25897bee29845..d16251a7829b9 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -1423,7 +1423,7 @@ def get_loc(self, key, method=None, tolerance=None):
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
- tolerance = self._convert_tolerance(tolerance)
+ tolerance = self._convert_tolerance(tolerance, np.asarray(key))
if isinstance(key, datetime):
# needed to localize naive datetimes
@@ -1447,7 +1447,12 @@ def get_loc(self, key, method=None, tolerance=None):
try:
stamp = Timestamp(key, tz=self.tz)
return Index.get_loc(self, stamp, method, tolerance)
- except (KeyError, ValueError):
+ except KeyError:
+ raise KeyError(key)
+ except ValueError as e:
+ # list-like tolerance size must match target index size
+ if 'list-like' in str(e):
+ raise e
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 9fc47ad7b773c..1f007b1961e06 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -71,12 +71,21 @@ def _convert_for_op(self, value):
return value
- def _convert_tolerance(self, tolerance):
- try:
- return float(tolerance)
- except ValueError:
- raise ValueError('tolerance argument for %s must be numeric: %r' %
- (type(self).__name__, tolerance))
+ def _convert_tolerance(self, tolerance, target):
+ tolerance = np.asarray(tolerance)
+ if target.size != tolerance.size and tolerance.size > 1:
+ raise ValueError('list-like tolerance size must match '
+ 'target index size')
+ if not np.issubdtype(tolerance.dtype, np.number):
+ if tolerance.ndim > 0:
+ raise ValueError(('tolerance argument for %s must contain '
+ 'numeric elements if it is list type') %
+ (type(self).__name__,))
+ else:
+ raise ValueError(('tolerance argument for %s must be numeric '
+ 'if it is a scalar: %r') %
+ (type(self).__name__, tolerance))
+ return tolerance
@classmethod
def _assert_safe_casting(cls, data, subarr):
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index b70b4c4e4067c..148ca2725fbdc 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -641,12 +641,17 @@ def to_timestamp(self, freq=None, how='start'):
return DatetimeIndex(new_data, freq='infer', name=self.name)
def _maybe_convert_timedelta(self, other):
- if isinstance(other, (timedelta, np.timedelta64, offsets.Tick)):
+ if isinstance(
+ other, (timedelta, np.timedelta64, offsets.Tick, np.ndarray)):
offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(offset, offsets.Tick):
- nanos = tslib._delta_to_nanoseconds(other)
+ if isinstance(other, np.ndarray):
+ nanos = np.vectorize(tslib._delta_to_nanoseconds)(other)
+ else:
+ nanos = tslib._delta_to_nanoseconds(other)
offset_nanos = tslib._delta_to_nanoseconds(offset)
- if nanos % offset_nanos == 0:
+ check = np.all(nanos % offset_nanos == 0)
+ if check:
return nanos // offset_nanos
elif isinstance(other, offsets.DateOffset):
freqstr = other.rule_code
@@ -782,7 +787,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
target = target.asi8
if tolerance is not None:
- tolerance = self._convert_tolerance(tolerance)
+ tolerance = self._convert_tolerance(tolerance, target)
return Index.get_indexer(self._int64index, target, method,
limit, tolerance)
@@ -825,7 +830,8 @@ def get_loc(self, key, method=None, tolerance=None):
try:
ordinal = tslib.iNaT if key is tslib.NaT else key.ordinal
if tolerance is not None:
- tolerance = self._convert_tolerance(tolerance)
+ tolerance = self._convert_tolerance(tolerance,
+ np.asarray(key))
return self._int64index.get_loc(ordinal, method, tolerance)
except KeyError:
@@ -908,8 +914,12 @@ def _get_string_slice(self, key):
return slice(self.searchsorted(t1.ordinal, side='left'),
self.searchsorted(t2.ordinal, side='right'))
- def _convert_tolerance(self, tolerance):
- tolerance = DatetimeIndexOpsMixin._convert_tolerance(self, tolerance)
+ def _convert_tolerance(self, tolerance, target):
+ tolerance = DatetimeIndexOpsMixin._convert_tolerance(self, tolerance,
+ target)
+ if target.size != tolerance.size and tolerance.size > 1:
+ raise ValueError('list-like tolerance size must match '
+ 'target index size')
return self._maybe_convert_timedelta(tolerance)
def insert(self, loc, item):
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 89757c2bf40da..6e08c32f30dcd 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -699,7 +699,7 @@ def get_loc(self, key, method=None, tolerance=None):
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
- tolerance = self._convert_tolerance(tolerance)
+ tolerance = self._convert_tolerance(tolerance, np.asarray(key))
if _is_convertible_to_td(key):
key = Timedelta(key)
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index d5132826bb93f..f61d9f90d6ca2 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -83,6 +83,9 @@ def to_timedelta(arg, unit='ns', box=True, errors='raise'):
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, unit=unit, box=box,
errors=errors, name=arg.name)
+ elif is_list_like(arg) and getattr(arg, 'ndim', 1) == 0:
+ # extract array scalar and process below
+ arg = arg.item()
elif is_list_like(arg) and getattr(arg, 'ndim', 1) == 1:
return _convert_listlike(arg, unit=unit, box=box, errors=errors)
elif getattr(arg, 'ndim', 1) > 1:
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index 1a16e4ef48b64..f850b8f2ee178 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -1935,9 +1935,13 @@ def test_reindex_methods(self):
actual = df.reindex_like(df, method=method, tolerance=0)
assert_frame_equal(df, actual)
+ actual = df.reindex_like(df, method=method, tolerance=[0, 0, 0, 0])
+ assert_frame_equal(df, actual)
actual = df.reindex(target, method=method, tolerance=1)
assert_frame_equal(expected, actual)
+ actual = df.reindex(target, method=method, tolerance=[1, 1, 1, 1])
+ assert_frame_equal(expected, actual)
e2 = expected[::-1]
actual = df.reindex(target[::-1], method=method)
@@ -1958,6 +1962,11 @@ def test_reindex_methods(self):
actual = df.reindex(target, method='nearest', tolerance=0.2)
assert_frame_equal(expected, actual)
+ expected = pd.DataFrame({'x': [0, np.nan, 1, np.nan]}, index=target)
+ actual = df.reindex(target, method='nearest',
+ tolerance=[0.5, 0.01, 0.4, 0.1])
+ assert_frame_equal(expected, actual)
+
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index 47f53f53cfd02..8d9ac59cf9883 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -41,10 +41,17 @@ def test_get_loc(self):
tolerance=np.timedelta64(1, 'D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=timedelta(1)) == 1
- with tm.assert_raises_regex(ValueError, 'must be convertible'):
+ with tm.assert_raises_regex(ValueError,
+ 'unit abbreviation w/o a number'):
idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
with pytest.raises(KeyError):
idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours')
+ with pytest.raises(
+ ValueError,
+ match='tolerance size must match target index size'):
+ idx.get_loc('2000-01-01', method='nearest',
+ tolerance=[pd.Timedelta('1day').to_timedelta64(),
+ pd.Timedelta('1day').to_timedelta64()])
assert idx.get_loc('2000', method='nearest') == slice(0, 3)
assert idx.get_loc('2000-01', method='nearest') == slice(0, 3)
@@ -93,6 +100,19 @@ def test_get_indexer(self):
idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour')),
np.array([0, -1, 1], dtype=np.intp))
+ tol_raw = [pd.Timedelta('1 hour'),
+ pd.Timedelta('1 hour'),
+ pd.Timedelta('1 hour').to_timedelta64(), ]
+ tm.assert_numpy_array_equal(
+ idx.get_indexer(target, 'nearest',
+ tolerance=[np.timedelta64(x) for x in tol_raw]),
+ np.array([0, -1, 1], dtype=np.intp))
+ tol_bad = [pd.Timedelta('2 hour').to_timedelta64(),
+ pd.Timedelta('1 hour').to_timedelta64(),
+ 'foo', ]
+ with pytest.raises(
+ ValueError, match='abbreviation w/o a number'):
+ idx.get_indexer(target, 'nearest', tolerance=tol_bad)
with pytest.raises(ValueError):
idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index 51f7d13cb0638..ae500e66359b4 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -9,6 +9,7 @@
from pandas import (PeriodIndex, period_range, notna, DatetimeIndex, NaT,
Index, Period, Int64Index, Series, DataFrame, date_range,
offsets, compat)
+from pandas.core.indexes.period import IncompatibleFrequency
from ..datetimelike import DatetimeLike
@@ -83,7 +84,8 @@ def test_get_loc(self):
tolerance=np.timedelta64(1, 'D')) == 1
assert idx.get_loc('2000-01-02T12', method='nearest',
tolerance=timedelta(1)) == 1
- with tm.assert_raises_regex(ValueError, 'must be convertible'):
+ with tm.assert_raises_regex(ValueError,
+ 'unit abbreviation w/o a number'):
idx.get_loc('2000-01-10', method='nearest', tolerance='foo')
msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
@@ -91,6 +93,12 @@ def test_get_loc(self):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour')
with pytest.raises(KeyError):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 day')
+ with pytest.raises(
+ ValueError,
+ match='list-like tolerance size must match target index size'):
+ idx.get_loc('2000-01-10', method='nearest',
+ tolerance=[pd.Timedelta('1 day').to_timedelta64(),
+ pd.Timedelta('1 day').to_timedelta64()])
def test_where(self):
i = self.create_index()
@@ -158,6 +166,20 @@ def test_get_indexer(self):
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 day'),
np.array([0, 1, 1], dtype=np.intp))
+ tol_raw = [pd.Timedelta('1 hour'),
+ pd.Timedelta('1 hour'),
+ np.timedelta64(1, 'D'), ]
+ tm.assert_numpy_array_equal(
+ idx.get_indexer(target, 'nearest',
+ tolerance=[np.timedelta64(x) for x in tol_raw]),
+ np.array([0, -1, 1], dtype=np.intp))
+ tol_bad = [pd.Timedelta('2 hour').to_timedelta64(),
+ pd.Timedelta('1 hour').to_timedelta64(),
+ np.timedelta64(1, 'M'), ]
+ with pytest.raises(
+ IncompatibleFrequency,
+ match='Input has different freq from'):
+ idx.get_indexer(target, 'nearest', tolerance=tol_bad)
def test_repeat(self):
# GH10183
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 81f113d58d680..307cda7f2d1cb 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1075,40 +1075,58 @@ def test_get_indexer_invalid(self):
with tm.assert_raises_regex(ValueError, 'limit argument'):
idx.get_indexer([1, 0], limit=1)
- def test_get_indexer_nearest(self):
+ @pytest.mark.parametrize(
+ 'method, tolerance, indexer, expected',
+ [
+ ('pad', None, [0, 5, 9], [0, 5, 9]),
+ ('backfill', None, [0, 5, 9], [0, 5, 9]),
+ ('nearest', None, [0, 5, 9], [0, 5, 9]),
+ ('pad', 0, [0, 5, 9], [0, 5, 9]),
+ ('backfill', 0, [0, 5, 9], [0, 5, 9]),
+ ('nearest', 0, [0, 5, 9], [0, 5, 9]),
+
+ ('pad', None, [0.2, 1.8, 8.5], [0, 1, 8]),
+ ('backfill', None, [0.2, 1.8, 8.5], [1, 2, 9]),
+ ('nearest', None, [0.2, 1.8, 8.5], [0, 2, 9]),
+ ('pad', 1, [0.2, 1.8, 8.5], [0, 1, 8]),
+ ('backfill', 1, [0.2, 1.8, 8.5], [1, 2, 9]),
+ ('nearest', 1, [0.2, 1.8, 8.5], [0, 2, 9]),
+
+ ('pad', 0.2, [0.2, 1.8, 8.5], [0, -1, -1]),
+ ('backfill', 0.2, [0.2, 1.8, 8.5], [-1, 2, -1]),
+ ('nearest', 0.2, [0.2, 1.8, 8.5], [0, 2, -1])])
+ def test_get_indexer_nearest(self, method, tolerance, indexer, expected):
idx = Index(np.arange(10))
- all_methods = ['pad', 'backfill', 'nearest']
- for method in all_methods:
- actual = idx.get_indexer([0, 5, 9], method=method)
- tm.assert_numpy_array_equal(actual, np.array([0, 5, 9],
- dtype=np.intp))
-
- actual = idx.get_indexer([0, 5, 9], method=method, tolerance=0)
- tm.assert_numpy_array_equal(actual, np.array([0, 5, 9],
- dtype=np.intp))
-
- for method, expected in zip(all_methods, [[0, 1, 8], [1, 2, 9],
- [0, 2, 9]]):
- actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)
- tm.assert_numpy_array_equal(actual, np.array(expected,
- dtype=np.intp))
-
- actual = idx.get_indexer([0.2, 1.8, 8.5], method=method,
- tolerance=1)
- tm.assert_numpy_array_equal(actual, np.array(expected,
- dtype=np.intp))
+ actual = idx.get_indexer(indexer, method=method, tolerance=tolerance)
+ tm.assert_numpy_array_equal(actual, np.array(expected,
+ dtype=np.intp))
+
+ @pytest.mark.parametrize('listtype', [list, tuple, Series, np.array])
+ @pytest.mark.parametrize(
+ 'tolerance, expected',
+ list(zip([[0.3, 0.3, 0.1], [0.2, 0.1, 0.1],
+ [0.1, 0.5, 0.5]],
+ [[0, 2, -1], [0, -1, -1],
+ [-1, 2, 9]])))
+ def test_get_indexer_nearest_listlike_tolerance(self, tolerance,
+ expected, listtype):
+ idx = Index(np.arange(10))
- for method, expected in zip(all_methods, [[0, -1, -1], [-1, 2, -1],
- [0, 2, -1]]):
- actual = idx.get_indexer([0.2, 1.8, 8.5], method=method,
- tolerance=0.2)
- tm.assert_numpy_array_equal(actual, np.array(expected,
- dtype=np.intp))
+ actual = idx.get_indexer([0.2, 1.8, 8.5], method='nearest',
+ tolerance=listtype(tolerance))
+ tm.assert_numpy_array_equal(actual, np.array(expected,
+ dtype=np.intp))
+ def test_get_indexer_nearest_error(self):
+ idx = Index(np.arange(10))
with tm.assert_raises_regex(ValueError, 'limit argument'):
idx.get_indexer([1, 0], method='nearest', limit=1)
+ with pytest.raises(ValueError, match='tolerance size must match'):
+ idx.get_indexer([1, 0], method='nearest',
+ tolerance=[1, 2, 3])
+
def test_get_indexer_nearest_decreasing(self):
idx = Index(np.arange(10))[::-1]
@@ -1141,6 +1159,10 @@ def test_get_indexer_strings(self):
with pytest.raises(TypeError):
idx.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2)
+ with pytest.raises(TypeError):
+ idx.get_indexer(['a', 'b', 'c', 'd'], method='pad',
+ tolerance=[2, 2, 2, 2])
+
def test_get_indexer_numeric_index_boolean_target(self):
# GH 16877
numeric_idx = pd.Index(range(4))
@@ -1172,6 +1194,8 @@ def test_get_loc(self):
idx.get_loc(1.1, 'nearest', tolerance='invalid')
with tm.assert_raises_regex(ValueError, 'tolerance .* valid if'):
idx.get_loc(1.1, tolerance=1)
+ with pytest.raises(ValueError, match='tolerance size must match'):
+ idx.get_loc(1.1, 'nearest', tolerance=[1, 1])
idx = pd.Index(['a', 'c'])
with pytest.raises(TypeError):
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index dc38b0a2b1fb7..a96c677852339 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -354,6 +354,14 @@ def test_get_loc(self):
with tm.assert_raises_regex(ValueError, 'must be numeric'):
idx.get_loc(1.4, method='nearest', tolerance='foo')
+ with pytest.raises(ValueError, match='must contain numeric elements'):
+ idx.get_loc(1.4, method='nearest', tolerance=np.array(['foo']))
+
+ with pytest.raises(
+ ValueError,
+ match='tolerance size must match target index size'):
+ idx.get_loc(1.4, method='nearest', tolerance=np.array([1, 2]))
+
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
assert idx.get_loc(1) == 1
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index 0b3bd0b03bccf..0a09199eca9d5 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -60,9 +60,17 @@ def test_get_loc(self):
assert idx.get_loc(idx[1], 'pad',
tolerance=timedelta(0)) == 1
- with tm.assert_raises_regex(ValueError, 'must be convertible'):
+ with tm.assert_raises_regex(ValueError,
+ 'unit abbreviation w/o a number'):
idx.get_loc(idx[1], method='nearest', tolerance='foo')
+ with pytest.raises(
+ ValueError,
+ match='tolerance size must match'):
+ idx.get_loc(idx[1], method='nearest',
+ tolerance=[Timedelta(0).to_timedelta64(),
+ Timedelta(0).to_timedelta64()])
+
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
assert idx.get_loc('1 day 1 hour', method) == loc
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py
index 75ae47ed2fdc1..d141b378fe214 100644
--- a/pandas/tests/series/test_indexing.py
+++ b/pandas/tests/series/test_indexing.py
@@ -2117,11 +2117,19 @@ def test_reindex_nearest(self):
actual = s.reindex_like(actual, method='nearest', tolerance=1)
assert_series_equal(expected, actual)
+ actual = s.reindex_like(actual, method='nearest',
+ tolerance=[1, 2, 3, 4])
+ assert_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = Series([0, 1, np.nan, 2], target)
assert_series_equal(expected, actual)
+ actual = s.reindex(target, method='nearest',
+ tolerance=[0.3, 0.01, 0.4, 3])
+ expected = Series([0, np.nan, np.nan, 2], target)
+ assert_series_equal(expected, actual)
+
def test_reindex_backfill(self):
pass
diff --git a/pandas/tests/sparse/test_indexing.py b/pandas/tests/sparse/test_indexing.py
index edbac8f09241b..37a287af71451 100644
--- a/pandas/tests/sparse/test_indexing.py
+++ b/pandas/tests/sparse/test_indexing.py
@@ -414,6 +414,11 @@ def test_reindex_nearest(self):
expected = pd.Series([0, 1, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
+ actual = s.reindex(target, method='nearest',
+ tolerance=[0.3, 0.01, 0.4, 3])
+ expected = pd.Series([0, np.nan, np.nan, 2], target).to_sparse()
+ tm.assert_sp_series_equal(expected, actual)
+
def tests_indexing_with_sparse(self):
# GH 13985
| Enable use of list-like values for tolerance argument in DataFrame.reindex(), Series.reindex(), Index.get_indexer().
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/17367 | 2017-08-29T01:47:21Z | 2017-10-14T21:06:30Z | 2017-10-14T21:06:30Z | 2017-10-14T21:59:20Z |
CLN: Move test_intersect_str_dates from tests/indexes/test_range.py to test_base.py | diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index ef36e4a91aa1c..07e98c326bcaa 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -663,6 +663,15 @@ def test_intersection(self):
intersect = first.intersection(second)
assert intersect.name is None
+ def test_intersect_str_dates(self):
+ dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
+
+ i1 = Index(dt_dates, dtype=object)
+ i2 = Index(['aa'], dtype=object)
+ res = i2.intersection(i1)
+
+ assert len(res) == 0
+
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index 566354da4870d..5ecf467b57fc5 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -639,15 +639,6 @@ def test_intersection(self):
expected = RangeIndex(0, 0, 1)
tm.assert_index_equal(result, expected)
- def test_intersect_str_dates(self):
- dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
-
- i1 = Index(dt_dates, dtype=object)
- i2 = Index(['aa'], dtype=object)
- res = i2.intersection(i1)
-
- assert len(res) == 0
-
def test_union_noncomparable(self):
from datetime import datetime, timedelta
# corner case, non-Int64Index
| - [X] closes #17362
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
It doesn't seem like `test_intersect_str_dates` has anything to do with `RangeIndex`, so I've moved it from `test_range.py` to `test_base.py`. | https://api.github.com/repos/pandas-dev/pandas/pulls/17366 | 2017-08-28T22:46:30Z | 2017-08-29T10:10:16Z | 2017-08-29T10:10:15Z | 2017-08-30T01:39:23Z |
BUG: make order of index from pd.concat deterministic | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index fcadd26156b1d..806b5e9026c7b 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -405,6 +405,7 @@ Reshaping
- Bug in :func:`crosstab` where passing two ``Series`` with the same name raised a ``KeyError`` (:issue:`13279`)
- :func:`Series.argmin`, :func:`Series.argmax`, and their counterparts on ``DataFrame`` and groupby objects work correctly with floating point data that contains infinite values (:issue:`13595`).
- Bug in :func:`unique` where checking a tuple of strings raised a ``TypeError`` (:issue:`17108`)
+- Bug in :func:`concat` where order of result index was unpredictable if it contained non-comparable elements (:issue:`17344`)
Numeric
^^^^^^^
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 44cb36b8a3207..515a401096120 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -629,3 +629,17 @@ def _random_state(state=None):
else:
raise ValueError("random_state must be an integer, a numpy "
"RandomState, or None")
+
+
+def _get_distinct_objs(objs):
+ """
+ Return a list with distinct elements of "objs" (different ids).
+ Preserves order.
+ """
+ ids = set()
+ res = []
+ for obj in objs:
+ if not id(obj) in ids:
+ ids.add(id(obj))
+ res.append(obj)
+ return res
diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py
index db73a6878258a..323d50166e7b6 100644
--- a/pandas/core/indexes/api.py
+++ b/pandas/core/indexes/api.py
@@ -23,8 +23,7 @@
'PeriodIndex', 'DatetimeIndex',
'_new_Index', 'NaT',
'_ensure_index', '_get_na_value', '_get_combined_index',
- '_get_objs_combined_axis',
- '_get_distinct_indexes', '_union_indexes',
+ '_get_objs_combined_axis', '_union_indexes',
'_get_consensus_names',
'_all_indexes_same']
@@ -41,7 +40,7 @@ def _get_objs_combined_axis(objs, intersect=False, axis=0):
def _get_combined_index(indexes, intersect=False):
# TODO: handle index names!
- indexes = _get_distinct_indexes(indexes)
+ indexes = com._get_distinct_objs(indexes)
if len(indexes) == 0:
return Index([])
if len(indexes) == 1:
@@ -55,10 +54,6 @@ def _get_combined_index(indexes, intersect=False):
return _ensure_index(union)
-def _get_distinct_indexes(indexes):
- return list(dict((id(x), x) for x in indexes).values())
-
-
def _union_indexes(indexes):
if len(indexes) == 0:
raise AssertionError('Must have at least 1 Index to union')
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index 52cd18126859a..6e646f9b29442 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -5,7 +5,7 @@
from numpy.random import randn
from datetime import datetime
-from pandas.compat import StringIO, iteritems
+from pandas.compat import StringIO, iteritems, PY2
import pandas as pd
from pandas import (DataFrame, concat,
read_csv, isna, Series, date_range,
@@ -1944,6 +1944,17 @@ def test_concat_categoricalindex(self):
index=exp_idx)
tm.assert_frame_equal(result, exp)
+ def test_concat_order(self):
+ # GH 17344
+ dfs = [pd.DataFrame(index=range(3), columns=['a', 1, None])]
+ dfs += [pd.DataFrame(index=range(3), columns=[None, 1, 'a'])
+ for i in range(100)]
+ result = pd.concat(dfs).columns
+ expected = dfs[0].columns
+ if PY2:
+ expected = expected.sort_values()
+ tm.assert_index_equal(result, expected)
+
@pytest.mark.parametrize('pdt', [pd.Series, pd.DataFrame, pd.Panel])
@pytest.mark.parametrize('dt', np.sctypes['float'])
| - [x] closes #17344
- [x] tests added / passed
- [x] passes `git diff master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17364 | 2017-08-28T22:33:57Z | 2017-08-29T17:03:18Z | 2017-08-29T17:03:18Z | 2017-08-29T20:39:30Z |
Separate parsing functions out from tslib | diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx
index 943f925ec5b04..725da22104efc 100644
--- a/pandas/_libs/period.pyx
+++ b/pandas/_libs/period.pyx
@@ -37,10 +37,10 @@ from tslibs.timezones cimport (
is_utc, is_tzlocal, get_utcoffset, get_dst_info, maybe_get_tz)
from tslib cimport _nat_scalar_rules
+from tslibs.parsing import parse_time_string, NAT_SENTINEL
from tslibs.frequencies cimport get_freq_code
from pandas.tseries import offsets
-from pandas.core.tools.datetimes import parse_time_string
from pandas.tseries import frequencies
cdef int64_t NPY_NAT = util.get_nat()
@@ -1197,6 +1197,8 @@ class Period(_Period):
value = str(value)
value = value.upper()
dt, _, reso = parse_time_string(value, freq)
+ if dt is NAT_SENTINEL:
+ ordinal = iNaT
if freq is None:
try:
diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx
index a2764e87eec55..ed883bf5db5bc 100644
--- a/pandas/_libs/src/inference.pyx
+++ b/pandas/_libs/src/inference.pyx
@@ -1384,165 +1384,6 @@ def convert_sql_column(x):
return maybe_convert_objects(x, try_float=1)
-def try_parse_dates(ndarray[object] values, parser=None,
- dayfirst=False, default=None):
- cdef:
- Py_ssize_t i, n
- ndarray[object] result
-
- n = len(values)
- result = np.empty(n, dtype='O')
-
- if parser is None:
- if default is None: # GH2618
- date=datetime.now()
- default=datetime(date.year, date.month, 1)
-
- try:
- from dateutil.parser import parse
- parse_date = lambda x: parse(x, dayfirst=dayfirst, default=default)
- except ImportError: # pragma: no cover
- def parse_date(s):
- try:
- return datetime.strptime(s, '%m/%d/%Y')
- except Exception:
- return s
- # EAFP here
- try:
- for i from 0 <= i < n:
- if values[i] == '':
- result[i] = np.nan
- else:
- result[i] = parse_date(values[i])
- except Exception:
- # failed
- return values
- else:
- parse_date = parser
-
- try:
- for i from 0 <= i < n:
- if values[i] == '':
- result[i] = np.nan
- else:
- result[i] = parse_date(values[i])
- except Exception:
- # raise if passed parser and it failed
- raise
-
- return result
-
-
-def try_parse_date_and_time(ndarray[object] dates, ndarray[object] times,
- date_parser=None, time_parser=None,
- dayfirst=False, default=None):
- cdef:
- Py_ssize_t i, n
- ndarray[object] result
-
- from datetime import date, time, datetime, timedelta
-
- n = len(dates)
- if len(times) != n:
- raise ValueError('Length of dates and times must be equal')
- result = np.empty(n, dtype='O')
-
- if date_parser is None:
- if default is None: # GH2618
- date=datetime.now()
- default=datetime(date.year, date.month, 1)
-
- try:
- from dateutil.parser import parse
- parse_date = lambda x: parse(x, dayfirst=dayfirst, default=default)
- except ImportError: # pragma: no cover
- def parse_date(s):
- try:
- return date.strptime(s, '%m/%d/%Y')
- except Exception:
- return s
- else:
- parse_date = date_parser
-
- if time_parser is None:
- try:
- from dateutil.parser import parse
- parse_time = lambda x: parse(x)
- except ImportError: # pragma: no cover
- def parse_time(s):
- try:
- return time.strptime(s, '%H:%M:%S')
- except Exception:
- return s
-
- else:
- parse_time = time_parser
-
- for i from 0 <= i < n:
- d = parse_date(str(dates[i]))
- t = parse_time(str(times[i]))
- result[i] = datetime(d.year, d.month, d.day,
- t.hour, t.minute, t.second)
-
- return result
-
-
-def try_parse_year_month_day(ndarray[object] years, ndarray[object] months,
- ndarray[object] days):
- cdef:
- Py_ssize_t i, n
- ndarray[object] result
-
- from datetime import datetime
-
- n = len(years)
- if len(months) != n or len(days) != n:
- raise ValueError('Length of years/months/days must all be equal')
- result = np.empty(n, dtype='O')
-
- for i from 0 <= i < n:
- result[i] = datetime(int(years[i]), int(months[i]), int(days[i]))
-
- return result
-
-
-def try_parse_datetime_components(ndarray[object] years,
- ndarray[object] months,
- ndarray[object] days,
- ndarray[object] hours,
- ndarray[object] minutes,
- ndarray[object] seconds):
-
- cdef:
- Py_ssize_t i, n
- ndarray[object] result
- int secs
- double float_secs
- double micros
-
- from datetime import datetime
-
- n = len(years)
- if (len(months) != n or len(days) != n or len(hours) != n or
- len(minutes) != n or len(seconds) != n):
- raise ValueError('Length of all datetime components must be equal')
- result = np.empty(n, dtype='O')
-
- for i from 0 <= i < n:
- float_secs = float(seconds[i])
- secs = int(float_secs)
-
- micros = float_secs - secs
- if micros > 0:
- micros = micros * 1000000
-
- result[i] = datetime(int(years[i]), int(months[i]), int(days[i]),
- int(hours[i]), int(minutes[i]), secs,
- int(micros))
-
- return result
-
-
def sanitize_objects(ndarray[object] values, set na_values,
convert_empty=True):
cdef:
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index d4ca5af09367e..4c34d0fcb1e5f 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -67,6 +67,9 @@ from khash cimport (
kh_init_int64, kh_int64_t,
kh_resize_int64, kh_get_int64)
+from .tslibs.parsing import parse_datetime_string
+from .tslibs.parsing import DateParseError # noqa
+
cimport cython
import re
@@ -1737,26 +1740,6 @@ def datetime_to_datetime64(ndarray[object] values):
return result, inferred_tz
-cdef:
- set _not_datelike_strings = set(['a', 'A', 'm', 'M', 'p', 'P', 't', 'T'])
-
-cpdef bint _does_string_look_like_datetime(object date_string):
- if date_string.startswith('0'):
- # Strings starting with 0 are more consistent with a
- # date-like string than a number
- return True
-
- try:
- if float(date_string) < 1000:
- return False
- except ValueError:
- pass
-
- if date_string in _not_datelike_strings:
- return False
-
- return True
-
def format_array_from_datetime(ndarray[int64_t] values, object tz=None,
object format=None, object na_rep=None):
@@ -1841,257 +1824,6 @@ def format_array_from_datetime(ndarray[int64_t] values, object tz=None,
return result
-class DateParseError(ValueError):
- pass
-
-
-cdef object _TIMEPAT = re.compile(r'^([01]?[0-9]|2[0-3]):([0-5][0-9])')
-
-
-def parse_datetime_string(object date_string, object freq=None,
- dayfirst=False, yearfirst=False, **kwargs):
- """parse datetime string, only returns datetime.
- Also cares special handling matching time patterns.
-
- Returns
- -------
- datetime
- """
-
- cdef:
- object dt
-
- if not _does_string_look_like_datetime(date_string):
- raise ValueError('Given date string not likely a datetime.')
-
- if _TIMEPAT.match(date_string):
- # use current datetime as default, not pass _DEFAULT_DATETIME
- dt = parse_date(date_string, dayfirst=dayfirst,
- yearfirst=yearfirst, **kwargs)
- return dt
- try:
- dt, _, _ = _parse_dateabbr_string(date_string, _DEFAULT_DATETIME, freq)
- return dt
- except DateParseError:
- raise
- except ValueError:
- pass
-
- try:
- dt = parse_date(date_string, default=_DEFAULT_DATETIME,
- dayfirst=dayfirst, yearfirst=yearfirst, **kwargs)
- except TypeError:
- # following may be raised from dateutil
- # TypeError: 'NoneType' object is not iterable
- raise ValueError('Given date string not likely a datetime.')
-
- return dt
-
-
-def parse_datetime_string_with_reso(object date_string, object freq=None,
- dayfirst=False, yearfirst=False, **kwargs):
- """parse datetime string, only returns datetime
-
- Returns
- -------
- datetime
- """
-
- cdef:
- object parsed, reso
-
- if not _does_string_look_like_datetime(date_string):
- raise ValueError('Given date string not likely a datetime.')
-
- try:
- return _parse_dateabbr_string(date_string, _DEFAULT_DATETIME, freq)
- except DateParseError:
- raise
- except ValueError:
- pass
-
- try:
- parsed, reso = dateutil_parse(date_string, _DEFAULT_DATETIME,
- dayfirst=dayfirst, yearfirst=yearfirst)
- except Exception as e:
- # TODO: allow raise of errors within instead
- raise DateParseError(e)
- if parsed is None:
- raise DateParseError("Could not parse %s" % date_string)
- return parsed, parsed, reso
-
-
-cdef inline object _parse_dateabbr_string(object date_string, object default,
- object freq):
- cdef:
- object ret
- int year, quarter = -1, month, mnum, date_len
-
- # special handling for possibilities eg, 2Q2005, 2Q05, 2005Q1, 05Q1
- assert util.is_string_object(date_string)
-
- # len(date_string) == 0
- # should be NaT???
-
- if date_string in _nat_strings:
- return NaT, NaT, ''
-
- date_string = date_string.upper()
- date_len = len(date_string)
-
- if date_len == 4:
- # parse year only like 2000
- try:
- ret = default.replace(year=int(date_string))
- return ret, ret, 'year'
- except ValueError:
- pass
-
- try:
- if 4 <= date_len <= 7:
- i = date_string.index('Q', 1, 6)
- if i == 1:
- quarter = int(date_string[0])
- if date_len == 4 or (date_len == 5
- and date_string[i + 1] == '-'):
- # r'(\d)Q-?(\d\d)')
- year = 2000 + int(date_string[-2:])
- elif date_len == 6 or (date_len == 7
- and date_string[i + 1] == '-'):
- # r'(\d)Q-?(\d\d\d\d)')
- year = int(date_string[-4:])
- else:
- raise ValueError
- elif i == 2 or i == 3:
- # r'(\d\d)-?Q(\d)'
- if date_len == 4 or (date_len == 5
- and date_string[i - 1] == '-'):
- quarter = int(date_string[-1])
- year = 2000 + int(date_string[:2])
- else:
- raise ValueError
- elif i == 4 or i == 5:
- if date_len == 6 or (date_len == 7
- and date_string[i - 1] == '-'):
- # r'(\d\d\d\d)-?Q(\d)'
- quarter = int(date_string[-1])
- year = int(date_string[:4])
- else:
- raise ValueError
-
- if not (1 <= quarter <= 4):
- msg = ('Incorrect quarterly string is given, quarter must be '
- 'between 1 and 4: {0}')
- raise DateParseError(msg.format(date_string))
-
- if freq is not None:
- # hack attack, #1228
- try:
- mnum = _MONTH_NUMBERS[_get_rule_month(freq)] + 1
- except (KeyError, ValueError):
- msg = ('Unable to retrieve month information from given '
- 'freq: {0}').format(freq)
- raise DateParseError(msg)
-
- month = (mnum + (quarter - 1) * 3) % 12 + 1
- if month > mnum:
- year -= 1
- else:
- month = (quarter - 1) * 3 + 1
-
- ret = default.replace(year=year, month=month)
- return ret, ret, 'quarter'
-
- except DateParseError:
- raise
- except ValueError:
- pass
-
- if date_len == 6 and (freq == 'M' or getattr(
- freq, 'rule_code', None) == 'M'):
- year = int(date_string[:4])
- month = int(date_string[4:6])
- try:
- ret = default.replace(year=year, month=month)
- return ret, ret, 'month'
- except ValueError:
- pass
-
- for pat in ['%Y-%m', '%m-%Y', '%b %Y', '%b-%Y']:
- try:
- ret = datetime.strptime(date_string, pat)
- return ret, ret, 'month'
- except ValueError:
- pass
-
- raise ValueError('Unable to parse {0}'.format(date_string))
-
-
-def dateutil_parse(object timestr, object default, ignoretz=False,
- tzinfos=None, **kwargs):
- """ lifted from dateutil to get resolution"""
-
- cdef:
- object fobj, res, attr, ret, tzdata
- object reso = None
- dict repl = {}
-
- fobj = StringIO(str(timestr))
- res = DEFAULTPARSER._parse(fobj, **kwargs)
-
- # dateutil 2.2 compat
- if isinstance(res, tuple):
- res, _ = res
-
- if res is None:
- msg = "Unknown datetime string format, unable to parse: {0}"
- raise ValueError(msg.format(timestr))
-
- for attr in ["year", "month", "day", "hour",
- "minute", "second", "microsecond"]:
- value = getattr(res, attr)
- if value is not None:
- repl[attr] = value
- reso = attr
-
- if reso is None:
- msg = "Unable to parse datetime string: {0}"
- raise ValueError(msg.format(timestr))
-
- if reso == 'microsecond':
- if repl['microsecond'] == 0:
- reso = 'second'
- elif repl['microsecond'] % 1000 == 0:
- reso = 'millisecond'
-
- ret = default.replace(**repl)
- if res.weekday is not None and not res.day:
- ret = ret + relativedelta.relativedelta(weekday=res.weekday)
- if not ignoretz:
- if callable(tzinfos) or tzinfos and res.tzname in tzinfos:
- if callable(tzinfos):
- tzdata = tzinfos(res.tzname, res.tzoffset)
- else:
- tzdata = tzinfos.get(res.tzname)
- if isinstance(tzdata, datetime.tzinfo):
- tzinfo = tzdata
- elif isinstance(tzdata, string_types):
- tzinfo = _dateutil_tzstr(tzdata)
- elif isinstance(tzdata, int):
- tzinfo = tzoffset(res.tzname, tzdata)
- else:
- raise ValueError("offset must be tzinfo subclass, "
- "tz string, or int offset")
- ret = ret.replace(tzinfo=tzinfo)
- elif res.tzname and res.tzname in time.tzname:
- ret = ret.replace(tzinfo=_dateutil_tzlocal())
- elif res.tzoffset == 0:
- ret = ret.replace(tzinfo=_dateutil_tzutc())
- elif res.tzoffset:
- ret = ret.replace(tzinfo=tzoffset(res.tzname, res.tzoffset))
- return ret, reso
-
-
# const for parsers
_DEFAULT_DATETIME = datetime(1, 1, 1).replace(
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
new file mode 100644
index 0000000000000..845d1b8dcabba
--- /dev/null
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -0,0 +1,681 @@
+# -*- coding: utf-8 -*-
+# cython: profile=False
+# cython: linetrace=False
+# distutils: define_macros=CYTHON_TRACE=0
+# distutils: define_macros=CYTHON_TRACE_NOGIL=0
+"""
+Parsing functions for datetime and datetime-like strings.
+"""
+import sys
+import re
+
+from cpython cimport PyString_Check, PyUnicode_Check
+
+from libc.stdlib cimport free
+
+cimport cython
+from cython cimport Py_ssize_t
+
+
+from datetime import datetime
+import time
+
+import numpy as np
+cimport numpy as np
+from numpy cimport int64_t, ndarray
+np.import_array()
+
+# Avoid import from outside _libs
+if sys.version_info.major == 2:
+ string_types = basestring
+ from StringIO import StringIO
+else:
+ string_types = str
+ from io import StringIO
+
+
+# dateutil compat
+from dateutil.tz import (tzoffset,
+ tzlocal as _dateutil_tzlocal,
+ tzfile as _dateutil_tzfile,
+ tzutc as _dateutil_tzutc,
+ tzstr as _dateutil_tzstr)
+from dateutil.relativedelta import relativedelta
+from dateutil.parser import DEFAULTPARSER
+from dateutil.parser import parse as du_parse
+
+
+class DateParseError(ValueError):
+ pass
+
+_nat_strings = set(['NaT', 'nat', 'NAT', 'nan', 'NaN', 'NAN'])
+
+_DEFAULT_DATETIME = datetime(1, 1, 1).replace(hour=0, minute=0,
+ second=0, microsecond=0)
+_MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL',
+ 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
+_MONTH_NUMBERS = {k: i for i, k in enumerate(_MONTHS)}
+_MONTH_ALIASES = {(k + 1): v for k, v in enumerate(_MONTHS)}
+
+cdef object _TIMEPAT = re.compile(r'^([01]?[0-9]|2[0-3]):([0-5][0-9])')
+
+cdef set _not_datelike_strings = set(['a', 'A', 'm', 'M', 'p', 'P', 't', 'T'])
+
+NAT_SENTINEL = object()
+# This allows us to reference NaT without having to import it
+
+
+def parse_datetime_string(date_string, freq=None, dayfirst=False,
+ yearfirst=False, **kwargs):
+ """parse datetime string, only returns datetime.
+ Also cares special handling matching time patterns.
+
+ Returns
+ -------
+ datetime
+ """
+
+ cdef:
+ object dt
+
+ if not _does_string_look_like_datetime(date_string):
+ raise ValueError('Given date string not likely a datetime.')
+
+ if _TIMEPAT.match(date_string):
+ # use current datetime as default, not pass _DEFAULT_DATETIME
+ dt = du_parse(date_string, dayfirst=dayfirst,
+ yearfirst=yearfirst, **kwargs)
+ return dt
+
+ try:
+ dt, _, _ = _parse_dateabbr_string(date_string, _DEFAULT_DATETIME, freq)
+ return dt
+ except DateParseError:
+ raise
+ except ValueError:
+ pass
+
+ try:
+ dt = du_parse(date_string, default=_DEFAULT_DATETIME,
+ dayfirst=dayfirst, yearfirst=yearfirst, **kwargs)
+ except TypeError:
+ # following may be raised from dateutil
+ # TypeError: 'NoneType' object is not iterable
+ raise ValueError('Given date string not likely a datetime.')
+
+ return dt
+
+
+def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
+ """
+ Try hard to parse datetime string, leveraging dateutil plus some extra
+ goodies like quarter recognition.
+
+ Parameters
+ ----------
+ arg : compat.string_types
+ freq : str or DateOffset, default None
+ Helps with interpreting time string if supplied
+ dayfirst : bool, default None
+ If None uses default from print_config
+ yearfirst : bool, default None
+ If None uses default from print_config
+
+ Returns
+ -------
+ datetime, datetime/dateutil.parser._result, str
+ """
+ if not isinstance(arg, string_types):
+ return arg
+
+ if getattr(freq, "_typ", None) == "dateoffset":
+ freq = freq.rule_code
+
+ if dayfirst is None:
+ from pandas.core.config import get_option
+ dayfirst = get_option("display.date_dayfirst")
+ if yearfirst is None:
+ from pandas.core.config import get_option
+ yearfirst = get_option("display.date_yearfirst")
+
+ res = parse_datetime_string_with_reso(arg, freq=freq,
+ dayfirst=dayfirst,
+ yearfirst=yearfirst)
+ if res[0] is NAT_SENTINEL:
+ from pandas._libs.tslib import NaT
+ res = (NaT,) + res[1:]
+ return res
+
+
+def parse_datetime_string_with_reso(date_string, freq=None, dayfirst=False,
+ yearfirst=False, **kwargs):
+ """parse datetime string, only returns datetime
+
+ Returns
+ -------
+ datetime
+ """
+
+ cdef:
+ object parsed, reso
+
+ if not _does_string_look_like_datetime(date_string):
+ raise ValueError('Given date string not likely a datetime.')
+
+ try:
+ return _parse_dateabbr_string(date_string, _DEFAULT_DATETIME, freq)
+ except DateParseError:
+ raise
+ except ValueError:
+ pass
+
+ try:
+ parsed, reso = dateutil_parse(date_string, _DEFAULT_DATETIME,
+ dayfirst=dayfirst, yearfirst=yearfirst)
+ except Exception as e:
+ # TODO: allow raise of errors within instead
+ raise DateParseError(e)
+ if parsed is None:
+ raise DateParseError("Could not parse %s" % date_string)
+ return parsed, parsed, reso
+
+
+cpdef bint _does_string_look_like_datetime(object date_string):
+ if date_string.startswith('0'):
+ # Strings starting with 0 are more consistent with a
+ # date-like string than a number
+ return True
+
+ try:
+ if float(date_string) < 1000:
+ return False
+ except ValueError:
+ pass
+
+ if date_string in _not_datelike_strings:
+ return False
+
+ return True
+
+
+cdef inline object _parse_dateabbr_string(object date_string, object default,
+ object freq):
+ cdef:
+ object ret
+ int year, quarter = -1, month, mnum, date_len
+
+ # special handling for possibilities eg, 2Q2005, 2Q05, 2005Q1, 05Q1
+ assert isinstance(date_string, string_types)
+
+ # len(date_string) == 0
+ # should be NaT???
+
+ if date_string in _nat_strings:
+ return NAT_SENTINEL, NAT_SENTINEL, ''
+
+ date_string = date_string.upper()
+ date_len = len(date_string)
+
+ if date_len == 4:
+ # parse year only like 2000
+ try:
+ ret = default.replace(year=int(date_string))
+ return ret, ret, 'year'
+ except ValueError:
+ pass
+
+ try:
+ if 4 <= date_len <= 7:
+ i = date_string.index('Q', 1, 6)
+ if i == 1:
+ quarter = int(date_string[0])
+ if date_len == 4 or (date_len == 5
+ and date_string[i + 1] == '-'):
+ # r'(\d)Q-?(\d\d)')
+ year = 2000 + int(date_string[-2:])
+ elif date_len == 6 or (date_len == 7
+ and date_string[i + 1] == '-'):
+ # r'(\d)Q-?(\d\d\d\d)')
+ year = int(date_string[-4:])
+ else:
+ raise ValueError
+ elif i == 2 or i == 3:
+ # r'(\d\d)-?Q(\d)'
+ if date_len == 4 or (date_len == 5
+ and date_string[i - 1] == '-'):
+ quarter = int(date_string[-1])
+ year = 2000 + int(date_string[:2])
+ else:
+ raise ValueError
+ elif i == 4 or i == 5:
+ if date_len == 6 or (date_len == 7
+ and date_string[i - 1] == '-'):
+ # r'(\d\d\d\d)-?Q(\d)'
+ quarter = int(date_string[-1])
+ year = int(date_string[:4])
+ else:
+ raise ValueError
+
+ if not (1 <= quarter <= 4):
+ msg = ('Incorrect quarterly string is given, quarter must be '
+ 'between 1 and 4: {0}')
+ raise DateParseError(msg.format(date_string))
+
+ if freq is not None:
+ # hack attack, #1228
+ try:
+ mnum = _MONTH_NUMBERS[_get_rule_month(freq)] + 1
+ except (KeyError, ValueError):
+ msg = ('Unable to retrieve month information from given '
+ 'freq: {0}').format(freq)
+ raise DateParseError(msg)
+
+ month = (mnum + (quarter - 1) * 3) % 12 + 1
+ if month > mnum:
+ year -= 1
+ else:
+ month = (quarter - 1) * 3 + 1
+
+ ret = default.replace(year=year, month=month)
+ return ret, ret, 'quarter'
+
+ except DateParseError:
+ raise
+ except ValueError:
+ pass
+
+ if date_len == 6 and (freq == 'M' or
+ getattr(freq, 'rule_code', None) == 'M'):
+ year = int(date_string[:4])
+ month = int(date_string[4:6])
+ try:
+ ret = default.replace(year=year, month=month)
+ return ret, ret, 'month'
+ except ValueError:
+ pass
+
+ for pat in ['%Y-%m', '%m-%Y', '%b %Y', '%b-%Y']:
+ try:
+ ret = datetime.strptime(date_string, pat)
+ return ret, ret, 'month'
+ except ValueError:
+ pass
+
+ raise ValueError('Unable to parse {0}'.format(date_string))
+
+
+def dateutil_parse(object timestr, object default, ignoretz=False,
+ tzinfos=None, **kwargs):
+ """ lifted from dateutil to get resolution"""
+
+ cdef:
+ object fobj, res, attr, ret, tzdata
+ object reso = None
+ dict repl = {}
+
+ fobj = StringIO(str(timestr))
+ res = DEFAULTPARSER._parse(fobj, **kwargs)
+
+ # dateutil 2.2 compat
+ if isinstance(res, tuple): # PyTuple_Check
+ res, _ = res
+
+ if res is None:
+ msg = "Unknown datetime string format, unable to parse: {0}"
+ raise ValueError(msg.format(timestr))
+
+ for attr in ["year", "month", "day", "hour",
+ "minute", "second", "microsecond"]:
+ value = getattr(res, attr)
+ if value is not None:
+ repl[attr] = value
+ reso = attr
+
+ if reso is None:
+ msg = "Unable to parse datetime string: {0}"
+ raise ValueError(msg.format(timestr))
+
+ if reso == 'microsecond':
+ if repl['microsecond'] == 0:
+ reso = 'second'
+ elif repl['microsecond'] % 1000 == 0:
+ reso = 'millisecond'
+
+ ret = default.replace(**repl)
+ if res.weekday is not None and not res.day:
+ ret = ret + relativedelta.relativedelta(weekday=res.weekday)
+ if not ignoretz:
+ if callable(tzinfos) or tzinfos and res.tzname in tzinfos:
+ if callable(tzinfos):
+ tzdata = tzinfos(res.tzname, res.tzoffset)
+ else:
+ tzdata = tzinfos.get(res.tzname)
+ if isinstance(tzdata, datetime.tzinfo):
+ tzinfo = tzdata
+ elif isinstance(tzdata, string_types):
+ tzinfo = _dateutil_tzstr(tzdata)
+ elif isinstance(tzdata, int):
+ tzinfo = tzoffset(res.tzname, tzdata)
+ else:
+ raise ValueError("offset must be tzinfo subclass, "
+ "tz string, or int offset")
+ ret = ret.replace(tzinfo=tzinfo)
+ elif res.tzname and res.tzname in time.tzname:
+ ret = ret.replace(tzinfo=_dateutil_tzlocal())
+ elif res.tzoffset == 0:
+ ret = ret.replace(tzinfo=_dateutil_tzutc())
+ elif res.tzoffset:
+ ret = ret.replace(tzinfo=tzoffset(res.tzname, res.tzoffset))
+ return ret, reso
+
+
+cpdef object _get_rule_month(object source, object default='DEC'):
+ """
+ Return starting month of given freq, default is December.
+
+ Example
+ -------
+ >>> _get_rule_month('D')
+ 'DEC'
+
+ >>> _get_rule_month('A-JAN')
+ 'JAN'
+ """
+ if hasattr(source, 'freqstr'):
+ source = source.freqstr
+ source = source.upper()
+ if '-' not in source:
+ return default
+ else:
+ return source.split('-')[1]
+
+
+#----------------------------------------------------------------------
+# Parsing for type-inference
+
+
+def try_parse_dates(ndarray[object] values, parser=None,
+ dayfirst=False, default=None):
+ cdef:
+ Py_ssize_t i, n
+ ndarray[object] result
+
+ n = len(values)
+ result = np.empty(n, dtype='O')
+
+ if parser is None:
+ if default is None: # GH2618
+ date = datetime.now()
+ default = datetime(date.year, date.month, 1)
+
+ parse_date = lambda x: du_parse(x, dayfirst=dayfirst, default=default)
+
+ # EAFP here
+ try:
+ for i from 0 <= i < n:
+ if values[i] == '':
+ result[i] = np.nan
+ else:
+ result[i] = parse_date(values[i])
+ except Exception:
+ # failed
+ return values
+ else:
+ parse_date = parser
+
+ try:
+ for i from 0 <= i < n:
+ if values[i] == '':
+ result[i] = np.nan
+ else:
+ result[i] = parse_date(values[i])
+ except Exception:
+ # raise if passed parser and it failed
+ raise
+
+ return result
+
+
+def try_parse_date_and_time(ndarray[object] dates, ndarray[object] times,
+ date_parser=None, time_parser=None,
+ dayfirst=False, default=None):
+ cdef:
+ Py_ssize_t i, n
+ ndarray[object] result
+
+ n = len(dates)
+ if len(times) != n:
+ raise ValueError('Length of dates and times must be equal')
+ result = np.empty(n, dtype='O')
+
+ if date_parser is None:
+ if default is None: # GH2618
+ date = datetime.now()
+ default = datetime(date.year, date.month, 1)
+
+ parse_date = lambda x: du_parse(x, dayfirst=dayfirst, default=default)
+
+ else:
+ parse_date = date_parser
+
+ if time_parser is None:
+ parse_time = lambda x: du_parse(x)
+
+ else:
+ parse_time = time_parser
+
+ for i from 0 <= i < n:
+ d = parse_date(str(dates[i]))
+ t = parse_time(str(times[i]))
+ result[i] = datetime(d.year, d.month, d.day,
+ t.hour, t.minute, t.second)
+
+ return result
+
+
+def try_parse_year_month_day(ndarray[object] years, ndarray[object] months,
+ ndarray[object] days):
+ cdef:
+ Py_ssize_t i, n
+ ndarray[object] result
+
+ n = len(years)
+ if len(months) != n or len(days) != n:
+ raise ValueError('Length of years/months/days must all be equal')
+ result = np.empty(n, dtype='O')
+
+ for i from 0 <= i < n:
+ result[i] = datetime(int(years[i]), int(months[i]), int(days[i]))
+
+ return result
+
+
+def try_parse_datetime_components(ndarray[object] years,
+ ndarray[object] months,
+ ndarray[object] days,
+ ndarray[object] hours,
+ ndarray[object] minutes,
+ ndarray[object] seconds):
+
+ cdef:
+ Py_ssize_t i, n
+ ndarray[object] result
+ int secs
+ double float_secs
+ double micros
+
+ n = len(years)
+ if (len(months) != n or len(days) != n or len(hours) != n or
+ len(minutes) != n or len(seconds) != n):
+ raise ValueError('Length of all datetime components must be equal')
+ result = np.empty(n, dtype='O')
+
+ for i from 0 <= i < n:
+ float_secs = float(seconds[i])
+ secs = int(float_secs)
+
+ micros = float_secs - secs
+ if micros > 0:
+ micros = micros * 1000000
+
+ result[i] = datetime(int(years[i]), int(months[i]), int(days[i]),
+ int(hours[i]), int(minutes[i]), secs,
+ int(micros))
+
+ return result
+
+
+#----------------------------------------------------------------------
+# Miscellaneous
+
+_DATEUTIL_LEXER_SPLIT = None
+try:
+ # Since these are private methods from dateutil, it is safely imported
+ # here so in case this interface changes, pandas will just fallback
+ # to not using the functionality
+ from dateutil.parser import _timelex
+
+ if hasattr(_timelex, 'split'):
+ def _lexer_split_from_str(dt_str):
+ # The StringIO(str(_)) is for dateutil 2.2 compatibility
+ return _timelex.split(StringIO(str(dt_str)))
+
+ _DATEUTIL_LEXER_SPLIT = _lexer_split_from_str
+except (ImportError, AttributeError):
+ pass
+
+
+def _format_is_iso(f):
+ """
+ Does format match the iso8601 set that can be handled by the C parser?
+ Generally of form YYYY-MM-DDTHH:MM:SS - date separator can be different
+ but must be consistent. Leading 0s in dates and times are optional.
+ """
+ iso_template = '%Y{date_sep}%m{date_sep}%d{time_sep}%H:%M:%S.%f'.format
+ excluded_formats = ['%Y%m%d', '%Y%m', '%Y']
+
+ for date_sep in [' ', '/', '\\', '-', '.', '']:
+ for time_sep in [' ', 'T']:
+ if (iso_template(date_sep=date_sep,
+ time_sep=time_sep
+ ).startswith(f) and f not in excluded_formats):
+ return True
+ return False
+
+
+def _guess_datetime_format(dt_str, dayfirst=False, dt_str_parse=du_parse,
+ dt_str_split=_DATEUTIL_LEXER_SPLIT):
+ """
+ Guess the datetime format of a given datetime string.
+
+ Parameters
+ ----------
+ dt_str : string, datetime string to guess the format of
+ dayfirst : boolean, default False
+ If True parses dates with the day first, eg 20/01/2005
+ Warning: dayfirst=True is not strict, but will prefer to parse
+ with day first (this is a known bug).
+ dt_str_parse : function, defaults to `compat.parse_date` (dateutil)
+ This function should take in a datetime string and return
+ a `datetime.datetime` guess that the datetime string represents
+ dt_str_split : function, defaults to `_DATEUTIL_LEXER_SPLIT` (dateutil)
+ This function should take in a datetime string and return
+ a list of strings, the guess of the various specific parts
+ e.g. '2011/12/30' -> ['2011', '/', '12', '/', '30']
+
+ Returns
+ -------
+ ret : datetime format string (for `strftime` or `strptime`)
+ """
+ if dt_str_parse is None or dt_str_split is None:
+ return None
+
+ if not isinstance(dt_str, string_types):
+ return None
+
+ day_attribute_and_format = (('day',), '%d', 2)
+
+ # attr name, format, padding (if any)
+ datetime_attrs_to_format = [
+ (('year', 'month', 'day'), '%Y%m%d', 0),
+ (('year',), '%Y', 0),
+ (('month',), '%B', 0),
+ (('month',), '%b', 0),
+ (('month',), '%m', 2),
+ day_attribute_and_format,
+ (('hour',), '%H', 2),
+ (('minute',), '%M', 2),
+ (('second',), '%S', 2),
+ (('microsecond',), '%f', 6),
+ (('second', 'microsecond'), '%S.%f', 0),
+ ]
+
+ if dayfirst:
+ datetime_attrs_to_format.remove(day_attribute_and_format)
+ datetime_attrs_to_format.insert(0, day_attribute_and_format)
+
+ try:
+ parsed_datetime = dt_str_parse(dt_str, dayfirst=dayfirst)
+ except:
+ # In case the datetime can't be parsed, its format cannot be guessed
+ return None
+
+ if parsed_datetime is None:
+ return None
+
+ try:
+ tokens = dt_str_split(dt_str)
+ except:
+ # In case the datetime string can't be split, its format cannot
+ # be guessed
+ return None
+
+ format_guess = [None] * len(tokens)
+ found_attrs = set()
+
+ for attrs, attr_format, padding in datetime_attrs_to_format:
+ # If a given attribute has been placed in the format string, skip
+ # over other formats for that same underlying attribute (IE, month
+ # can be represented in multiple different ways)
+ if set(attrs) & found_attrs:
+ continue
+
+ if all(getattr(parsed_datetime, attr) is not None for attr in attrs):
+ for i, token_format in enumerate(format_guess):
+ token_filled = tokens[i].zfill(padding)
+ if (token_format is None and
+ token_filled == parsed_datetime.strftime(attr_format)):
+ format_guess[i] = attr_format
+ tokens[i] = token_filled
+ found_attrs.update(attrs)
+ break
+
+ # Only consider it a valid guess if we have a year, month and day
+ if len(set(['year', 'month', 'day']) & found_attrs) != 3:
+ return None
+
+ output_format = []
+ for i, guess in enumerate(format_guess):
+ if guess is not None:
+ # Either fill in the format placeholder (like %Y)
+ output_format.append(guess)
+ else:
+ # Or just the token separate (IE, the dashes in "01-01-2013")
+ try:
+ # If the token is numeric, then we likely didn't parse it
+ # properly, so our guess is wrong
+ float(tokens[i])
+ return None
+ except ValueError:
+ pass
+
+ output_format.append(tokens[i])
+
+ guessed_format = ''.join(output_format)
+
+ # rebuild string, capturing any inferred padding
+ dt_str = ''.join(tokens)
+ if parsed_datetime.strftime(guessed_format) == dt_str:
+ return guessed_format
+ else:
+ return None
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index f28ff9697e517..79c89f4ad2e25 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -7,6 +7,7 @@
algos as libalgos, join as libjoin,
Timestamp, Timedelta, )
from pandas._libs.lib import is_datetime_array
+from pandas._libs.tslibs import parsing
from pandas.compat import range, u
from pandas.compat.numpy import function as nv
@@ -1037,7 +1038,7 @@ def to_datetime(self, dayfirst=False):
if self.inferred_type == 'string':
from dateutil.parser import parse
parser = lambda x: parse(x, dayfirst=dayfirst)
- parsed = lib.try_parse_dates(self.values, parser=parser)
+ parsed = parsing.try_parse_dates(self.values, parser=parser)
return DatetimeIndex(parsed)
else:
return DatetimeIndex(self.values)
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index bf89509fd1746..97ac8445faf4c 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -2,9 +2,14 @@
import numpy as np
from collections import MutableMapping
-from pandas._libs import lib, tslib
+from pandas._libs import tslib
from pandas._libs.tslibs.strptime import array_strptime
from pandas._libs.tslibs.timezones import get_timezone
+from pandas._libs.tslibs import parsing
+from pandas._libs.tslibs.parsing import ( # noqa
+ parse_time_string,
+ _format_is_iso,
+ _guess_datetime_format)
from pandas.core.dtypes.common import (
_ensure_object,
@@ -19,28 +24,10 @@
is_numeric_dtype)
from pandas.core.dtypes.generic import (
ABCIndexClass, ABCSeries,
- ABCDataFrame, ABCDateOffset)
+ ABCDataFrame)
from pandas.core.dtypes.missing import notna
from pandas.core import algorithms
-import pandas.compat as compat
-
-_DATEUTIL_LEXER_SPLIT = None
-try:
- # Since these are private methods from dateutil, it is safely imported
- # here so in case this interface changes, pandas will just fallback
- # to not using the functionality
- from dateutil.parser import _timelex
-
- if hasattr(_timelex, 'split'):
- def _lexer_split_from_str(dt_str):
- # The StringIO(str(_)) is for dateutil 2.2 compatibility
- return _timelex.split(compat.StringIO(str(dt_str)))
-
- _DATEUTIL_LEXER_SPLIT = _lexer_split_from_str
-except (ImportError, AttributeError):
- pass
-
def _infer_tzinfo(start, end):
def _infer(a, b):
@@ -60,123 +47,6 @@ def _infer(a, b):
return tz
-def _guess_datetime_format(dt_str, dayfirst=False,
- dt_str_parse=compat.parse_date,
- dt_str_split=_DATEUTIL_LEXER_SPLIT):
- """
- Guess the datetime format of a given datetime string.
-
- Parameters
- ----------
- dt_str : string, datetime string to guess the format of
- dayfirst : boolean, default False
- If True parses dates with the day first, eg 20/01/2005
- Warning: dayfirst=True is not strict, but will prefer to parse
- with day first (this is a known bug).
- dt_str_parse : function, defaults to `compat.parse_date` (dateutil)
- This function should take in a datetime string and return
- a `datetime.datetime` guess that the datetime string represents
- dt_str_split : function, defaults to `_DATEUTIL_LEXER_SPLIT` (dateutil)
- This function should take in a datetime string and return
- a list of strings, the guess of the various specific parts
- e.g. '2011/12/30' -> ['2011', '/', '12', '/', '30']
-
- Returns
- -------
- ret : datetime format string (for `strftime` or `strptime`)
- """
- if dt_str_parse is None or dt_str_split is None:
- return None
-
- if not isinstance(dt_str, compat.string_types):
- return None
-
- day_attribute_and_format = (('day',), '%d', 2)
-
- # attr name, format, padding (if any)
- datetime_attrs_to_format = [
- (('year', 'month', 'day'), '%Y%m%d', 0),
- (('year',), '%Y', 0),
- (('month',), '%B', 0),
- (('month',), '%b', 0),
- (('month',), '%m', 2),
- day_attribute_and_format,
- (('hour',), '%H', 2),
- (('minute',), '%M', 2),
- (('second',), '%S', 2),
- (('microsecond',), '%f', 6),
- (('second', 'microsecond'), '%S.%f', 0),
- ]
-
- if dayfirst:
- datetime_attrs_to_format.remove(day_attribute_and_format)
- datetime_attrs_to_format.insert(0, day_attribute_and_format)
-
- try:
- parsed_datetime = dt_str_parse(dt_str, dayfirst=dayfirst)
- except:
- # In case the datetime can't be parsed, its format cannot be guessed
- return None
-
- if parsed_datetime is None:
- return None
-
- try:
- tokens = dt_str_split(dt_str)
- except:
- # In case the datetime string can't be split, its format cannot
- # be guessed
- return None
-
- format_guess = [None] * len(tokens)
- found_attrs = set()
-
- for attrs, attr_format, padding in datetime_attrs_to_format:
- # If a given attribute has been placed in the format string, skip
- # over other formats for that same underlying attribute (IE, month
- # can be represented in multiple different ways)
- if set(attrs) & found_attrs:
- continue
-
- if all(getattr(parsed_datetime, attr) is not None for attr in attrs):
- for i, token_format in enumerate(format_guess):
- token_filled = tokens[i].zfill(padding)
- if (token_format is None and
- token_filled == parsed_datetime.strftime(attr_format)):
- format_guess[i] = attr_format
- tokens[i] = token_filled
- found_attrs.update(attrs)
- break
-
- # Only consider it a valid guess if we have a year, month and day
- if len(set(['year', 'month', 'day']) & found_attrs) != 3:
- return None
-
- output_format = []
- for i, guess in enumerate(format_guess):
- if guess is not None:
- # Either fill in the format placeholder (like %Y)
- output_format.append(guess)
- else:
- # Or just the token separate (IE, the dashes in "01-01-2013")
- try:
- # If the token is numeric, then we likely didn't parse it
- # properly, so our guess is wrong
- float(tokens[i])
- return None
- except ValueError:
- pass
-
- output_format.append(tokens[i])
-
- guessed_format = ''.join(output_format)
-
- # rebuild string, capturing any inferred padding
- dt_str = ''.join(tokens)
- if parsed_datetime.strftime(guessed_format) == dt_str:
- return guessed_format
-
-
def _guess_datetime_format_for_array(arr, **kwargs):
# Try to guess the format based on the first non-NaN element
non_nan_elements = notna(arr).nonzero()[0]
@@ -655,9 +525,9 @@ def _attempt_YYYYMMDD(arg, errors):
def calc(carg):
# calculate the actual result
carg = carg.astype(object)
- parsed = lib.try_parse_year_month_day(carg / 10000,
- carg / 100 % 100,
- carg % 100)
+ parsed = parsing.try_parse_year_month_day(carg / 10000,
+ carg / 100 % 100,
+ carg % 100)
return tslib.array_to_datetime(parsed, errors=errors)
def calc_with_mask(carg, mask):
@@ -691,60 +561,6 @@ def calc_with_mask(carg, mask):
return None
-def _format_is_iso(f):
- """
- Does format match the iso8601 set that can be handled by the C parser?
- Generally of form YYYY-MM-DDTHH:MM:SS - date separator can be different
- but must be consistent. Leading 0s in dates and times are optional.
- """
- iso_template = '%Y{date_sep}%m{date_sep}%d{time_sep}%H:%M:%S.%f'.format
- excluded_formats = ['%Y%m%d', '%Y%m', '%Y']
-
- for date_sep in [' ', '/', '\\', '-', '.', '']:
- for time_sep in [' ', 'T']:
- if (iso_template(date_sep=date_sep,
- time_sep=time_sep
- ).startswith(f) and f not in excluded_formats):
- return True
- return False
-
-
-def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
- """
- Try hard to parse datetime string, leveraging dateutil plus some extra
- goodies like quarter recognition.
-
- Parameters
- ----------
- arg : compat.string_types
- freq : str or DateOffset, default None
- Helps with interpreting time string if supplied
- dayfirst : bool, default None
- If None uses default from print_config
- yearfirst : bool, default None
- If None uses default from print_config
-
- Returns
- -------
- datetime, datetime/dateutil.parser._result, str
- """
- from pandas.core.config import get_option
- if not isinstance(arg, compat.string_types):
- return arg
-
- if isinstance(freq, ABCDateOffset):
- freq = freq.rule_code
-
- if dayfirst is None:
- dayfirst = get_option("display.date_dayfirst")
- if yearfirst is None:
- yearfirst = get_option("display.date_yearfirst")
-
- return tslib.parse_datetime_string_with_reso(arg, freq=freq,
- dayfirst=dayfirst,
- yearfirst=yearfirst)
-
-
DateParseError = tslib.DateParseError
normalize_date = tslib.normalize_date
diff --git a/pandas/io/date_converters.py b/pandas/io/date_converters.py
index 080d6c3e273a3..377373f8a0135 100644
--- a/pandas/io/date_converters.py
+++ b/pandas/io/date_converters.py
@@ -1,20 +1,20 @@
"""This module is designed for community supported date conversion functions"""
from pandas.compat import range, map
import numpy as np
-import pandas._libs.lib as lib
+from pandas._libs.tslibs import parsing
def parse_date_time(date_col, time_col):
date_col = _maybe_cast(date_col)
time_col = _maybe_cast(time_col)
- return lib.try_parse_date_and_time(date_col, time_col)
+ return parsing.try_parse_date_and_time(date_col, time_col)
def parse_date_fields(year_col, month_col, day_col):
year_col = _maybe_cast(year_col)
month_col = _maybe_cast(month_col)
day_col = _maybe_cast(day_col)
- return lib.try_parse_year_month_day(year_col, month_col, day_col)
+ return parsing.try_parse_year_month_day(year_col, month_col, day_col)
def parse_all_fields(year_col, month_col, day_col, hour_col, minute_col,
@@ -25,8 +25,9 @@ def parse_all_fields(year_col, month_col, day_col, hour_col, minute_col,
hour_col = _maybe_cast(hour_col)
minute_col = _maybe_cast(minute_col)
second_col = _maybe_cast(second_col)
- return lib.try_parse_datetime_components(year_col, month_col, day_col,
- hour_col, minute_col, second_col)
+ return parsing.try_parse_datetime_components(year_col, month_col, day_col,
+ hour_col, minute_col,
+ second_col)
def generic_parser(parse_func, *cols):
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index ed15d4295d688..eeb79552477e1 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -42,7 +42,7 @@
import pandas._libs.lib as lib
import pandas._libs.parsers as parsers
-
+from pandas._libs.tslibs import parsing
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
@@ -2981,7 +2981,7 @@ def converter(*date_cols):
)
except:
return tools.to_datetime(
- lib.try_parse_dates(strs, dayfirst=dayfirst))
+ parsing.try_parse_dates(strs, dayfirst=dayfirst))
else:
try:
result = tools.to_datetime(
@@ -2992,9 +2992,9 @@ def converter(*date_cols):
except Exception:
try:
return tools.to_datetime(
- lib.try_parse_dates(_concat_date_cols(date_cols),
- parser=date_parser,
- dayfirst=dayfirst),
+ parsing.try_parse_dates(_concat_date_cols(date_cols),
+ parser=date_parser,
+ dayfirst=dayfirst),
errors='ignore')
except Exception:
return generic_parser(date_parser, *date_cols)
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index e0ccedb834adf..bdfe6b5b09e45 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -12,7 +12,8 @@
from distutils.version import LooseVersion
import pandas as pd
-from pandas._libs import tslib, lib
+from pandas._libs import tslib
+from pandas._libs.tslibs import parsing
from pandas.core.tools import datetimes as tools
from pandas.core.tools.datetimes import normalize_date
from pandas.compat import lmap
@@ -1063,7 +1064,7 @@ def test_does_not_convert_mixed_integer(self):
bad_date_strings = ('-50000', '999', '123.1234', 'm', 'T')
for bad_date_string in bad_date_strings:
- assert not tslib._does_string_look_like_datetime(bad_date_string)
+ assert not parsing._does_string_look_like_datetime(bad_date_string)
good_date_strings = ('2012-01-01',
'01/01/2012',
@@ -1073,7 +1074,7 @@ def test_does_not_convert_mixed_integer(self):
'1-1', )
for good_date_string in good_date_strings:
- assert tslib._does_string_look_like_datetime(good_date_string)
+ assert parsing._does_string_look_like_datetime(good_date_string)
def test_parsers(self):
@@ -1412,7 +1413,7 @@ class TestArrayToDatetime(object):
def test_try_parse_dates(self):
arr = np.array(['5/1/2000', '6/1/2000', '7/1/2000'], dtype=object)
- result = lib.try_parse_dates(arr, dayfirst=True)
+ result = parsing.try_parse_dates(arr, dayfirst=True)
expected = [parse(d, dayfirst=True) for d in arr]
assert np.array_equal(result, expected)
diff --git a/pandas/tests/io/parser/parse_dates.py b/pandas/tests/io/parser/parse_dates.py
index e1ae1b577ea29..90103e7bf26b0 100644
--- a/pandas/tests/io/parser/parse_dates.py
+++ b/pandas/tests/io/parser/parse_dates.py
@@ -10,7 +10,7 @@
import pytest
import numpy as np
-import pandas._libs.lib as lib
+from pandas._libs.tslibs import parsing
from pandas._libs.lib import Timestamp
import pandas as pd
@@ -53,7 +53,8 @@ def test_multiple_date_col(self):
"""
def func(*date_cols):
- return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
+ res = parsing.try_parse_dates(parsers._concat_date_cols(date_cols))
+ return res
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
diff --git a/setup.py b/setup.py
index 25a4924dad0bc..d25ae4a5fb45c 100755
--- a/setup.py
+++ b/setup.py
@@ -343,6 +343,7 @@ class CheckSDist(sdist_class):
'pandas/_libs/parsers.pyx',
'pandas/_libs/tslibs/timezones.pyx',
'pandas/_libs/tslibs/frequencies.pyx',
+ 'pandas/_libs/tslibs/parsing.pyx',
'pandas/io/sas/sas.pyx']
def initialize_options(self):
@@ -498,6 +499,8 @@ def pxd(name):
'sources': ['pandas/_libs/src/datetime/np_datetime.c',
'pandas/_libs/src/datetime/np_datetime_strings.c',
'pandas/_libs/src/period_helper.c']},
+ '_libs.tslibs.parsing': {'pyxfile': '_libs/tslibs/parsing',
+ 'pxdfiles': ['_libs/src/util']},
'_libs.tslibs.frequencies': {'pyxfile': '_libs/tslibs/frequencies',
'pxdfiles': ['_libs/src/util']},
'_libs.index': {'pyxfile': '_libs/index',
| This is part 3 in an N part series of PRs to split `tslib` into thematically distinct modules. The others so far are #17274 and #17342.
Moves parsing functions from _libs/src/inference and `core.tools.datetimes`.
The `tslibs.parsing` module has no within-pandas dependencies. There are some dateutil workarounds that ideally can be upstreamed.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17363 | 2017-08-28T21:39:38Z | 2017-09-26T13:29:19Z | 2017-09-26T13:29:19Z | 2017-10-30T16:23:20Z |
ENH: Support sorting frames by a combo of columns and index levels (GH 14353) | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 74b3dbb83ea91..0b3f2cca55518 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -226,11 +226,11 @@ We can also do elementwise :func:`divmod`:
Missing data / operations with fill values
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-In Series and DataFrame, the arithmetic functions have the option of inputting
-a *fill_value*, namely a value to substitute when at most one of the values at
-a location are missing. For example, when adding two DataFrame objects, you may
-wish to treat NaN as 0 unless both DataFrames are missing that value, in which
-case the result will be NaN (you can later replace NaN with some other value
+In Series and DataFrame, the arithmetic functions have the option of inputting
+a *fill_value*, namely a value to substitute when at most one of the values at
+a location are missing. For example, when adding two DataFrame objects, you may
+wish to treat NaN as 0 unless both DataFrames are missing that value, in which
+case the result will be NaN (you can later replace NaN with some other value
using ``fillna`` if you wish).
.. ipython:: python
@@ -260,8 +260,8 @@ arithmetic operations described above:
df.gt(df2)
df2.ne(df)
-These operations produce a pandas object of the same type as the left-hand-side
-input that is of dtype ``bool``. These ``boolean`` objects can be used in
+These operations produce a pandas object of the same type as the left-hand-side
+input that is of dtype ``bool``. These ``boolean`` objects can be used in
indexing operations, see the section on :ref:`Boolean indexing<indexing.boolean>`.
.. _basics.reductions:
@@ -452,7 +452,7 @@ So, for instance, to reproduce :meth:`~DataFrame.combine_first` as above:
Descriptive statistics
----------------------
-There exists a large number of methods for computing descriptive statistics and
+There exists a large number of methods for computing descriptive statistics and
other related operations on :ref:`Series <api.series.stats>`, :ref:`DataFrame
<api.dataframe.stats>`, and :ref:`Panel <api.panel.stats>`. Most of these
are aggregations (hence producing a lower-dimensional result) like
@@ -540,7 +540,7 @@ will exclude NAs on Series input by default:
np.mean(df['one'])
np.mean(df['one'].values)
-:meth:`Series.nunique` will return the number of unique non-NA values in a
+:meth:`Series.nunique` will return the number of unique non-NA values in a
Series:
.. ipython:: python
@@ -852,7 +852,7 @@ Aggregation API
The aggregation API allows one to express possibly multiple aggregation operations in a single concise way.
This API is similar across pandas objects, see :ref:`groupby API <groupby.aggregate>`, the
:ref:`window functions API <stats.aggregate>`, and the :ref:`resample API <timeseries.aggregate>`.
-The entry point for aggregation is :meth:`DataFrame.aggregate`, or the alias
+The entry point for aggregation is :meth:`DataFrame.aggregate`, or the alias
:meth:`DataFrame.agg`.
We will use a similar starting frame from above:
@@ -864,8 +864,8 @@ We will use a similar starting frame from above:
tsdf.iloc[3:7] = np.nan
tsdf
-Using a single function is equivalent to :meth:`~DataFrame.apply`. You can also
-pass named methods as strings. These will return a ``Series`` of the aggregated
+Using a single function is equivalent to :meth:`~DataFrame.apply`. You can also
+pass named methods as strings. These will return a ``Series`` of the aggregated
output:
.. ipython:: python
@@ -887,7 +887,7 @@ Single aggregations on a ``Series`` this will return a scalar value:
Aggregating with multiple functions
+++++++++++++++++++++++++++++++++++
-You can pass multiple aggregation arguments as a list.
+You can pass multiple aggregation arguments as a list.
The results of each of the passed functions will be a row in the resulting ``DataFrame``.
These are naturally named from the aggregation function.
@@ -1430,7 +1430,7 @@ Series can also be used:
df.rename(columns={'one': 'foo', 'two': 'bar'},
index={'a': 'apple', 'b': 'banana', 'd': 'durian'})
-If the mapping doesn't include a column/index label, it isn't renamed. Note that
+If the mapping doesn't include a column/index label, it isn't renamed. Note that
extra labels in the mapping don't throw an error.
.. versionadded:: 0.21.0
@@ -1740,19 +1740,26 @@ description.
Sorting
-------
-There are two obvious kinds of sorting that you may be interested in: sorting
-by label and sorting by actual values.
+Pandas supports three kinds of sorting: sorting by index labels,
+sorting by column values, and sorting by a combination of both.
+
+.. _basics.sort_index:
By Index
~~~~~~~~
-The primary method for sorting axis
-labels (indexes) are the ``Series.sort_index()`` and the ``DataFrame.sort_index()`` methods.
+The :meth:`Series.sort_index` and :meth:`DataFrame.sort_index` methods are
+used to sort a pandas object by its index levels.
.. ipython:: python
+ df = pd.DataFrame({'one' : pd.Series(np.random.randn(3), index=['a', 'b', 'c']),
+ 'two' : pd.Series(np.random.randn(4), index=['a', 'b', 'c', 'd']),
+ 'three' : pd.Series(np.random.randn(3), index=['b', 'c', 'd'])})
+
unsorted_df = df.reindex(index=['a', 'd', 'c', 'b'],
columns=['three', 'two', 'one'])
+ unsorted_df
# DataFrame
unsorted_df.sort_index()
@@ -1762,20 +1769,22 @@ labels (indexes) are the ``Series.sort_index()`` and the ``DataFrame.sort_index(
# Series
unsorted_df['three'].sort_index()
+.. _basics.sort_values:
+
By Values
~~~~~~~~~
-The :meth:`Series.sort_values` and :meth:`DataFrame.sort_values` are the entry points for **value** sorting (i.e. the values in a column or row).
-:meth:`DataFrame.sort_values` can accept an optional ``by`` argument for ``axis=0``
-which will use an arbitrary vector or a column name of the DataFrame to
-determine the sort order:
+The :meth:`Series.sort_values` method is used to sort a `Series` by its values. The
+:meth:`DataFrame.sort_values` method is used to sort a `DataFrame` by its column or row values.
+The optional ``by`` parameter to :meth:`DataFrame.sort_values` may used to specify one or more columns
+to use to determine the sorted order.
.. ipython:: python
df1 = pd.DataFrame({'one':[2,1,1,1],'two':[1,3,2,4],'three':[5,4,3,2]})
df1.sort_values(by='two')
-The ``by`` argument can take a list of column names, e.g.:
+The ``by`` parameter can take a list of column names, e.g.:
.. ipython:: python
@@ -1790,6 +1799,39 @@ argument:
s.sort_values()
s.sort_values(na_position='first')
+.. _basics.sort_indexes_and_values:
+
+By Indexes and Values
+~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 0.23.0
+
+Strings passed as the ``by`` parameter to :meth:`DataFrame.sort_values` may
+refer to either columns or index level names.
+
+.. ipython:: python
+
+ # Build MultiIndex
+ idx = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('a', 2),
+ ('b', 2), ('b', 1), ('b', 1)])
+ idx.names = ['first', 'second']
+
+ # Build DataFrame
+ df_multi = pd.DataFrame({'A': np.arange(6, 0, -1)},
+ index=idx)
+ df_multi
+
+Sort by 'second' (index) and 'A' (column)
+
+.. ipython:: python
+
+ df_multi.sort_values(by=['second', 'A'])
+
+.. note::
+
+ If a string matches both a column name and an index level name then a
+ warning is issued and the column takes precedence. This will result in an
+ ambiguity error in a future version.
.. _basics.searchsorted:
@@ -1881,7 +1923,7 @@ The main types stored in pandas objects are ``float``, ``int``, ``bool``,
``int64`` and ``int32``. See :ref:`Series with TZ <timeseries.timezone_series>`
for more detail on ``datetime64[ns, tz]`` dtypes.
-A convenient :attr:`~DataFrame.dtypes` attribute for DataFrame returns a Series
+A convenient :attr:`~DataFrame.dtypes` attribute for DataFrame returns a Series
with the data type of each column.
.. ipython:: python
@@ -1902,8 +1944,8 @@ On a ``Series`` object, use the :attr:`~Series.dtype` attribute.
dft['A'].dtype
-If a pandas object contains data with multiple dtypes *in a single column*, the
-dtype of the column will be chosen to accommodate all of the data types
+If a pandas object contains data with multiple dtypes *in a single column*, the
+dtype of the column will be chosen to accommodate all of the data types
(``object`` is the most general).
.. ipython:: python
@@ -1941,7 +1983,7 @@ defaults
~~~~~~~~
By default integer types are ``int64`` and float types are ``float64``,
-*regardless* of platform (32-bit or 64-bit).
+*regardless* of platform (32-bit or 64-bit).
The following will all result in ``int64`` dtypes.
.. ipython:: python
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 5fd7c3e217928..42ea429aae1de 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -62,6 +62,32 @@ levels <merging.merge_on_columns_and_levels>` documentation section.
left.merge(right, on=['key1', 'key2'])
+.. _whatsnew_0230.enhancements.sort_by_columns_and_levels:
+
+Sorting by a combination of columns and index levels
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Strings passed to :meth:`DataFrame.sort_values` as the ``by`` parameter may
+now refer to either column names or index level names. This enables sorting
+``DataFrame`` instances by a combination of index levels and columns without
+resetting indexes. See the :ref:`Sorting by Indexes and Values
+<basics.sort_indexes_and_values>` documentation section.
+(:issue:`14353`)
+
+.. ipython:: python
+
+ # Build MultiIndex
+ idx = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('a', 2),
+ ('b', 2), ('b', 1), ('b', 1)])
+ idx.names = ['first', 'second']
+
+ # Build DataFrame
+ df_multi = pd.DataFrame({'A': np.arange(6, 0, -1)},
+ index=idx)
+ df_multi
+
+ # Sort by 'second' (index) and 'A' (column)
+ df_multi.sort_values(by=['second', 'A'])
.. _whatsnew_0230.enhancements.ran_inf:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 9acc82b50aabf..821db3c263885 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -113,7 +113,15 @@
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
- Name or list of names which refer to the axis items.""",
+ Name or list of names to sort by.
+
+ - if `axis` is 0 or `'index'` then `by` may contain index
+ levels and/or column labels
+ - if `axis` is 1 or `'columns'` then `by` may contain column
+ levels and/or index labels
+
+ .. versionmodified:: 0.23.0
+ Allow specifying index or column level names.""",
versionadded_to_excel='',
optional_labels="""labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
@@ -3623,7 +3631,7 @@ def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
- other_axis = 0 if axis == 1 else 1
+ stacklevel = 2 # Number of stack levels from df.sort_values
if not isinstance(by, list):
by = [by]
@@ -3635,10 +3643,8 @@ def sort_values(self, by, axis=0, ascending=True, inplace=False,
keys = []
for x in by:
- k = self.xs(x, axis=other_axis).values
- if k.ndim == 2:
- raise ValueError('Cannot sort by duplicate column %s' %
- str(x))
+ k = self._get_label_or_level_values(x, axis=axis,
+ stacklevel=stacklevel)
keys.append(k)
indexer = lexsort_indexer(keys, orders=ascending,
na_position=na_position)
@@ -3647,17 +3653,9 @@ def sort_values(self, by, axis=0, ascending=True, inplace=False,
from pandas.core.sorting import nargsort
by = by[0]
- k = self.xs(by, axis=other_axis).values
- if k.ndim == 2:
-
- # try to be helpful
- if isinstance(self.columns, MultiIndex):
- raise ValueError('Cannot sort by column %s in a '
- 'multi-index you need to explicitly '
- 'provide all the levels' % str(by))
+ k = self._get_label_or_level_values(by, axis=axis,
+ stacklevel=stacklevel)
- raise ValueError('Cannot sort by duplicate column %s' %
- str(by))
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 84799d12df0c4..09abe6d1faa38 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -69,7 +69,7 @@
args_transpose='axes to permute (int or label for object)',
optional_by="""
by : str or list of str
- Name or list of names which refer to the axis items.""")
+ Name or list of names to sort by""")
def _single_replace(self, to_replace, method, inplace, limit):
@@ -1156,7 +1156,7 @@ def _is_label_or_level_reference(self, key, axis=0):
return (self._is_level_reference(key, axis=axis) or
self._is_label_reference(key, axis=axis))
- def _check_label_or_level_ambiguity(self, key, axis=0):
+ def _check_label_or_level_ambiguity(self, key, axis=0, stacklevel=1):
"""
Check whether `key` matches both a level of the input `axis` and a
label of the other axis and raise a ``FutureWarning`` if this is the
@@ -1169,9 +1169,10 @@ def _check_label_or_level_ambiguity(self, key, axis=0):
----------
key: str or object
label or level name
-
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
+ stacklevel: int, default 1
+ Stack level used when a FutureWarning is raised (see below).
Returns
-------
@@ -1216,12 +1217,12 @@ def _check_label_or_level_ambiguity(self, key, axis=0):
label_article=label_article,
label_type=label_type)
- warnings.warn(msg, FutureWarning, stacklevel=2)
+ warnings.warn(msg, FutureWarning, stacklevel=stacklevel + 1)
return True
else:
return False
- def _get_label_or_level_values(self, key, axis=0):
+ def _get_label_or_level_values(self, key, axis=0, stacklevel=1):
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
@@ -1240,6 +1241,8 @@ def _get_label_or_level_values(self, key, axis=0):
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
+ stacklevel: int, default 1
+ Stack level used when a FutureWarning is raised (see below).
Returns
-------
@@ -1251,6 +1254,9 @@ def _get_label_or_level_values(self, key, axis=0):
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
+ FutureWarning
+ if `key` is ambiguous. This will become an ambiguity error in a
+ future version
"""
axis = self._get_axis_number(axis)
@@ -1262,7 +1268,8 @@ def _get_label_or_level_values(self, key, axis=0):
.format(type=type(self)))
if self._is_label_reference(key, axis=axis):
- self._check_label_or_level_ambiguity(key, axis=axis)
+ self._check_label_or_level_ambiguity(key, axis=axis,
+ stacklevel=stacklevel + 1)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
@@ -1271,11 +1278,22 @@ def _get_label_or_level_values(self, key, axis=0):
# Check for duplicates
if values.ndim > 1:
+
+ if other_axes and isinstance(
+ self._get_axis(other_axes[0]), MultiIndex):
+ multi_message = ('\n'
+ 'For a multi-index, the label must be a '
+ 'tuple with elements corresponding to '
+ 'each level.')
+ else:
+ multi_message = ''
+
label_axis_name = 'column' if axis == 0 else 'index'
raise ValueError(("The {label_axis_name} label '{key}' "
- "is not unique")
+ "is not unique.{multi_message}")
.format(key=key,
- label_axis_name=label_axis_name))
+ label_axis_name=label_axis_name,
+ multi_message=multi_message))
return values
@@ -2956,7 +2974,7 @@ def add_suffix(self, suffix):
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
- Axis to direct sorting
+ Axis to be sorted
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 285a347153a82..082b6e2a8b1a0 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -2972,7 +2972,9 @@ def is_in_obj(gpr):
elif is_in_axis(gpr): # df.groupby('name')
if gpr in obj:
if validate:
- obj._check_label_or_level_ambiguity(gpr)
+ stacklevel = 5 # Number of stack levels from df.groupby
+ obj._check_label_or_level_ambiguity(
+ gpr, stacklevel=stacklevel)
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
elif obj._is_level_reference(gpr):
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 455c6f42ac74a..ad2a433b5632b 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -815,6 +815,7 @@ def _get_merge_keys(self):
right_drop = []
left_drop = []
left, right = self.left, self.right
+ stacklevel = 5 # Number of stack levels from df.merge
is_lkey = lambda x: isinstance(
x, (np.ndarray, Series)) and len(x) == len(left)
@@ -842,7 +843,8 @@ def _get_merge_keys(self):
else:
if rk is not None:
right_keys.append(
- right._get_label_or_level_values(rk))
+ right._get_label_or_level_values(
+ rk, stacklevel=stacklevel))
join_names.append(rk)
else:
# work-around for merge_asof(right_index=True)
@@ -852,7 +854,8 @@ def _get_merge_keys(self):
if not is_rkey(rk):
if rk is not None:
right_keys.append(
- right._get_label_or_level_values(rk))
+ right._get_label_or_level_values(
+ rk, stacklevel=stacklevel))
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
@@ -865,7 +868,8 @@ def _get_merge_keys(self):
else:
right_keys.append(rk)
if lk is not None:
- left_keys.append(left._get_label_or_level_values(lk))
+ left_keys.append(left._get_label_or_level_values(
+ lk, stacklevel=stacklevel))
join_names.append(lk)
else:
# work-around for merge_asof(left_index=True)
@@ -877,7 +881,8 @@ def _get_merge_keys(self):
left_keys.append(k)
join_names.append(None)
else:
- left_keys.append(left._get_label_or_level_values(k))
+ left_keys.append(left._get_label_or_level_values(
+ k, stacklevel=stacklevel))
join_names.append(k)
if isinstance(self.right.index, MultiIndex):
right_keys = [lev._values.take(lab)
@@ -891,7 +896,8 @@ def _get_merge_keys(self):
right_keys.append(k)
join_names.append(None)
else:
- right_keys.append(right._get_label_or_level_values(k))
+ right_keys.append(right._get_label_or_level_values(
+ k, stacklevel=stacklevel))
join_names.append(k)
if isinstance(self.left.index, MultiIndex):
left_keys = [lev._values.take(lab)
diff --git a/pandas/tests/frame/test_sort_values_level_as_str.py b/pandas/tests/frame/test_sort_values_level_as_str.py
new file mode 100644
index 0000000000000..3b4eadfce81cd
--- /dev/null
+++ b/pandas/tests/frame/test_sort_values_level_as_str.py
@@ -0,0 +1,126 @@
+import numpy as np
+import pytest
+
+from pandas import DataFrame, Index
+from pandas.errors import PerformanceWarning
+from pandas.util import testing as tm
+from pandas.util.testing import assert_frame_equal
+
+
+@pytest.fixture
+def df_none():
+ return DataFrame({
+ 'outer': ['a', 'a', 'a', 'b', 'b', 'b'],
+ 'inner': [1, 2, 2, 2, 1, 1],
+ 'A': np.arange(6, 0, -1),
+ ('B', 5): ['one', 'one', 'two', 'two', 'one', 'one']})
+
+
+@pytest.fixture(params=[
+ ['outer'],
+ ['outer', 'inner']
+])
+def df_idx(request, df_none):
+ levels = request.param
+ return df_none.set_index(levels)
+
+
+@pytest.fixture(params=[
+ 'inner', # index level
+ ['outer'], # list of index level
+ 'A', # column
+ [('B', 5)], # list of column
+ ['inner', 'outer'], # two index levels
+ [('B', 5), 'outer'], # index level and column
+ ['A', ('B', 5)], # Two columns
+ ['inner', 'outer'] # two index levels and column
+])
+def sort_names(request):
+ return request.param
+
+
+@pytest.fixture(params=[True, False])
+def ascending(request):
+ return request.param
+
+
+def test_sort_index_level_and_column_label(
+ df_none, df_idx, sort_names, ascending):
+
+ # GH 14353
+
+ # Get index levels from df_idx
+ levels = df_idx.index.names
+
+ # Compute expected by sorting on columns and the setting index
+ expected = df_none.sort_values(by=sort_names,
+ ascending=ascending,
+ axis=0).set_index(levels)
+
+ # Compute result sorting on mix on columns and index levels
+ result = df_idx.sort_values(by=sort_names,
+ ascending=ascending,
+ axis=0)
+
+ assert_frame_equal(result, expected)
+
+
+def test_sort_column_level_and_index_label(
+ df_none, df_idx, sort_names, ascending):
+
+ # GH 14353
+
+ # Get levels from df_idx
+ levels = df_idx.index.names
+
+ # Compute expected by sorting on axis=0, setting index levels, and then
+ # transposing. For some cases this will result in a frame with
+ # multiple column levels
+ expected = df_none.sort_values(by=sort_names,
+ ascending=ascending,
+ axis=0).set_index(levels).T
+
+ # Compute result by transposing and sorting on axis=1.
+ result = df_idx.T.sort_values(by=sort_names,
+ ascending=ascending,
+ axis=1)
+
+ if len(levels) > 1:
+ # Accessing multi-level columns that are not lexsorted raises a
+ # performance warning
+ with tm.assert_produces_warning(PerformanceWarning,
+ check_stacklevel=False):
+ assert_frame_equal(result, expected)
+ else:
+ assert_frame_equal(result, expected)
+
+
+def test_sort_values_column_index_level_precedence():
+ # GH 14353, when a string passed as the `by` parameter
+ # matches a column and an index level the column takes
+ # precedence
+
+ # Construct DataFrame with index and column named 'idx'
+ idx = Index(np.arange(1, 7), name='idx')
+ df = DataFrame({'A': np.arange(11, 17),
+ 'idx': np.arange(6, 0, -1)},
+ index=idx)
+
+ # Sorting by 'idx' should sort by the idx column and raise a
+ # FutureWarning
+ with tm.assert_produces_warning(FutureWarning):
+ result = df.sort_values(by='idx')
+
+ # This should be equivalent to sorting by the 'idx' index level in
+ # descending order
+ expected = df.sort_index(level='idx', ascending=False)
+ assert_frame_equal(result, expected)
+
+ # Perform same test with MultiIndex
+ df_multi = df.set_index('A', append=True)
+
+ with tm.assert_produces_warning(FutureWarning):
+ result = df_multi.sort_values(by='idx')
+
+ expected = df_multi.sort_index(level='idx', ascending=False)
+ assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py
index a98439797dc28..5bd239f8a3034 100644
--- a/pandas/tests/frame/test_sorting.py
+++ b/pandas/tests/frame/test_sorting.py
@@ -455,26 +455,26 @@ def test_sort_index_duplicates(self):
df = DataFrame([lrange(5, 9), lrange(4)],
columns=['a', 'a', 'b', 'b'])
- with tm.assert_raises_regex(ValueError, 'duplicate'):
+ with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
- with tm.assert_raises_regex(ValueError, 'duplicate'):
+ with tm.assert_raises_regex(ValueError, 'not unique'):
df.sort_values(by='a')
- with tm.assert_raises_regex(ValueError, 'duplicate'):
+ with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['a'])
- with tm.assert_raises_regex(ValueError, 'duplicate'):
+ with tm.assert_raises_regex(ValueError, 'not unique'):
df.sort_values(by=['a'])
- with tm.assert_raises_regex(ValueError, 'duplicate'):
+ with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
# multi-column 'by' is separate codepath
df.sort_index(by=['a', 'b'])
- with tm.assert_raises_regex(ValueError, 'duplicate'):
+ with tm.assert_raises_regex(ValueError, 'not unique'):
# multi-column 'by' is separate codepath
df.sort_values(by=['a', 'b'])
@@ -482,11 +482,11 @@ def test_sort_index_duplicates(self):
# GH4370
df = DataFrame(np.random.randn(4, 2),
columns=MultiIndex.from_tuples([('a', 0), ('a', 1)]))
- with tm.assert_raises_regex(ValueError, 'levels'):
+ with tm.assert_raises_regex(ValueError, 'level'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
- with tm.assert_raises_regex(ValueError, 'levels'):
+ with tm.assert_raises_regex(ValueError, 'level'):
df.sort_values(by='a')
# convert tuples to a list of tuples
diff --git a/pandas/tests/generic/test_label_or_level_utils.py b/pandas/tests/generic/test_label_or_level_utils.py
index 456cb48020500..1ad1b06aaefa2 100644
--- a/pandas/tests/generic/test_label_or_level_utils.py
+++ b/pandas/tests/generic/test_label_or_level_utils.py
@@ -175,8 +175,7 @@ def test_check_label_or_level_ambiguity_df(df_ambig, axis):
# df_ambig has both an on-axis level and off-axis label named L1
# Therefore L1 is ambiguous
with tm.assert_produces_warning(FutureWarning,
- clear=True,
- check_stacklevel=False) as w:
+ clear=True) as w:
assert df_ambig._check_label_or_level_ambiguity('L1', axis=axis)
warning_msg = w[0].message.args[0]
@@ -245,7 +244,8 @@ def assert_label_values(frame, labels, axis):
else:
expected = frame.loc[label]._values
- result = frame._get_label_or_level_values(label, axis=axis)
+ result = frame._get_label_or_level_values(label, axis=axis,
+ stacklevel=2)
assert array_equivalent(expected, result)
@@ -288,8 +288,7 @@ def test_get_label_or_level_values_df_ambig(df_ambig, axis):
# df has both an on-axis level and off-axis label named L1
# Therefore L1 is ambiguous but will default to label
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
assert_label_values(df_ambig, ['L1'], axis=axis)
# df has an on-axis level named L2 and it is not ambiguous
diff --git a/pandas/tests/groupby/test_index_as_string.py b/pandas/tests/groupby/test_index_as_string.py
index cee78eab3a636..9fe677664049e 100644
--- a/pandas/tests/groupby/test_index_as_string.py
+++ b/pandas/tests/groupby/test_index_as_string.py
@@ -99,7 +99,7 @@ def test_grouper_column_index_level_precedence(frame,
frame['inner'] = [1, 1, 1, 1, 1, 1]
# Performing a groupby with strings should produce warning
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
result = frame.groupby(key_strs).mean()
# Grouping with key Grouper should produce the same result and no warning
diff --git a/pandas/tests/reshape/merge/test_merge_index_as_string.py b/pandas/tests/reshape/merge/test_merge_index_as_string.py
index 4c638f8e441fa..09109e2692a24 100644
--- a/pandas/tests/reshape/merge/test_merge_index_as_string.py
+++ b/pandas/tests/reshape/merge/test_merge_index_as_string.py
@@ -200,14 +200,14 @@ def test_merge_index_column_precedence(df1, df2):
# Merge left_df and right_df on 'outer' and 'inner'
# 'outer' for left_df should refer to the 'outer' column, not the
# 'outer' index level and a FutureWarning should be raised
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
result = left_df.merge(right_df, on=['outer', 'inner'])
# Check results
assert_frame_equal(result, expected)
# Perform the same using the left_on and right_on parameters
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning):
result = left_df.merge(right_df,
left_on=['outer', 'inner'],
right_on=['outer', 'inner'])
| This PR implements the changes proposed in #14353. @jorisvandenbossche
- [x] closes #14353
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
- [x] Rework to use utility methods introduced in #17484 after it is merged
| https://api.github.com/repos/pandas-dev/pandas/pulls/17361 | 2017-08-28T19:38:48Z | 2018-01-05T19:15:39Z | 2018-01-05T19:15:38Z | 2018-01-05T19:54:04Z |
BUG: Respect color argument in bar plot | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 722e19d2703b5..7dcee381a068d 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -524,6 +524,8 @@ Plotting
^^^^^^^^
- Bug in plotting methods using ``secondary_y`` and ``fontsize`` not setting secondary axis font size (:issue:`12565`)
- Bug when plotting ``timedelta`` and ``datetime`` dtypes on y-axis (:issue:`16953`)
+- Bug in ``Series.plot.bar`` or ``DataFramee.plot.bar`` with ``y`` not respecting user-passed ``color`` (:issue:`16822`)
+
Groupby/Resample/Rolling
^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index a0b7e93efd05c..029ea3c416757 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -1150,6 +1150,9 @@ class BarPlot(MPLPlot):
orientation = 'vertical'
def __init__(self, data, **kwargs):
+ # we have to treat a series differently than a
+ # 1-column DataFrame w.r.t. color handling
+ self._is_series = isinstance(data, ABCSeries)
self.bar_width = kwargs.pop('width', 0.5)
pos = kwargs.pop('position', 0.5)
kwargs.setdefault('align', 'center')
@@ -1204,7 +1207,10 @@ def _make_plot(self):
for i, (label, y) in enumerate(self._iter_data(fillna=0)):
ax = self._get_ax(i)
kwds = self.kwds.copy()
- kwds['color'] = colors[i % ncolors]
+ if self._is_series:
+ kwds['color'] = colors
+ else:
+ kwds['color'] = colors[i % ncolors]
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index f3b287a8889c3..545680ed70797 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -807,6 +807,20 @@ def test_bar_colors(self):
self._check_colors(ax.patches[::5], facecolors=['green'] * 5)
tm.close()
+ def test_bar_user_colors(self):
+ df = pd.DataFrame({"A": range(4),
+ "B": range(1, 5),
+ "color": ['red', 'blue', 'blue', 'red']})
+ # This should *only* work when `y` is specified, else
+ # we use one color per column
+ ax = df.plot.bar(y='A', color=df['color'])
+ result = [p.get_facecolor() for p in ax.patches]
+ expected = [(1., 0., 0., 1.),
+ (0., 0., 1., 1.),
+ (0., 0., 1., 1.),
+ (1., 0., 0., 1.)]
+ assert result == expected
+
@pytest.mark.slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 8164ad74a190a..2c708ecd39073 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -258,6 +258,16 @@ def test_bar_ignore_index(self):
ax = df.plot.bar(use_index=False, ax=ax)
self._check_text_labels(ax.get_xticklabels(), ['0', '1', '2', '3'])
+ def test_bar_user_colors(self):
+ s = Series([1, 2, 3, 4])
+ ax = s.plot.bar(color=['red', 'blue', 'blue', 'red'])
+ result = [p.get_facecolor() for p in ax.patches]
+ expected = [(1., 0., 0., 1.),
+ (0., 0., 1., 1.),
+ (0., 0., 1., 1.),
+ (1., 0., 0., 1.)]
+ assert result == expected
+
def test_rotation(self):
df = DataFrame(randn(5, 5))
# Default rot 0
| This affect Series-like barplots with user-defined colors. We were always
treating them as dataframe-like, with one color per column.
closes https://github.com/pandas-dev/pandas/issues/16822
This should go into 0.21.0, but if I have the time I'd like a put a more elegant solution in place. | https://api.github.com/repos/pandas-dev/pandas/pulls/17360 | 2017-08-28T15:46:17Z | 2017-09-17T13:20:08Z | 2017-09-17T13:20:08Z | 2017-09-17T13:20:15Z |
CLN: replace %s syntax with .format in io/formats/format.py | diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 733fd3bd39b52..6a98497aa1bfe 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -47,6 +47,7 @@
import itertools
import csv
+from functools import partial
common_docstring = """
Parameters
@@ -109,7 +110,7 @@ def _get_footer(self):
if self.length:
if footer:
footer += ', '
- footer += "Length: %d" % len(self.categorical)
+ footer += "Length: {length}".format(length=len(self.categorical))
level_info = self.categorical._repr_categories_info()
@@ -135,7 +136,7 @@ def to_string(self):
fmt_values = self._get_formatted_values()
- result = ['%s' % i for i in fmt_values]
+ result = [u('{i}').format(i=i) for i in fmt_values]
result = [i.strip() for i in result]
result = u(', ').join(result)
result = [u('[') + result + u(']')]
@@ -191,7 +192,7 @@ def _get_footer(self):
footer = u('')
if getattr(self.series.index, 'freq', None) is not None:
- footer += 'Freq: %s' % self.series.index.freqstr
+ footer += 'Freq: {freq}'.format(freq=self.series.index.freqstr)
if self.name is not False and name is not None:
if footer:
@@ -199,20 +200,21 @@ def _get_footer(self):
series_name = pprint_thing(name,
escape_chars=('\t', '\r', '\n'))
- footer += ("Name: %s" % series_name) if name is not None else ""
+ footer += ((u"Name: {sname}".format(sname=series_name))
+ if name is not None else "")
if (self.length is True or
(self.length == 'truncate' and self.truncate_v)):
if footer:
footer += ', '
- footer += 'Length: %d' % len(self.series)
+ footer += 'Length: {length}'.format(length=len(self.series))
if self.dtype is not False and self.dtype is not None:
name = getattr(self.tr_series.dtype, 'name', None)
if name:
if footer:
footer += ', '
- footer += 'dtype: %s' % pprint_thing(name)
+ footer += u'dtype: {typ}'.format(typ=pprint_thing(name))
# level infos are added to the end and in a new line, like it is done
# for Categoricals
@@ -509,8 +511,10 @@ def _to_str_columns(self):
else:
if is_list_like(self.header):
if len(self.header) != len(self.columns):
- raise ValueError(('Writing %d cols but got %d aliases'
- % (len(self.columns), len(self.header))))
+ raise ValueError(('Writing {ncols} cols but got {nalias} '
+ 'aliases'
+ .format(ncols=len(self.columns),
+ nalias=len(self.header))))
str_columns = [[label] for label in self.header]
else:
str_columns = self._get_formatted_column_labels(frame)
@@ -578,10 +582,10 @@ def to_string(self):
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
- info_line = (u('Empty %s\nColumns: %s\nIndex: %s') %
- (type(self.frame).__name__,
- pprint_thing(frame.columns),
- pprint_thing(frame.index)))
+ info_line = (u('Empty {name}\nColumns: {col}\nIndex: {idx}')
+ .format(name=type(self.frame).__name__,
+ col=pprint_thing(frame.columns),
+ idx=pprint_thing(frame.index)))
text = info_line
else:
@@ -630,8 +634,8 @@ def to_string(self):
self.buf.writelines(text)
if self.should_show_dimensions:
- self.buf.write("\n\n[%d rows x %d columns]" %
- (len(frame), len(frame.columns)))
+ self.buf.write("\n\n[{nrows} rows x {ncols} columns]"
+ .format(nrows=len(frame), ncols=len(frame.columns)))
def _join_multiline(self, *strcols):
lwidth = self.line_width
@@ -805,7 +809,8 @@ def _get_formatted_index(self, frame):
# empty space for columns
if show_col_names:
- col_header = ['%s' % x for x in self._get_column_name_list()]
+ col_header = ['{x}'.format(x=x)
+ for x in self._get_column_name_list()]
else:
col_header = [''] * columns.nlevels
@@ -861,9 +866,10 @@ def write_result(self, buf):
# string representation of the columns
if len(self.frame.columns) == 0 or len(self.frame.index) == 0:
- info_line = (u('Empty %s\nColumns: %s\nIndex: %s') %
- (type(self.frame).__name__, self.frame.columns,
- self.frame.index))
+ info_line = (u('Empty {name}\nColumns: {col}\nIndex: {idx}')
+ .format(name=type(self.frame).__name__,
+ col=self.frame.columns,
+ idx=self.frame.index))
strcols = [[info_line]]
else:
strcols = self.fmt._to_str_columns()
@@ -906,14 +912,16 @@ def get_col_type(dtype):
column_format = index_format + column_format
elif not isinstance(column_format,
compat.string_types): # pragma: no cover
- raise AssertionError('column_format must be str or unicode, not %s'
- % type(column_format))
+ raise AssertionError('column_format must be str or unicode, '
+ 'not {typ}'.format(typ=type(column_format)))
if not self.longtable:
- buf.write('\\begin{tabular}{%s}\n' % column_format)
+ buf.write('\\begin{{tabular}}{{{fmt}}}\n'
+ .format(fmt=column_format))
buf.write('\\toprule\n')
else:
- buf.write('\\begin{longtable}{%s}\n' % column_format)
+ buf.write('\\begin{{longtable}}{{{fmt}}}\n'
+ .format(fmt=column_format))
buf.write('\\toprule\n')
ilevels = self.frame.index.nlevels
@@ -948,7 +956,7 @@ def get_col_type(dtype):
crow = [x if x else '{}' for x in row]
if self.bold_rows and self.fmt.index:
# bold row labels
- crow = ['\\textbf{%s}' % x
+ crow = ['\\textbf{{{x}}}'.format(x=x)
if j < ilevels and x.strip() not in ['', '{}'] else x
for j, x in enumerate(crow)]
if i < clevels and self.fmt.header and self.multicolumn:
@@ -986,9 +994,9 @@ def _format_multicolumn(self, row, ilevels):
def append_col():
# write multicolumn if needed
if ncol > 1:
- row2.append('\\multicolumn{{{0:d}}}{{{1:s}}}{{{2:s}}}'
- .format(ncol, self.multicolumn_format,
- coltext.strip()))
+ row2.append('\\multicolumn{{{ncol:d}}}{{{fmt:s}}}{{{txt:s}}}'
+ .format(ncol=ncol, fmt=self.multicolumn_format,
+ txt=coltext.strip()))
# don't modify where not needed
else:
row2.append(coltext)
@@ -1027,8 +1035,8 @@ def _format_multirow(self, row, ilevels, i, rows):
break
if nrow > 1:
# overwrite non-multirow entry
- row[j] = '\\multirow{{{0:d}}}{{*}}{{{1:s}}}'.format(
- nrow, row[j].strip())
+ row[j] = '\\multirow{{{nrow:d}}}{{*}}{{{row:s}}}'.format(
+ nrow=nrow, row=row[j].strip())
# save when to end the current block with \cline
self.clinebuf.append([i + nrow - 1, j + 1])
return row
@@ -1039,7 +1047,8 @@ def _print_cline(self, buf, i, icol):
"""
for cl in self.clinebuf:
if cl[0] == i:
- buf.write('\cline{{{0:d}-{1:d}}}\n'.format(cl[1], icol))
+ buf.write('\cline{{{cl:d}-{icol:d}}}\n'
+ .format(cl=cl[1], icol=icol))
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
@@ -1076,7 +1085,8 @@ def write(self, s, indent=0):
def write_th(self, s, indent=0, tags=None):
if self.fmt.col_space is not None and self.fmt.col_space > 0:
tags = (tags or "")
- tags += 'style="min-width: %s;"' % self.fmt.col_space
+ tags += ('style="min-width: {colspace};"'
+ .format(colspace=self.fmt.col_space))
return self._write_cell(s, kind='th', indent=indent, tags=tags)
@@ -1085,9 +1095,9 @@ def write_td(self, s, indent=0, tags=None):
def _write_cell(self, s, kind='td', indent=0, tags=None):
if tags is not None:
- start_tag = '<%s %s>' % (kind, tags)
+ start_tag = '<{kind} {tags}>'.format(kind=kind, tags=tags)
else:
- start_tag = '<%s>' % kind
+ start_tag = '<{kind}>'.format(kind=kind)
if self.escape:
# escape & first to prevent double escaping of &
@@ -1096,7 +1106,8 @@ def _write_cell(self, s, kind='td', indent=0, tags=None):
else:
esc = {}
rs = pprint_thing(s, escape_chars=esc).strip()
- self.write('%s%s</%s>' % (start_tag, rs, kind), indent)
+ self.write(u'{start}{rs}</{kind}>'
+ .format(start=start_tag, rs=rs, kind=kind), indent)
def write_tr(self, line, indent=0, indent_delta=4, header=False,
align=None, tags=None, nindex_levels=0):
@@ -1106,7 +1117,8 @@ def write_tr(self, line, indent=0, indent_delta=4, header=False,
if align is None:
self.write('<tr>', indent)
else:
- self.write('<tr style="text-align: %s;">' % align, indent)
+ self.write('<tr style="text-align: {align};">'
+ .format(align=align), indent)
indent += indent_delta
for i, s in enumerate(line):
@@ -1146,8 +1158,8 @@ def write_result(self, buf):
if isinstance(self.classes, str):
self.classes = self.classes.split()
if not isinstance(self.classes, (list, tuple)):
- raise AssertionError('classes must be list or tuple, '
- 'not %s' % type(self.classes))
+ raise AssertionError('classes must be list or tuple, not {typ}'
+ .format(typ=type(self.classes)))
_classes.extend(self.classes)
if self.notebook:
@@ -1159,12 +1171,11 @@ def write_result(self, buf):
except (ImportError, AttributeError):
pass
- self.write('<div{0}>'.format(div_style))
+ self.write('<div{style}>'.format(style=div_style))
self.write_style()
- self.write('<table border="%s" class="%s">' % (self.border,
- ' '.join(_classes)),
- indent)
+ self.write('<table border="{border}" class="{cls}">'
+ .format(border=self.border, cls=' '.join(_classes)), indent)
indent += self.indent_delta
indent = self._write_header(indent)
@@ -1173,8 +1184,10 @@ def write_result(self, buf):
self.write('</table>', indent)
if self.should_show_dimensions:
by = chr(215) if compat.PY3 else unichr(215) # ×
- self.write(u('<p>%d rows %s %d columns</p>') %
- (len(frame), by, len(frame.columns)))
+ self.write(u('<p>{rows} rows {by} {cols} columns</p>')
+ .format(rows=len(frame),
+ by=by,
+ cols=len(frame.columns)))
if self.notebook:
self.write('</div>')
@@ -1199,7 +1212,7 @@ def _column_header():
row.append(single_column_table(self.columns.names))
else:
row.append('')
- style = "text-align: %s;" % self.fmt.justify
+ style = "text-align: {just};".format(just=self.fmt.justify)
row.extend([single_column_table(c, self.fmt.justify, style)
for c in self.columns])
else:
@@ -1214,7 +1227,7 @@ def _column_header():
indent += self.indent_delta
if isinstance(self.columns, MultiIndex):
- template = 'colspan="%d" halign="left"'
+ template = 'colspan="{span:d}" halign="left"'
if self.fmt.sparsify:
# GH3547
@@ -1282,7 +1295,7 @@ def _column_header():
for i, v in enumerate(values):
if i in records:
if records[i] > 1:
- tags[j] = template % records[i]
+ tags[j] = template.format(span=records[i])
else:
continue
j += 1
@@ -1372,7 +1385,7 @@ def _write_regular_rows(self, fmt_values, indent):
nindex_levels=1)
def _write_hierarchical_rows(self, fmt_values, indent):
- template = 'rowspan="%d" valign="top"'
+ template = 'rowspan="{span}" valign="top"'
truncate_h = self.fmt.truncate_h
truncate_v = self.fmt.truncate_v
@@ -1447,7 +1460,7 @@ def _write_hierarchical_rows(self, fmt_values, indent):
for records, v in zip(level_lengths, idx_values[i]):
if i in records:
if records[i] > 1:
- tags[j] = template % records[i]
+ tags[j] = template.format(span=records[i])
else:
sparse_offset += 1
continue
@@ -1615,8 +1628,9 @@ def _save_header(self):
return
if has_aliases:
if len(header) != len(cols):
- raise ValueError(('Writing %d cols but got %d aliases'
- % (len(cols), len(header))))
+ raise ValueError(('Writing {ncols} cols but got {nalias} '
+ 'aliases'.format(ncols=len(cols),
+ nalias=len(header))))
else:
write_cols = header
else:
@@ -1790,8 +1804,9 @@ def _format_strings(self):
if self.float_format is None:
float_format = get_option("display.float_format")
if float_format is None:
- fmt_str = '%% .%dg' % get_option("display.precision")
- float_format = lambda x: fmt_str % x
+ fmt_str = ('{{x: .{prec:d}g}}'
+ .format(prec=get_option("display.precision")))
+ float_format = lambda x: fmt_str.format(x=x)
else:
float_format = self.float_format
@@ -1807,10 +1822,10 @@ def _format(x):
return 'NaT'
return self.na_rep
elif isinstance(x, PandasObject):
- return '%s' % x
+ return u'{x}'.format(x=x)
else:
# object dtype
- return '%s' % formatter(x)
+ return u'{x}'.format(x=formatter(x))
vals = self.values
if isinstance(vals, Index):
@@ -1824,11 +1839,11 @@ def _format(x):
fmt_values = []
for i, v in enumerate(vals):
if not is_float_type[i] and leading_space:
- fmt_values.append(' %s' % _format(v))
+ fmt_values.append(u' {v}'.format(v=_format(v)))
elif is_float_type[i]:
fmt_values.append(float_format(v))
else:
- fmt_values.append(' %s' % _format(v))
+ fmt_values.append(u' {v}'.format(v=_format(v)))
return fmt_values
@@ -1864,7 +1879,7 @@ def _value_formatter(self, float_format=None, threshold=None):
# because str(0.0) = '0.0' while '%g' % 0.0 = '0'
if float_format:
def base_formatter(v):
- return (float_format % v) if notna(v) else self.na_rep
+ return float_format(value=v) if notna(v) else self.na_rep
else:
def base_formatter(v):
return str(v) if notna(v) else self.na_rep
@@ -1925,10 +1940,14 @@ def format_values_with(float_format):
# There is a special default string when we are fixed-width
# The default is otherwise to use str instead of a formatting string
- if self.float_format is None and self.fixed_width:
- float_format = '%% .%df' % self.digits
+ if self.float_format is None:
+ if self.fixed_width:
+ float_format = partial('{value: .{digits:d}f}'.format,
+ digits=self.digits)
+ else:
+ float_format = self.float_format
else:
- float_format = self.float_format
+ float_format = lambda value: self.float_format % value
formatted_values = format_values_with(float_format)
@@ -1955,7 +1974,8 @@ def format_values_with(float_format):
(abs_vals > 0)).any()
if has_small_values or (too_long and has_large_values):
- float_format = '%% .%de' % self.digits
+ float_format = partial('{value: .{digits:d}e}'.format,
+ digits=self.digits)
formatted_values = format_values_with(float_format)
return formatted_values
@@ -1971,7 +1991,7 @@ def _format_strings(self):
class IntArrayFormatter(GenericArrayFormatter):
def _format_strings(self):
- formatter = self.formatter or (lambda x: '% d' % x)
+ formatter = self.formatter or (lambda x: '{x: d}'.format(x=x))
fmt_values = [formatter(x) for x in self.values]
return fmt_values
@@ -2023,7 +2043,7 @@ def _format_strings(self):
# periods may contains different freq
values = Index(self.values, dtype='object').to_native_types()
- formatter = self.formatter or (lambda x: '%s' % x)
+ formatter = self.formatter or (lambda x: '{x}'.format(x=x))
fmt_values = [formatter(x) for x in values]
return fmt_values
@@ -2223,7 +2243,7 @@ def _formatter(x):
x = Timedelta(x)
result = x._repr_base(format=format)
if box:
- result = "'{0}'".format(result)
+ result = "'{res}'".format(res=result)
return result
return _formatter
@@ -2278,12 +2298,12 @@ def _cond(values):
def single_column_table(column, align=None, style=None):
table = '<table'
if align is not None:
- table += (' align="%s"' % align)
+ table += (' align="{align}"'.format(align=align))
if style is not None:
- table += (' style="%s"' % style)
+ table += (' style="{style}"'.format(style=style))
table += '><tbody>'
for i in column:
- table += ('<tr><td>%s</td></tr>' % str(i))
+ table += ('<tr><td>{i!s}</td></tr>'.format(i=i))
table += '</tbody></table>'
return table
@@ -2291,7 +2311,7 @@ def single_column_table(column, align=None, style=None):
def single_row_table(row): # pragma: no cover
table = '<table><tbody><tr>'
for i in row:
- table += ('<td>%s</td>' % str(i))
+ table += ('<td>{i!s}</td>'.format(i=i))
table += '</tr></tbody></table>'
return table
@@ -2385,18 +2405,19 @@ def __call__(self, num):
prefix = self.ENG_PREFIXES[int_pow10]
else:
if int_pow10 < 0:
- prefix = 'E-%02d' % (-int_pow10)
+ prefix = 'E-{pow10:02d}'.format(pow10=-int_pow10)
else:
- prefix = 'E+%02d' % int_pow10
+ prefix = 'E+{pow10:02d}'.format(pow10=int_pow10)
mant = sign * dnum / (10**pow10)
if self.accuracy is None: # pragma: no cover
- format_str = u("% g%s")
+ format_str = u("{mant: g}{prefix}")
else:
- format_str = (u("%% .%if%%s") % self.accuracy)
+ format_str = (u("{{mant: .{acc:d}f}}{{prefix}}")
+ .format(acc=self.accuracy))
- formatted = format_str % (mant, prefix)
+ formatted = format_str.format(mant=mant, prefix=prefix)
return formatted # .strip()
| Progress toward issue #16130. Converted old string formatting to new string formatting in io/formats/format.py.
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/17358 | 2017-08-28T14:14:42Z | 2017-08-30T10:19:45Z | 2017-08-30T10:19:45Z | 2017-08-30T10:19:48Z |
CLN: replace %s syntax with .format in core/indexing.py | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 6b9ad5cd2d93b..b7a51afcedabf 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -188,8 +188,9 @@ def _has_valid_tuple(self, key):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if not self._has_valid_type(k, i):
- raise ValueError("Location based indexing can only have [%s] "
- "types" % self._valid_types)
+ raise ValueError("Location based indexing can only have "
+ "[{types}] types"
+ .format(types=self._valid_types))
def _should_validate_iterable(self, axis=0):
""" return a boolean whether this axes needs validation for a passed
@@ -263,11 +264,11 @@ def _has_valid_positional_setitem_indexer(self, indexer):
pass
elif is_integer(i):
if i >= len(ax):
- raise IndexError("{0} cannot enlarge its target object"
- .format(self.name))
+ raise IndexError("{name} cannot enlarge its target "
+ "object".format(name=self.name))
elif isinstance(i, dict):
- raise IndexError("{0} cannot enlarge its target object"
- .format(self.name))
+ raise IndexError("{name} cannot enlarge its target object"
+ .format(name=self.name))
return True
@@ -1235,7 +1236,8 @@ def _convert_to_indexer(self, obj, axis=0, is_setter=False):
mask = check == -1
if mask.any():
- raise KeyError('%s not in index' % objarr[mask])
+ raise KeyError('{mask} not in index'
+ .format(mask=objarr[mask]))
return _values_from_object(indexer)
@@ -1421,8 +1423,9 @@ def _has_valid_type(self, key, axis):
if (not is_iterator(key) and len(key) and
np.all(ax.get_indexer_for(key) < 0)):
- raise KeyError("None of [%s] are in the [%s]" %
- (key, self.obj._get_axis_name(axis)))
+ raise KeyError(u"None of [{key}] are in the [{axis}]"
+ .format(key=key,
+ axis=self.obj._get_axis_name(axis)))
return True
@@ -1432,8 +1435,9 @@ def error():
if isna(key):
raise TypeError("cannot use label indexing with a null "
"key")
- raise KeyError("the label [%s] is not in the [%s]" %
- (key, self.obj._get_axis_name(axis)))
+ raise KeyError(u"the label [{key}] is not in the [{axis}]"
+ .format(key=key,
+ axis=self.obj._get_axis_name(axis)))
try:
key = self._convert_scalar_indexer(key, axis)
| Progress toward issue #16130. Converted old string formatting to new string formatting in core/indexing.py.
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` | https://api.github.com/repos/pandas-dev/pandas/pulls/17357 | 2017-08-28T13:22:53Z | 2017-08-28T13:58:06Z | 2017-08-28T13:58:06Z | 2017-08-28T14:05:42Z |
BUG: Timestamp.replace chaining not compat with datetime.replace | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index fcadd26156b1d..b6ca7b5ec50b3 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -342,6 +342,7 @@ Conversion
- Fixed the return type of ``IntervalIndex.is_non_overlapping_monotonic`` to be a Python ``bool`` for consistency with similar attributes/methods. Previously returned a ``numpy.bool_``. (:issue:`17237`)
- Bug in ``IntervalIndex.is_non_overlapping_monotonic`` when intervals are closed on both sides and overlap at a point (:issue:`16560`)
- Bug in :func:`Series.fillna` returns frame when ``inplace=True`` and ``value`` is dict (:issue:`16156`)
+- Bug in ``Timestamp.replace`` when replacing ``tzinfo`` around DST changes (:issue:`15683`)
Indexing
^^^^^^^^
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index b5aca2e3ec309..9ed849f34c9a0 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -700,14 +700,16 @@ class Timestamp(_Timestamp):
cdef:
pandas_datetimestruct dts
- int64_t value
- object _tzinfo, result, k, v
+ int64_t value, value_tz, offset
+ object _tzinfo, result, k, v, ts_input
# set to naive if needed
_tzinfo = self.tzinfo
value = self.value
if _tzinfo is not None:
- value = tz_convert_single(value, 'UTC', _tzinfo)
+ value_tz = tz_convert_single(value, _tzinfo, 'UTC')
+ offset = value - value_tz
+ value += offset
# setup components
pandas_datetime_to_datetimestruct(value, PANDAS_FR_ns, &dts)
@@ -741,16 +743,14 @@ class Timestamp(_Timestamp):
_tzinfo = tzinfo
# reconstruct & check bounds
- value = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
+ ts_input = datetime(dts.year, dts.month, dts.day, dts.hour, dts.min,
+ dts.sec, dts.us, tzinfo=_tzinfo)
+ ts = convert_to_tsobject(ts_input, _tzinfo, None, 0, 0)
+ value = ts.value + (dts.ps // 1000)
if value != NPY_NAT:
_check_dts_bounds(&dts)
- # set tz if needed
- if _tzinfo is not None:
- value = tz_convert_single(value, _tzinfo, 'UTC')
-
- result = create_timestamp_from_ts(value, dts, _tzinfo, self.freq)
- return result
+ return create_timestamp_from_ts(value, dts, _tzinfo, self.freq)
def isoformat(self, sep='T'):
base = super(_Timestamp, self).isoformat(sep=sep)
diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py
index a9ecfd797a32b..ac1a338d2844d 100644
--- a/pandas/tests/tseries/test_timezones.py
+++ b/pandas/tests/tseries/test_timezones.py
@@ -1269,6 +1269,27 @@ def test_ambiguous_compat(self):
assert (result_pytz.to_pydatetime().tzname() ==
result_dateutil.to_pydatetime().tzname())
+ def test_replace_tzinfo(self):
+ # GH 15683
+ dt = datetime(2016, 3, 27, 1)
+ tzinfo = pytz.timezone('CET').localize(dt, is_dst=False).tzinfo
+
+ result_dt = dt.replace(tzinfo=tzinfo)
+ result_pd = Timestamp(dt).replace(tzinfo=tzinfo)
+
+ if hasattr(result_dt, 'timestamp'): # New method in Py 3.3
+ assert result_dt.timestamp() == result_pd.timestamp()
+ assert result_dt == result_pd
+ assert result_dt == result_pd.to_pydatetime()
+
+ result_dt = dt.replace(tzinfo=tzinfo).replace(tzinfo=None)
+ result_pd = Timestamp(dt).replace(tzinfo=tzinfo).replace(tzinfo=None)
+
+ if hasattr(result_dt, 'timestamp'): # New method in Py 3.3
+ assert result_dt.timestamp() == result_pd.timestamp()
+ assert result_dt == result_pd
+ assert result_dt == result_pd.to_pydatetime()
+
def test_index_equals_with_tz(self):
left = date_range('1/1/2011', periods=100, freq='H', tz='utc')
right = date_range('1/1/2011', periods=100, freq='H', tz='US/Eastern')
| - [x] closes #15683
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This is a clean version of https://github.com/pandas-dev/pandas/pull/16110 and the last thing I’m going to do with this issue. | https://api.github.com/repos/pandas-dev/pandas/pulls/17356 | 2017-08-28T08:13:36Z | 2017-09-13T02:53:34Z | null | 2017-09-13T02:53:34Z |
BUG: Respect dups in reindexing CategoricalIndex | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index fcadd26156b1d..942e37a29f8d5 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -358,6 +358,7 @@ Indexing
- Allow unicode empty strings as placeholders in multilevel columns in Python 2 (:issue:`17099`)
- Bug in ``.iloc`` when used with inplace addition or assignment and an int indexer on a ``MultiIndex`` causing the wrong indexes to be read from and written to (:issue:`17148`)
- Bug in ``.isin()`` in which checking membership in empty ``Series`` objects raised an error (:issue:`16991`)
+- Bug in ``CategoricalIndex`` reindexing in which specified indices containing duplicates were not being respected (:issue:`17323`)
I/O
^^^
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index f22407308e094..0681202289311 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -487,7 +487,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = ibase._ensure_index(target)
- if self.equals(target):
+ if self.is_unique and self.equals(target):
return np.arange(len(self), dtype='intp')
if method == 'pad' or method == 'backfill':
diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py
index 64bd6df361aeb..05d31af57b36c 100644
--- a/pandas/tests/indexes/test_category.py
+++ b/pandas/tests/indexes/test_category.py
@@ -365,18 +365,18 @@ def test_astype(self):
tm.assert_index_equal(result, expected)
def test_reindex_base(self):
-
- # determined by cat ordering
- idx = self.create_index()
+ # Determined by cat ordering.
+ idx = CategoricalIndex(list("cab"), categories=list("cab"))
expected = np.arange(len(idx), dtype=np.intp)
actual = idx.get_indexer(idx)
tm.assert_numpy_array_equal(expected, actual)
- with tm.assert_raises_regex(ValueError, 'Invalid fill method'):
- idx.get_indexer(idx, method='invalid')
+ with tm.assert_raises_regex(ValueError, "Invalid fill method"):
+ idx.get_indexer(idx, method="invalid")
def test_reindexing(self):
+ np.random.seed(123456789)
ci = self.create_index()
oidx = Index(np.array(ci))
@@ -388,6 +388,18 @@ def test_reindexing(self):
actual = ci.get_indexer(finder)
tm.assert_numpy_array_equal(expected, actual)
+ # see gh-17323
+ #
+ # Even when indexer is equal to the
+ # members in the index, we should
+ # respect duplicates instead of taking
+ # the fast-track path.
+ for finder in [list("aabbca"), list("aababca")]:
+ expected = oidx.get_indexer_non_unique(finder)[0]
+
+ actual = ci.get_indexer(finder)
+ tm.assert_numpy_array_equal(expected, actual)
+
def test_reindex_dtype(self):
c = CategoricalIndex(['a', 'b', 'c', 'a'])
res, indexer = c.reindex(['a', 'c'])
| When the indexer is identical to the elements, we should still return duplicates when the indexer
contains duplicates.
Closes #17323.
| https://api.github.com/repos/pandas-dev/pandas/pulls/17355 | 2017-08-28T04:58:31Z | 2017-08-29T12:52:52Z | 2017-08-29T12:52:51Z | 2017-08-29T14:50:23Z |
DOC: Point to dev-docs in issue template | diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 237e61487d13a..e33835c462511 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -12,6 +12,12 @@
**Note**: Many problems can be resolved by simply upgrading `pandas` to the latest version. Before submitting, please check if that solution works for you. If possible, you may want to check if `master` addresses this issue, but that is not necessary.
+For documentation-related issues, you can check the latest versions of the docs on `master` here:
+
+https://pandas-docs.github.io/pandas-docs-travis/
+
+If the issue has not been resolved there, go ahead and file it in the issue tracker.
+
#### Expected Output
#### Output of ``pd.show_versions()``
| It's all about helping people help themselves. 😄 | https://api.github.com/repos/pandas-dev/pandas/pulls/17353 | 2017-08-27T20:43:16Z | 2017-08-28T14:05:30Z | 2017-08-28T14:05:30Z | 2017-08-28T15:16:07Z |
DEPR: Deprecate convert parameter in take | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index e0e0c18052550..9f7e9db5cf210 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -490,6 +490,7 @@ Deprecations
~~~~~~~~~~~~
- :func:`read_excel()` has deprecated ``sheetname`` in favor of ``sheet_name`` for consistency with ``.to_excel()`` (:issue:`10559`).
+- The ``convert`` parameter has been deprecated in the ``.take()`` method, as it was not being respected (:issue:`16948`)
- ``pd.options.html.border`` has been deprecated in favor of ``pd.options.display.html.border`` (:issue:`15793`).
- :func:`SeriesGroupBy.nth` has deprecated ``True`` in favor of ``'all'`` for its kwarg ``dropna`` (:issue:`11038`).
- :func:`DataFrame.as_blocks` is deprecated, as this is exposing the internal implementation (:issue:`17302`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index a12e611f6618a..5d439f88bca15 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2034,7 +2034,7 @@ def _ixs(self, i, axis=0):
return self.loc[:, lab_slice]
else:
if isinstance(label, Index):
- return self.take(i, axis=1, convert=True)
+ return self._take(i, axis=1, convert=True)
index_len = len(self.index)
@@ -2116,10 +2116,10 @@ def _getitem_array(self, key):
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
- return self.take(indexer, axis=0, convert=False)
+ return self._take(indexer, axis=0, convert=False)
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
- return self.take(indexer, axis=1, convert=True)
+ return self._take(indexer, axis=1, convert=True)
def _getitem_multilevel(self, key):
loc = self.columns.get_loc(key)
@@ -3355,7 +3355,7 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None,
else:
raise TypeError('must specify how or thresh')
- result = self.take(mask.nonzero()[0], axis=axis, convert=False)
+ result = self._take(mask.nonzero()[0], axis=axis, convert=False)
if inplace:
self._update_inplace(result)
@@ -3486,7 +3486,7 @@ def sort_values(self, by, axis=0, ascending=True, inplace=False,
new_data = self._data.take(indexer,
axis=self._get_block_manager_axis(axis),
- convert=False, verify=False)
+ verify=False)
if inplace:
return self._update_inplace(new_data)
@@ -3547,7 +3547,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
baxis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer,
axis=baxis,
- convert=False, verify=False)
+ verify=False)
# reconstruct axis if needed
new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 2fb0e348c01c0..99c58b246deb7 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -38,6 +38,7 @@
from pandas.core.index import (Index, MultiIndex, _ensure_index,
InvalidIndexError)
import pandas.core.indexing as indexing
+from pandas.core.indexing import maybe_convert_indices
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex, Period
from pandas.core.internals import BlockManager
@@ -1822,7 +1823,8 @@ def _iget_item_cache(self, item):
if ax.is_unique:
lower = self._get_item_cache(ax[item])
else:
- lower = self.take(item, axis=self._info_axis_number, convert=True)
+ lower = self._take(item, axis=self._info_axis_number,
+ convert=True)
return lower
def _box_item_values(self, key, values):
@@ -2057,8 +2059,63 @@ def __delitem__(self, key):
except KeyError:
pass
- def take(self, indices, axis=0, convert=True, is_copy=True, **kwargs):
+ _shared_docs['_take'] = """
+ Return the elements in the given *positional* indices along an axis.
+
+ This means that we are not indexing according to actual values in
+ the index attribute of the object. We are indexing according to the
+ actual position of the element in the object.
+
+ This is the internal version of ``.take()`` and will contain a wider
+ selection of parameters useful for internal use but not as suitable
+ for public usage.
+
+ Parameters
+ ----------
+ indices : array-like
+ An array of ints indicating which positions to take.
+ axis : int, default 0
+ The axis on which to select elements. "0" means that we are
+ selecting rows, "1" means that we are selecting columns, etc.
+ convert : bool, default True
+ Whether to convert negative indices into positive ones.
+ For example, ``-1`` would map to the ``len(axis) - 1``.
+ The conversions are similar to the behavior of indexing a
+ regular Python list.
+ is_copy : bool, default True
+ Whether to return a copy of the original object or not.
+
+ Returns
+ -------
+ taken : type of caller
+ An array-like containing the elements taken from the object.
+
+ See Also
+ --------
+ numpy.ndarray.take
+ numpy.take
"""
+
+ @Appender(_shared_docs['_take'])
+ def _take(self, indices, axis=0, convert=True, is_copy=True):
+ self._consolidate_inplace()
+
+ if convert:
+ indices = maybe_convert_indices(indices, len(self._get_axis(axis)))
+
+ new_data = self._data.take(indices,
+ axis=self._get_block_manager_axis(axis),
+ verify=True)
+ result = self._constructor(new_data).__finalize__(self)
+
+ # Maybe set copy if we didn't actually change the index.
+ if is_copy:
+ if not result._get_axis(axis).equals(self._get_axis(axis)):
+ result._set_is_copy(self)
+
+ return result
+
+ _shared_docs['take'] = """
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
@@ -2073,9 +2130,12 @@ def take(self, indices, axis=0, convert=True, is_copy=True, **kwargs):
The axis on which to select elements. "0" means that we are
selecting rows, "1" means that we are selecting columns, etc.
convert : bool, default True
- Whether to convert negative indices to positive ones, just as with
- indexing into Python lists. For example, if `-1` was passed in,
- this index would be converted ``n - 1``.
+ .. deprecated:: 0.21.0
+
+ Whether to convert negative indices into positive ones.
+ For example, ``-1`` would map to the ``len(axis) - 1``.
+ The conversions are similar to the behavior of indexing a
+ regular Python list.
is_copy : bool, default True
Whether to return a copy of the original object or not.
@@ -2131,19 +2191,17 @@ class max_speed
numpy.ndarray.take
numpy.take
"""
+
+ @Appender(_shared_docs['take'])
+ def take(self, indices, axis=0, convert=True, is_copy=True, **kwargs):
nv.validate_take(tuple(), kwargs)
- self._consolidate_inplace()
- new_data = self._data.take(indices,
- axis=self._get_block_manager_axis(axis),
- convert=True, verify=True)
- result = self._constructor(new_data).__finalize__(self)
- # maybe set copy if we didn't actually change the index
- if is_copy:
- if not result._get_axis(axis).equals(self._get_axis(axis)):
- result._set_is_copy(self)
+ if not convert:
+ msg = ("The 'convert' parameter is deprecated "
+ "and will be removed in a future version.")
+ warnings.warn(msg, FutureWarning, stacklevel=2)
- return result
+ return self._take(indices, axis=axis, convert=convert, is_copy=is_copy)
def xs(self, key, axis=0, level=None, drop_level=True):
"""
@@ -2244,9 +2302,9 @@ def xs(self, key, axis=0, level=None, drop_level=True):
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
inds, = loc.nonzero()
- return self.take(inds, axis=axis, convert=False)
+ return self._take(inds, axis=axis, convert=False)
else:
- return self.take(loc, axis=axis, convert=True)
+ return self._take(loc, axis=axis, convert=True)
if not is_scalar(loc):
new_index = self.index[loc]
@@ -5112,7 +5170,7 @@ def at_time(self, time, asof=False):
"""
try:
indexer = self.index.indexer_at_time(time, asof=asof)
- return self.take(indexer, convert=False)
+ return self._take(indexer, convert=False)
except AttributeError:
raise TypeError('Index must be DatetimeIndex')
@@ -5136,7 +5194,7 @@ def between_time(self, start_time, end_time, include_start=True,
indexer = self.index.indexer_between_time(
start_time, end_time, include_start=include_start,
include_end=include_end)
- return self.take(indexer, convert=False)
+ return self._take(indexer, convert=False)
except AttributeError:
raise TypeError('Index must be DatetimeIndex')
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index a62ae40a85941..c9edf52d992e7 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -319,8 +319,8 @@ def _set_grouper(self, obj, sort=False):
# use stable sort to support first, last, nth
indexer = self.indexer = ax.argsort(kind='mergesort')
ax = ax.take(indexer)
- obj = obj.take(indexer, axis=self.axis,
- convert=False, is_copy=False)
+ obj = obj._take(indexer, axis=self.axis,
+ convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
@@ -643,7 +643,7 @@ def get_group(self, name, obj=None):
if not len(inds):
raise KeyError(name)
- return obj.take(inds, axis=self.axis, convert=False)
+ return obj._take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
@@ -2202,7 +2202,7 @@ def _aggregate_series_fast(self, obj, func):
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = get_group_index_sorter(group_index, ngroups)
- obj = obj.take(indexer, convert=False).to_dense()
+ obj = obj._take(indexer, convert=False).to_dense()
group_index = algorithms.take_nd(
group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
@@ -4435,7 +4435,7 @@ def __iter__(self):
yield i, self._chop(sdata, slice(start, end))
def _get_sorted_data(self):
- return self.data.take(self.sort_idx, axis=self.axis, convert=False)
+ return self.data._take(self.sort_idx, axis=self.axis, convert=False)
def _chop(self, sdata, slice_obj):
return sdata.iloc[slice_obj]
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index b7a51afcedabf..2ea1b8a238913 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1093,7 +1093,7 @@ def _getitem_iterable(self, key, axis=0):
if is_bool_indexer(key):
key = check_bool_indexer(labels, key)
inds, = key.nonzero()
- return self.obj.take(inds, axis=axis, convert=False)
+ return self.obj._take(inds, axis=axis, convert=False)
else:
# Have the index compute an indexer or return None
# if it cannot handle; we only act on all found values
@@ -1126,15 +1126,15 @@ def _getitem_iterable(self, key, axis=0):
keyarr)
if new_indexer is not None:
- result = self.obj.take(indexer[indexer != -1], axis=axis,
- convert=False)
+ result = self.obj._take(indexer[indexer != -1], axis=axis,
+ convert=False)
result = result._reindex_with_indexers(
{axis: [new_target, new_indexer]},
copy=True, allow_dups=True)
else:
- result = self.obj.take(indexer, axis=axis, convert=False)
+ result = self.obj._take(indexer, axis=axis)
return result
@@ -1265,7 +1265,7 @@ def _get_slice_axis(self, slice_obj, axis=0):
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis, kind='iloc')
else:
- return self.obj.take(indexer, axis=axis, convert=False)
+ return self.obj._take(indexer, axis=axis, convert=False)
class _IXIndexer(_NDFrameIndexer):
@@ -1350,7 +1350,7 @@ def _getbool_axis(self, key, axis=0):
key = check_bool_indexer(labels, key)
inds, = key.nonzero()
try:
- return self.obj.take(inds, axis=axis, convert=False)
+ return self.obj._take(inds, axis=axis, convert=False)
except Exception as detail:
raise self._exception(detail)
@@ -1367,7 +1367,7 @@ def _get_slice_axis(self, slice_obj, axis=0):
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis, kind='iloc')
else:
- return self.obj.take(indexer, axis=axis, convert=False)
+ return self.obj._take(indexer, axis=axis, convert=False)
class _LocIndexer(_LocationIndexer):
@@ -1707,7 +1707,7 @@ def _get_slice_axis(self, slice_obj, axis=0):
if isinstance(slice_obj, slice):
return self._slice(slice_obj, axis=axis, kind='iloc')
else:
- return self.obj.take(slice_obj, axis=axis, convert=False)
+ return self.obj._take(slice_obj, axis=axis, convert=False)
def _get_list_axis(self, key, axis=0):
"""
@@ -1723,7 +1723,7 @@ def _get_list_axis(self, key, axis=0):
Series object
"""
try:
- return self.obj.take(key, axis=axis, convert=False)
+ return self.obj._take(key, axis=axis, convert=False)
except IndexError:
# re-raise with different error message
raise IndexError("positional indexers are out-of-bounds")
diff --git a/pandas/core/series.py b/pandas/core/series.py
index a05324142b223..97f39a680c8c9 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2563,35 +2563,24 @@ def memory_usage(self, index=True, deep=False):
v += self.index.memory_usage(deep=deep)
return v
- def take(self, indices, axis=0, convert=True, is_copy=False, **kwargs):
- """
- return Series corresponding to requested indices
-
- Parameters
- ----------
- indices : list / array of ints
- convert : translate negative to positive indices (default)
-
- Returns
- -------
- taken : Series
-
- See also
- --------
- numpy.ndarray.take
- """
- if kwargs:
- nv.validate_take(tuple(), kwargs)
-
- # check/convert indicies here
+ @Appender(generic._shared_docs['_take'])
+ def _take(self, indices, axis=0, convert=True, is_copy=False):
if convert:
indices = maybe_convert_indices(indices, len(self._get_axis(axis)))
indices = _ensure_platform_int(indices)
new_index = self.index.take(indices)
new_values = self._values.take(indices)
- return (self._constructor(new_values, index=new_index, fastpath=True)
- .__finalize__(self))
+
+ result = (self._constructor(new_values, index=new_index,
+ fastpath=True).__finalize__(self))
+
+ # Maybe set copy if we didn't actually change the index.
+ if is_copy:
+ if not result._get_axis(axis).equals(self._get_axis(axis)):
+ result._set_is_copy(self)
+
+ return result
def isin(self, values):
"""
diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py
index 2aecb9d7c4ffb..5166dc927989e 100644
--- a/pandas/core/sparse/series.py
+++ b/pandas/core/sparse/series.py
@@ -602,16 +602,15 @@ def sparse_reindex(self, new_index):
sparse_index=new_index,
fill_value=self.fill_value).__finalize__(self)
+ @Appender(generic._shared_docs['take'])
def take(self, indices, axis=0, convert=True, *args, **kwargs):
- """
- Sparse-compatible version of ndarray.take
+ convert = nv.validate_take_with_convert(convert, args, kwargs)
- Returns
- -------
- taken : ndarray
- """
+ if not convert:
+ msg = ("The 'convert' parameter is deprecated "
+ "and will be removed in a future version.")
+ warnings.warn(msg, FutureWarning, stacklevel=2)
- convert = nv.validate_take_with_convert(convert, args, kwargs)
new_values = SparseArray.take(self.values, indices)
new_index = self.index.take(indices)
return self._constructor(new_values,
diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py
index fb9b8c2ed7aff..219c1df301c4b 100644
--- a/pandas/tests/frame/test_axis_select_reindex.py
+++ b/pandas/tests/frame/test_axis_select_reindex.py
@@ -822,7 +822,7 @@ def test_take(self):
expected = df.loc[:, ['D', 'B', 'C', 'A']]
assert_frame_equal(result, expected, check_names=False)
- # neg indicies
+ # negative indices
order = [2, 1, -1]
for df in [self.frame]:
@@ -830,6 +830,10 @@ def test_take(self):
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
+ with tm.assert_produces_warning(FutureWarning):
+ result = df.take(order, convert=False, axis=0)
+ assert_frame_equal(result, expected)
+
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['C', 'B', 'D']]
@@ -854,7 +858,7 @@ def test_take(self):
expected = df.loc[:, ['foo', 'B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
- # neg indicies
+ # negative indices
order = [4, 1, -2]
for df in [self.mixed_frame]:
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 3e863a59df67e..17316a714e260 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -581,11 +581,11 @@ def gen_test(l, l2):
def gen_expected(df, mask):
l = len(mask)
- return pd.concat([df.take([0], convert=False),
+ return pd.concat([df.take([0]),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
- df.take(mask[1:], convert=False)])
+ df.take(mask[1:])])
df = gen_test(900, 100)
assert not df.index.is_unique
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py
index 83d6a09d38f41..272e8c7de5e49 100644
--- a/pandas/tests/series/test_indexing.py
+++ b/pandas/tests/series/test_indexing.py
@@ -1066,6 +1066,23 @@ def test_setitem_with_tz_dst(self):
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
+ def test_take(self):
+ s = Series([-1, 5, 6, 2, 4])
+
+ actual = s.take([1, 3, 4])
+ expected = Series([5, 2, 4], index=[1, 3, 4])
+ tm.assert_series_equal(actual, expected)
+
+ actual = s.take([-1, 3, 4])
+ expected = Series([4, 2, 4], index=[4, 3, 4])
+ tm.assert_series_equal(actual, expected)
+
+ pytest.raises(IndexError, s.take, [1, 10])
+ pytest.raises(IndexError, s.take, [2, 5])
+
+ with tm.assert_produces_warning(FutureWarning):
+ s.take([-1, 3, 4], convert=False)
+
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
diff --git a/pandas/tests/sparse/test_series.py b/pandas/tests/sparse/test_series.py
index 451f369593347..8c0ed322028e8 100644
--- a/pandas/tests/sparse/test_series.py
+++ b/pandas/tests/sparse/test_series.py
@@ -520,6 +520,9 @@ def _compare(idx):
exp = pd.Series(np.repeat(nan, 5))
tm.assert_series_equal(sp.take([0, 1, 2, 3, 4]), exp)
+ with tm.assert_produces_warning(FutureWarning):
+ sp.take([1, 5], convert=False)
+
def test_numpy_take(self):
sp = SparseSeries([1.0, 2.0, 3.0])
indices = [1, 2]
| xref #16948.
The parameter is not respected, nor is it a parameter in many 'take' implementations.
cc @jorisvandenbossche | https://api.github.com/repos/pandas-dev/pandas/pulls/17352 | 2017-08-27T20:20:35Z | 2017-10-01T20:05:02Z | 2017-10-01T20:05:02Z | 2017-10-03T09:03:45Z |
COMPAT: Pypy tweaks | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 942e37a29f8d5..7e83edfdde0e9 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -332,13 +332,11 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
-
Conversion
^^^^^^^^^^
- Bug in assignment against datetime-like data with ``int`` may incorrectly convert to datetime-like (:issue:`14145`)
- Bug in assignment against ``int64`` data with ``np.ndarray`` with ``float64`` dtype may keep ``int64`` dtype (:issue:`14001`)
-- Fix :func:`DataFrame.memory_usage` to support PyPy. Objects on PyPy do not have a fixed size, so an approximation is used instead (:issue:`17228`)
- Fixed the return type of ``IntervalIndex.is_non_overlapping_monotonic`` to be a Python ``bool`` for consistency with similar attributes/methods. Previously returned a ``numpy.bool_``. (:issue:`17237`)
- Bug in ``IntervalIndex.is_non_overlapping_monotonic`` when intervals are closed on both sides and overlap at a point (:issue:`16560`)
- Bug in :func:`Series.fillna` returns frame when ``inplace=True`` and ``value`` is dict (:issue:`16156`)
@@ -420,6 +418,15 @@ Categorical
the ``.categories`` to be an empty ``Float64Index`` rather than an empty
``Index`` with object dtype (:issue:`17248`)
+PyPy
+^^^^
+
+- Compatibility with PyPy in :func:`read_csv` with ``usecols=[<unsorted ints>]`` and
+ :func:`read_json` (:issue:`17351`)
+- Split tests into cases for CPython and PyPy where needed, which highlights the fragility
+ of index matching with ``float('nan')``, ``np.nan`` and ``NAT`` (:issue:`17351`)
+- Fix :func:`DataFrame.memory_usage` to support PyPy. Objects on PyPy do not have a fixed size,
+ so an approximation is used instead (:issue:`17228`)
Other
^^^^^
diff --git a/pandas/_libs/src/ujson/python/JSONtoObj.c b/pandas/_libs/src/ujson/python/JSONtoObj.c
index b0132532c16af..85cf1d5e5e7a1 100644
--- a/pandas/_libs/src/ujson/python/JSONtoObj.c
+++ b/pandas/_libs/src/ujson/python/JSONtoObj.c
@@ -409,7 +409,7 @@ JSOBJ Object_npyEndObject(void *prv, JSOBJ obj) {
}
int Object_npyObjectAddKey(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value) {
- PyObject *label;
+ PyObject *label, *labels;
npy_intp labelidx;
// add key to label array, value to values array
NpyArrContext *npyarr = (NpyArrContext *)obj;
@@ -424,11 +424,11 @@ int Object_npyObjectAddKey(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value) {
if (!npyarr->labels[labelidx]) {
npyarr->labels[labelidx] = PyList_New(0);
}
-
+ labels = npyarr->labels[labelidx];
// only fill label array once, assumes all column labels are the same
// for 2-dimensional arrays.
- if (PyList_GET_SIZE(npyarr->labels[labelidx]) <= npyarr->elcount) {
- PyList_Append(npyarr->labels[labelidx], label);
+ if (PyList_Check(labels) && PyList_GET_SIZE(labels) <= npyarr->elcount) {
+ PyList_Append(labels, label);
}
if (((JSONObjectDecoder *)npyarr->dec)->arrayAddItem(prv, obj, value)) {
@@ -439,16 +439,16 @@ int Object_npyObjectAddKey(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value) {
}
int Object_objectAddKey(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value) {
- PyDict_SetItem(obj, name, value);
+ int ret = PyDict_SetItem(obj, name, value);
Py_DECREF((PyObject *)name);
Py_DECREF((PyObject *)value);
- return 1;
+ return ret == 0 ? 1 : 0;
}
int Object_arrayAddItem(void *prv, JSOBJ obj, JSOBJ value) {
- PyList_Append(obj, value);
+ int ret = PyList_Append(obj, value);
Py_DECREF((PyObject *)value);
- return 1;
+ return ret == 0 ? 1 : 0;
}
JSOBJ Object_newString(void *prv, wchar_t *start, wchar_t *end) {
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index a9821be3fa5e2..4198c30545ef6 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1714,6 +1714,7 @@ def _set_noconvert_columns(self):
# A set of integers will be converted to a list in
# the correct order every single time.
usecols = list(self.usecols)
+ usecols.sort()
elif (callable(self.usecols) or
self.usecols_dtype not in ('empty', None)):
# The names attribute should have the correct columns
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 07e98c326bcaa..e6c9b7ac71a77 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -9,7 +9,7 @@
from pandas.tests.indexes.common import Base
from pandas.compat import (range, lrange, lzip, u,
- text_type, zip, PY3, PY36)
+ text_type, zip, PY3, PY36, PYPY)
import operator
import numpy as np
@@ -1369,13 +1369,21 @@ def test_isin(self):
assert len(result) == 0
assert result.dtype == np.bool_
- def test_isin_nan(self):
+ @pytest.mark.skipif(PYPY, reason="np.nan is float('nan') on PyPy")
+ def test_isin_nan_not_pypy(self):
+ tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([float('nan')]),
+ np.array([False, False]))
+
+ @pytest.mark.skipif(not PYPY, reason="np.nan is float('nan') on PyPy")
+ def test_isin_nan_pypy(self):
+ tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([float('nan')]),
+ np.array([False, True]))
+
+ def test_isin_nan_common(self):
tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([np.nan]),
np.array([False, True]))
tm.assert_numpy_array_equal(Index(['a', pd.NaT]).isin([pd.NaT]),
np.array([False, True]))
- tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([float('nan')]),
- np.array([False, False]))
tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([pd.NaT]),
np.array([False, False]))
diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py
index c66775f4690cc..c7a149fb86182 100644
--- a/pandas/tests/indexes/test_multi.py
+++ b/pandas/tests/indexes/test_multi.py
@@ -14,7 +14,7 @@
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
-from pandas.compat import PY3, long, lrange, lzip, range, u
+from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.indexes.base import InvalidIndexError
from pandas._libs import lib
@@ -2573,13 +2573,22 @@ def test_isin(self):
assert len(result) == 0
assert result.dtype == np.bool_
- def test_isin_nan(self):
+ @pytest.mark.skipif(PYPY, reason="tuples cmp recursively on PyPy")
+ def test_isin_nan_not_pypy(self):
idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]])
tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),
np.array([False, False]))
tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),
np.array([False, False]))
+ @pytest.mark.skipif(not PYPY, reason="tuples cmp recursively on PyPy")
+ def test_isin_nan_pypy(self):
+ idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]])
+ tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),
+ np.array([False, True]))
+ tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),
+ np.array([False, True]))
+
def test_isin_level_kwarg(self):
idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange(
4)])
diff --git a/pandas/tests/io/parser/test_parsers.py b/pandas/tests/io/parser/test_parsers.py
index 2fee2451c5e36..0ea4757b10e94 100644
--- a/pandas/tests/io/parser/test_parsers.py
+++ b/pandas/tests/io/parser/test_parsers.py
@@ -3,8 +3,10 @@
import os
import pandas.util.testing as tm
-from pandas import read_csv, read_table
+from pandas import read_csv, read_table, DataFrame
from pandas.core.common import AbstractMethodError
+from pandas._libs.lib import Timestamp
+from pandas.compat import StringIO
from .common import ParserTests
from .header import HeaderTests
@@ -100,3 +102,51 @@ def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = self.engine
return read_table(*args, **kwds)
+
+
+class TestUnsortedUsecols(object):
+ def test_override__set_noconvert_columns(self):
+ # GH 17351 - usecols needs to be sorted in _setnoconvert_columns
+ # based on the test_usecols_with_parse_dates test from usecols.py
+ from pandas.io.parsers import CParserWrapper, TextFileReader
+
+ s = """a,b,c,d,e
+ 0,1,20140101,0900,4
+ 0,1,20140102,1000,4"""
+
+ parse_dates = [[1, 2]]
+ cols = {
+ 'a': [0, 0],
+ 'c_d': [
+ Timestamp('2014-01-01 09:00:00'),
+ Timestamp('2014-01-02 10:00:00')
+ ]
+ }
+ expected = DataFrame(cols, columns=['c_d', 'a'])
+
+ class MyTextFileReader(TextFileReader):
+ def __init__(self):
+ self._currow = 0
+ self.squeeze = False
+
+ class MyCParserWrapper(CParserWrapper):
+ def _set_noconvert_columns(self):
+ if self.usecols_dtype == 'integer':
+ # self.usecols is a set, which is documented as unordered
+ # but in practice, a CPython set of integers is sorted.
+ # In other implementations this assumption does not hold.
+ # The following code simulates a different order, which
+ # before GH 17351 would cause the wrong columns to be
+ # converted via the parse_dates parameter
+ self.usecols = list(self.usecols)
+ self.usecols.reverse()
+ return CParserWrapper._set_noconvert_columns(self)
+
+ parser = MyTextFileReader()
+ parser.options = {'usecols': [0, 2, 3],
+ 'parse_dates': parse_dates,
+ 'delimiter': ','}
+ parser._engine = MyCParserWrapper(StringIO(s), **parser.options)
+ df = parser.read()
+
+ tm.assert_frame_equal(df, expected)
| a set of tweaks that I discovered when getting PyPY to pass tests, some could cause issues on CPython as well:
- 496cc3a clears up an assumption that sets are sorted
- d161b08 makes sure PyList_GET_SIZE is used only on PyListObject, if used on an ndarray it will return the value of the data pointer, which is not the intention at all
- 2744c5c separates the cases where ``float('nan') is not np.nan`` and like cases by implementation, on PyPy the ``is`` comparison always checks the value of struct.pack('d', val) for equality.
- 5425137 cleans up the use of ``rank`` and ``mean`` as keys (the warnings are ignored?) and adds a trivial assert where a test was simply making a call
I left these as separate commits for ease of discussion and possible rejection, if desired I can squash them to a single commit
| https://api.github.com/repos/pandas-dev/pandas/pulls/17351 | 2017-08-27T20:19:04Z | 2017-09-07T11:56:33Z | 2017-09-07T11:56:33Z | 2017-09-07T11:57:24Z |
API: Warn about dups in names for read_csv | diff --git a/doc/source/io.rst b/doc/source/io.rst
index ab1ad74ee8516..d6abed6e9d1ad 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -113,8 +113,8 @@ header : int or list of ints, default ``'infer'``
rather than the first line of the file.
names : array-like, default ``None``
List of column names to use. If file contains no header row, then you should
- explicitly pass ``header=None``. Duplicates in this list are not allowed unless
- ``mangle_dupe_cols=True``, which is the default.
+ explicitly pass ``header=None``. Duplicates in this list will cause
+ a ``UserWarning`` to be issued.
index_col : int or sequence or ``False``, default ``None``
Column to use as the row labels of the DataFrame. If a sequence is given, a
MultiIndex is used. If you have a malformed file with delimiters at the end of
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 5003aa0d97c1c..41d38eea94e33 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -422,6 +422,7 @@ Other API Changes
- The Categorical constructor no longer accepts a scalar for the ``categories`` keyword. (:issue:`16022`)
- Accessing a non-existent attribute on a closed :class:`~pandas.HDFStore` will now
raise an ``AttributeError`` rather than a ``ClosedFileError`` (:issue:`16301`)
+- :func:`read_csv` now issues a ``UserWarning`` if the ``names`` parameter contains duplicates (:issue:`17095`)
- :func:`read_csv` now treats ``'null'`` strings as missing values by default (:issue:`16471`)
- :func:`read_csv` now treats ``'n/a'`` strings as missing values by default (:issue:`16078`)
- :class:`pandas.HDFStore`'s string representation is now faster and less detailed. For the previous behavior, use ``pandas.HDFStore.info()``. (:issue:`16503`).
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index d9e83176d0d6e..ed15d4295d688 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -84,8 +84,8 @@
rather than the first line of the file.
names : array-like, default None
List of column names to use. If file contains no header row, then you
- should explicitly pass header=None. Duplicates in this list are not
- allowed unless mangle_dupe_cols=True, which is the default.
+ should explicitly pass header=None. Duplicates in this list will cause
+ a ``UserWarning`` to be issued.
index_col : int or sequence or False, default None
Column to use as the row labels of the DataFrame. If a sequence is given, a
MultiIndex is used. If you have a malformed file with delimiters at the end
@@ -385,6 +385,32 @@ def _validate_integer(name, val, min_val=0):
return val
+def _validate_names(names):
+ """
+ Check if the `names` parameter contains duplicates.
+
+ If duplicates are found, we issue a warning before returning.
+
+ Parameters
+ ----------
+ names : array-like or None
+ An array containing a list of the names used for the output DataFrame.
+
+ Returns
+ -------
+ names : array-like or None
+ The original `names` parameter.
+ """
+
+ if names is not None:
+ if len(names) != len(set(names)):
+ msg = ("Duplicate names specified. This "
+ "will raise an error in the future.")
+ warnings.warn(msg, UserWarning, stacklevel=3)
+
+ return names
+
+
def _read(filepath_or_buffer, kwds):
"""Generic reader of line files."""
encoding = kwds.get('encoding', None)
@@ -407,6 +433,9 @@ def _read(filepath_or_buffer, kwds):
chunksize = _validate_integer('chunksize', kwds.get('chunksize', None), 1)
nrows = _validate_integer('nrows', kwds.get('nrows', None))
+ # Check for duplicates in names.
+ _validate_names(kwds.get("names", None))
+
# Create the parser.
parser = TextFileReader(filepath_or_buffer, **kwds)
diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py
index cfc4a1d7c55eb..e85d3ad294655 100644
--- a/pandas/tests/io/parser/common.py
+++ b/pandas/tests/io/parser/common.py
@@ -1357,20 +1357,6 @@ def test_euro_decimal_format(self):
assert df2['Number2'].dtype == float
assert df2['Number3'].dtype == float
- def test_read_duplicate_names(self):
- # See gh-7160
- data = "a,b,a\n0,1,2\n3,4,5"
- df = self.read_csv(StringIO(data))
- expected = DataFrame([[0, 1, 2], [3, 4, 5]],
- columns=['a', 'b', 'a.1'])
- tm.assert_frame_equal(df, expected)
-
- data = "0,1,2\n3,4,5"
- df = self.read_csv(StringIO(data), names=["a", "b", "a"])
- expected = DataFrame([[0, 1, 2], [3, 4, 5]],
- columns=['a', 'b', 'a.1'])
- tm.assert_frame_equal(df, expected)
-
def test_inf_parsing(self):
data = """\
,A
diff --git a/pandas/tests/io/parser/dtypes.py b/pandas/tests/io/parser/dtypes.py
index 7311c9200f269..402fa0817595c 100644
--- a/pandas/tests/io/parser/dtypes.py
+++ b/pandas/tests/io/parser/dtypes.py
@@ -204,10 +204,11 @@ def test_empty_with_dup_column_pass_dtype_by_indexes(self):
result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
tm.assert_frame_equal(result, expected, check_index_type=False)
- data = ''
- result = self.read_csv(StringIO(data), names=['one', 'one'],
- dtype={0: 'u1', 1: 'f'})
- tm.assert_frame_equal(result, expected, check_index_type=False)
+ with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
+ data = ''
+ result = self.read_csv(StringIO(data), names=['one', 'one'],
+ dtype={0: 'u1', 1: 'f'})
+ tm.assert_frame_equal(result, expected, check_index_type=False)
def test_raise_on_passed_int_dtype_with_nas(self):
# see gh-2631
diff --git a/pandas/tests/io/parser/mangle_dupes.py b/pandas/tests/io/parser/mangle_dupes.py
index e2efb1377f8b0..6df69eb475bf7 100644
--- a/pandas/tests/io/parser/mangle_dupes.py
+++ b/pandas/tests/io/parser/mangle_dupes.py
@@ -7,6 +7,9 @@
"""
from pandas.compat import StringIO
+from pandas import DataFrame
+
+import pandas.util.testing as tm
class DupeColumnTests(object):
@@ -25,6 +28,21 @@ def test_basic(self):
mangle_dupe_cols=True)
assert list(df.columns) == expected
+ def test_basic_names(self):
+ # See gh-7160
+ data = "a,b,a\n0,1,2\n3,4,5"
+ expected = DataFrame([[0, 1, 2], [3, 4, 5]],
+ columns=["a", "b", "a.1"])
+
+ df = self.read_csv(StringIO(data))
+ tm.assert_frame_equal(df, expected)
+
+ with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
+ data = "0,1,2\n3,4,5"
+ df = self.read_csv(StringIO(data),
+ names=["a", "b", "a"])
+ tm.assert_frame_equal(df, expected)
+
def test_thorough_mangle_columns(self):
# see gh-17060
data = "a,a,a.1\n1,2,3"
@@ -45,20 +63,26 @@ def test_thorough_mangle_names(self):
# see gh-17095
data = "a,b,b\n1,2,3"
names = ["a.1", "a.1", "a.1.1"]
- df = self.read_csv(StringIO(data), sep=",", names=names,
- mangle_dupe_cols=True)
- assert list(df.columns) == ["a.1", "a.1.1", "a.1.1.1"]
+
+ with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
+ df = self.read_csv(StringIO(data), sep=",", names=names,
+ mangle_dupe_cols=True)
+ assert list(df.columns) == ["a.1", "a.1.1", "a.1.1.1"]
data = "a,b,c,d,e,f\n1,2,3,4,5,6"
names = ["a", "a", "a.1", "a.1.1", "a.1.1.1", "a.1.1.1.1"]
- df = self.read_csv(StringIO(data), sep=",", names=names,
- mangle_dupe_cols=True)
- assert list(df.columns) == ["a", "a.1", "a.1.1", "a.1.1.1",
- "a.1.1.1.1", "a.1.1.1.1.1"]
+
+ with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
+ df = self.read_csv(StringIO(data), sep=",", names=names,
+ mangle_dupe_cols=True)
+ assert list(df.columns) == ["a", "a.1", "a.1.1", "a.1.1.1",
+ "a.1.1.1.1", "a.1.1.1.1.1"]
data = "a,b,c,d,e,f,g\n1,2,3,4,5,6,7"
names = ["a", "a", "a.3", "a.1", "a.2", "a", "a"]
- df = self.read_csv(StringIO(data), sep=",", names=names,
- mangle_dupe_cols=True)
- assert list(df.columns) == ["a", "a.1", "a.3", "a.1.1",
- "a.2", "a.2.1", "a.3.1"]
+
+ with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
+ df = self.read_csv(StringIO(data), sep=",", names=names,
+ mangle_dupe_cols=True)
+ assert list(df.columns) == ["a", "a.1", "a.3", "a.1.1",
+ "a.2", "a.2.1", "a.3.1"]
| Title is self-explanatory.
xref #17095. | https://api.github.com/repos/pandas-dev/pandas/pulls/17346 | 2017-08-26T12:53:58Z | 2017-09-24T13:13:38Z | 2017-09-24T13:13:38Z | 2017-09-25T00:59:51Z |
BUG: when Index is numeric and indexer is boolean (#16877) | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 36551fa30c3ad..b6bd86bd79a1f 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -481,7 +481,7 @@ Other API Changes
- :class:`Period` is now immutable, and will now raise an ``AttributeError`` when a user tries to assign a new value to the ``ordinal`` or ``freq`` attributes (:issue:`17116`).
- :func:`to_datetime` when passed a tz-aware ``origin=`` kwarg will now raise a more informative ``ValueError`` rather than a ``TypeError`` (:issue:`16842`)
- Renamed non-functional ``index`` to ``index_col`` in :func:`read_stata` to improve API consistency (:issue:`16342`)
-
+- Bug in :func:`DataFrame.drop` caused boolean labels ``False`` and ``True`` to be treated as labels 0 and 1 respectively when dropping indices from a numeric index. This will now raise a ValueError (:issue:`16877`)
.. _whatsnew_0210.deprecations:
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index f28ff9697e517..be26720adb0bd 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2609,6 +2609,12 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance)
+ # Treat boolean labels passed to a numeric index as not found. Without
+ # this fix False and True would be treated as 0 and 1 respectively.
+ # (GH #16877)
+ if target.is_boolean() and self.is_numeric():
+ return _ensure_platform_int(np.repeat(-1, target.size))
+
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(ptarget, method=method, limit=limit,
@@ -2637,7 +2643,6 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
'backfill or nearest reindexing')
indexer = self._engine.get_indexer(target._values)
-
return _ensure_platform_int(indexer)
def _convert_tolerance(self, tolerance):
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 0bd2861e060ed..81f113d58d680 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1141,6 +1141,13 @@ def test_get_indexer_strings(self):
with pytest.raises(TypeError):
idx.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2)
+ def test_get_indexer_numeric_index_boolean_target(self):
+ # GH 16877
+ numeric_idx = pd.Index(range(4))
+ result = numeric_idx.get_indexer([True, False, True])
+ expected = np.array([-1, -1, -1], dtype=np.intp)
+ tm.assert_numpy_array_equal(result, expected)
+
def test_get_loc(self):
idx = pd.Index([0, 1, 2])
all_methods = [None, 'pad', 'backfill', 'nearest']
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py
index 91187b709463a..2182e3fbfc212 100644
--- a/pandas/tests/series/test_indexing.py
+++ b/pandas/tests/series/test_indexing.py
@@ -1783,6 +1783,11 @@ def test_drop(self):
expected = Series([3], index=[False])
assert_series_equal(result, expected)
+ # GH 16877
+ s = Series([2, 3], index=[0, 1])
+ with tm.assert_raises_regex(ValueError, 'not contained in axis'):
+ s.drop([False, True])
+
def test_align(self):
def _check_align(a, b, how='left', fill=None):
aa, ab = a.align(b, join=how, fill_value=fill)
- [x] closes #16877
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This warns for both tickets:
https://github.com/pandas-dev/pandas/issues/6189
https://github.com/pandas-dev/pandas/issues/16877
There's one particular flaw: non-unique indices never run get_indexer, and they handle dropping in a way that makes it difficult to inspect types. I think it should be OK to leave that one for now, though.
One of the reasons I chose to warn instead of raising an error is that running some_numeric_index.difference([True, False]) should possibly still be allowed, while still warning the user that the difference they are getting may just be due to the conversion of True to 1 (or 1.0) and False to 0 (or 0.0). This case is probably rare enough that raising an error might still be the best approach.
The questions that remain for me are:
Is this severe enough that an error should be raised? If so, which exception type?
Is the warning message sufficient?
Thanks. | https://api.github.com/repos/pandas-dev/pandas/pulls/17343 | 2017-08-26T06:38:29Z | 2017-09-25T21:50:18Z | 2017-09-25T21:50:18Z | 2017-09-25T21:50:37Z |
Separate out strptime.pyx from tslib | diff --git a/pandas/_libs/__init__.py b/pandas/_libs/__init__.py
index ab3832d0292ba..b4c3ff8008015 100644
--- a/pandas/_libs/__init__.py
+++ b/pandas/_libs/__init__.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
# flake8: noqa
from .tslib import iNaT, NaT, Timestamp, Timedelta, OutOfBoundsDatetime
diff --git a/pandas/_libs/src/datetime.pxd b/pandas/_libs/src/datetime.pxd
index 23620e790c132..86c8f3bfc74f3 100644
--- a/pandas/_libs/src/datetime.pxd
+++ b/pandas/_libs/src/datetime.pxd
@@ -94,6 +94,7 @@ cdef extern from "datetime/np_datetime.h":
PANDAS_DATETIMEUNIT fr,
pandas_datetimestruct *result) nogil
int days_per_month_table[2][12]
+ pandas_datetimestruct _NS_MIN_DTS, _NS_MAX_DTS
int dayofweek(int y, int m, int d) nogil
int is_leapyear(int64_t year) nogil
@@ -161,3 +162,17 @@ cdef inline int64_t _date_to_datetime64(object val,
dts.hour = dts.min = dts.sec = dts.us = 0
dts.ps = dts.as = 0
return pandas_datetimestruct_to_datetime(PANDAS_FR_ns, dts)
+
+
+cdef inline bint check_dts_bounds(pandas_datetimestruct *dts):
+ """Returns True if an error needs to be raised"""
+ cdef:
+ bint error = False
+
+ if (dts.year <= 1677 and
+ cmp_pandas_datetimestruct(dts, &_NS_MIN_DTS) == -1):
+ error = True
+ elif (dts.year >= 2262 and
+ cmp_pandas_datetimestruct(dts, &_NS_MAX_DTS) == 1):
+ error = True
+ return error
diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/src/datetime/np_datetime.c
index 8458418988863..ffb901981f939 100644
--- a/pandas/_libs/src/datetime/np_datetime.c
+++ b/pandas/_libs/src/datetime/np_datetime.c
@@ -40,6 +40,12 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#endif
+const pandas_datetimestruct _NS_MIN_DTS = {
+ 1677, 9, 21, 0, 12, 43, 145225, 0, 0};
+const pandas_datetimestruct _NS_MAX_DTS = {
+ 2262, 4, 11, 23, 47, 16, 854775, 807000, 0};
+
+
const int days_per_month_table[2][12] = {
{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
{31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}};
diff --git a/pandas/_libs/src/datetime/np_datetime.h b/pandas/_libs/src/datetime/np_datetime.h
index 97ec5782b625b..a20bff60126aa 100644
--- a/pandas/_libs/src/datetime/np_datetime.h
+++ b/pandas/_libs/src/datetime/np_datetime.h
@@ -54,6 +54,9 @@ typedef struct {
int num;
} pandas_datetime_metadata;
+extern const pandas_datetimestruct _NS_MIN_DTS;
+extern const pandas_datetimestruct _NS_MAX_DTS;
+
// stuff pandas needs
// ----------------------------------------------------------------------------
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index c629ccbd8e1fd..d4ca5af09367e 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -50,6 +50,7 @@ from datetime cimport (
npy_datetime,
is_leapyear,
dayofweek,
+ check_dts_bounds,
PANDAS_FR_ns,
PyDateTime_Check, PyDate_Check,
PyDateTime_IMPORT,
@@ -69,6 +70,7 @@ from khash cimport (
cimport cython
import re
+import time
# dateutil compat
from dateutil.tz import (tzoffset, tzlocal as _dateutil_tzlocal,
@@ -1691,21 +1693,10 @@ class OutOfBoundsDatetime(ValueError):
pass
cdef inline _check_dts_bounds(pandas_datetimestruct *dts):
- cdef:
- bint error = False
-
- if dts.year <= 1677 and cmp_pandas_datetimestruct(dts, &_NS_MIN_DTS) == -1:
- error = True
- elif (
- dts.year >= 2262 and
- cmp_pandas_datetimestruct(dts, &_NS_MAX_DTS) == 1):
- error = True
-
- if error:
+ if check_dts_bounds(dts):
fmt = '%d-%.2d-%.2d %.2d:%.2d:%.2d' % (dts.year, dts.month,
dts.day, dts.hour,
dts.min, dts.sec)
-
raise OutOfBoundsDatetime(
'Out of bounds nanosecond timestamp: %s' % fmt)
@@ -3515,284 +3506,6 @@ cpdef convert_to_timedelta64(object ts, object unit):
return ts.astype('timedelta64[ns]')
-def array_strptime(ndarray[object] values, object fmt,
- bint exact=True, errors='raise'):
- """
- Parameters
- ----------
- values : ndarray of string-like objects
- fmt : string-like regex
- exact : matches must be exact if True, search if False
- coerce : if invalid values found, coerce to NaT
- """
-
- cdef:
- Py_ssize_t i, n = len(values)
- pandas_datetimestruct dts
- ndarray[int64_t] iresult
- int year, month, day, minute, hour, second, weekday, julian, tz
- int week_of_year, week_of_year_start
- int64_t us, ns
- object val, group_key, ampm, found
- dict found_key
- bint is_raise = errors=='raise'
- bint is_ignore = errors=='ignore'
- bint is_coerce = errors=='coerce'
-
- assert is_raise or is_ignore or is_coerce
-
- global _TimeRE_cache, _regex_cache
- with _cache_lock:
- if _getlang() != _TimeRE_cache.locale_time.lang:
- _TimeRE_cache = TimeRE()
- _regex_cache.clear()
- if len(_regex_cache) > _CACHE_MAX_SIZE:
- _regex_cache.clear()
- locale_time = _TimeRE_cache.locale_time
- format_regex = _regex_cache.get(fmt)
- if not format_regex:
- try:
- format_regex = _TimeRE_cache.compile(fmt)
- # KeyError raised when a bad format is found; can be specified as
- # \\, in which case it was a stray % but with a space after it
- except KeyError, err:
- bad_directive = err.args[0]
- if bad_directive == "\\":
- bad_directive = "%"
- del err
- raise ValueError("'%s' is a bad directive in format '%s'" %
- (bad_directive, fmt))
- # IndexError only occurs when the format string is "%"
- except IndexError:
- raise ValueError("stray %% in format '%s'" % fmt)
- _regex_cache[fmt] = format_regex
-
- result = np.empty(n, dtype='M8[ns]')
- iresult = result.view('i8')
-
- dts.us = dts.ps = dts.as = 0
-
- cdef dict _parse_code_table = {
- 'y': 0,
- 'Y': 1,
- 'm': 2,
- 'B': 3,
- 'b': 4,
- 'd': 5,
- 'H': 6,
- 'I': 7,
- 'M': 8,
- 'S': 9,
- 'f': 10,
- 'A': 11,
- 'a': 12,
- 'w': 13,
- 'j': 14,
- 'U': 15,
- 'W': 16,
- 'Z': 17,
- 'p': 18 # just an additional key, works only with I
- }
- cdef int parse_code
-
- for i in range(n):
- val = values[i]
- if util.is_string_object(val):
- if val in _nat_strings:
- iresult[i] = NPY_NAT
- continue
- else:
- if _checknull_with_nat(val):
- iresult[i] = NPY_NAT
- continue
- else:
- val = str(val)
-
- # exact matching
- if exact:
- found = format_regex.match(val)
- if not found:
- if is_coerce:
- iresult[i] = NPY_NAT
- continue
- raise ValueError("time data %r does not match "
- "format %r (match)" % (values[i], fmt))
- if len(val) != found.end():
- if is_coerce:
- iresult[i] = NPY_NAT
- continue
- raise ValueError("unconverted data remains: %s" %
- values[i][found.end():])
-
- # search
- else:
- found = format_regex.search(val)
- if not found:
- if is_coerce:
- iresult[i] = NPY_NAT
- continue
- raise ValueError("time data %r does not match format "
- "%r (search)" % (values[i], fmt))
-
- year = 1900
- month = day = 1
- hour = minute = second = ns = us = 0
- tz = -1
- # Default to -1 to signify that values not known; not critical to have,
- # though
- week_of_year = -1
- week_of_year_start = -1
- # weekday and julian defaulted to -1 so as to signal need to calculate
- # values
- weekday = julian = -1
- found_dict = found.groupdict()
- for group_key in found_dict.iterkeys():
- # Directives not explicitly handled below:
- # c, x, X
- # handled by making out of other directives
- # U, W
- # worthless without day of the week
- parse_code = _parse_code_table[group_key]
-
- if parse_code == 0:
- year = int(found_dict['y'])
- # Open Group specification for strptime() states that a %y
- #value in the range of [00, 68] is in the century 2000, while
- #[69,99] is in the century 1900
- if year <= 68:
- year += 2000
- else:
- year += 1900
- elif parse_code == 1:
- year = int(found_dict['Y'])
- elif parse_code == 2:
- month = int(found_dict['m'])
- elif parse_code == 3:
- # elif group_key == 'B':
- month = locale_time.f_month.index(found_dict['B'].lower())
- elif parse_code == 4:
- # elif group_key == 'b':
- month = locale_time.a_month.index(found_dict['b'].lower())
- elif parse_code == 5:
- # elif group_key == 'd':
- day = int(found_dict['d'])
- elif parse_code == 6:
- # elif group_key == 'H':
- hour = int(found_dict['H'])
- elif parse_code == 7:
- hour = int(found_dict['I'])
- ampm = found_dict.get('p', '').lower()
- # If there was no AM/PM indicator, we'll treat this like AM
- if ampm in ('', locale_time.am_pm[0]):
- # We're in AM so the hour is correct unless we're
- # looking at 12 midnight.
- # 12 midnight == 12 AM == hour 0
- if hour == 12:
- hour = 0
- elif ampm == locale_time.am_pm[1]:
- # We're in PM so we need to add 12 to the hour unless
- # we're looking at 12 noon.
- # 12 noon == 12 PM == hour 12
- if hour != 12:
- hour += 12
- elif parse_code == 8:
- minute = int(found_dict['M'])
- elif parse_code == 9:
- second = int(found_dict['S'])
- elif parse_code == 10:
- s = found_dict['f']
- # Pad to always return nanoseconds
- s += "0" * (9 - len(s))
- us = long(s)
- ns = us % 1000
- us = us / 1000
- elif parse_code == 11:
- weekday = locale_time.f_weekday.index(found_dict['A'].lower())
- elif parse_code == 12:
- weekday = locale_time.a_weekday.index(found_dict['a'].lower())
- elif parse_code == 13:
- weekday = int(found_dict['w'])
- if weekday == 0:
- weekday = 6
- else:
- weekday -= 1
- elif parse_code == 14:
- julian = int(found_dict['j'])
- elif parse_code == 15 or parse_code == 16:
- week_of_year = int(found_dict[group_key])
- if group_key == 'U':
- # U starts week on Sunday.
- week_of_year_start = 6
- else:
- # W starts week on Monday.
- week_of_year_start = 0
- elif parse_code == 17:
- # Since -1 is default value only need to worry about setting tz
- # if it can be something other than -1.
- found_zone = found_dict['Z'].lower()
- for value, tz_values in enumerate(locale_time.timezone):
- if found_zone in tz_values:
- # Deal w/ bad locale setup where timezone names are the
- # same and yet time.daylight is true; too ambiguous to
- # be able to tell what timezone has daylight savings
- if (time.tzname[0] == time.tzname[1] and
- time.daylight and found_zone not in (
- "utc", "gmt")):
- break
- else:
- tz = value
- break
- # If we know the wk of the year and what day of that wk, we can figure
- # out the Julian day of the year.
- if julian == -1 and week_of_year != -1 and weekday != -1:
- week_starts_Mon = True if week_of_year_start == 0 else False
- julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
- week_starts_Mon)
- # Cannot pre-calculate datetime_date() since can change in Julian
- # calculation and thus could have different value for the day of the wk
- # calculation.
- try:
- if julian == -1:
- # Need to add 1 to result since first day of the year is 1, not
- # 0.
- julian = datetime_date(year, month, day).toordinal() - \
- datetime_date(year, 1, 1).toordinal() + 1
- else: # Assume that if they bothered to include Julian day it will
- # be accurate.
- datetime_result = datetime_date.fromordinal(
- (julian - 1) + datetime_date(year, 1, 1).toordinal())
- year = datetime_result.year
- month = datetime_result.month
- day = datetime_result.day
- except ValueError:
- if is_coerce:
- iresult[i] = NPY_NAT
- continue
- raise
- if weekday == -1:
- weekday = datetime_date(year, month, day).weekday()
-
- dts.year = year
- dts.month = month
- dts.day = day
- dts.hour = hour
- dts.min = minute
- dts.sec = second
- dts.us = us
- dts.ps = ns * 1000
-
- iresult[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
- try:
- _check_dts_bounds(&dts)
- except ValueError:
- if is_coerce:
- iresult[i] = NPY_NAT
- continue
- raise
-
- return result
-
-
#----------------------------------------------------------------------
# NaT methods/property setups
@@ -5176,320 +4889,3 @@ def shift_months(int64_t[:] dtindex, int months, object day=None):
raise ValueError("day must be None, 'start' or 'end'")
return np.asarray(out)
-
-#----------------------------------------------------------------------
-# Don't even ask
-
-"""Strptime-related classes and functions.
-
-CLASSES:
- LocaleTime -- Discovers and stores locale-specific time information
- TimeRE -- Creates regexes for pattern matching a string of text containing
- time information
-
-FUNCTIONS:
- _getlang -- Figure out what language is being used for the locale
- strptime -- Calculates the time struct represented by the passed-in string
-
-"""
-import time
-import locale
-import calendar
-from re import compile as re_compile
-from re import IGNORECASE
-from re import escape as re_escape
-from datetime import date as datetime_date
-
-# Python 2 vs Python 3
-try:
- from thread import allocate_lock as _thread_allocate_lock
-except:
- try:
- from _thread import allocate_lock as _thread_allocate_lock
- except:
- try:
- from dummy_thread import allocate_lock as _thread_allocate_lock
- except:
- from _dummy_thread import allocate_lock as _thread_allocate_lock
-
-__all__ = []
-
-
-def _getlang():
- # Figure out what the current language is set to.
- return locale.getlocale(locale.LC_TIME)
-
-
-class LocaleTime(object):
- """Stores and handles locale-specific information related to time.
-
- ATTRIBUTES:
- f_weekday -- full weekday names (7-item list)
- a_weekday -- abbreviated weekday names (7-item list)
- f_month -- full month names (13-item list; dummy value in [0], which
- is added by code)
- a_month -- abbreviated month names (13-item list, dummy value in
- [0], which is added by code)
- am_pm -- AM/PM representation (2-item list)
- LC_date_time -- format string for date/time representation (string)
- LC_date -- format string for date representation (string)
- LC_time -- format string for time representation (string)
- timezone -- daylight- and non-daylight-savings timezone representation
- (2-item list of sets)
- lang -- Language used by instance (2-item tuple)
- """
-
- def __init__(self):
- """Set all attributes.
-
- Order of methods called matters for dependency reasons.
-
- The locale language is set at the offset and then checked again before
- exiting. This is to make sure that the attributes were not set with a
- mix of information from more than one locale. This would most likely
- happen when using threads where one thread calls a locale-dependent
- function while another thread changes the locale while the function in
- the other thread is still running. Proper coding would call for
- locks to prevent changing the locale while locale-dependent code is
- running. The check here is done in case someone does not think about
- doing this.
-
- Only other possible issue is if someone changed the timezone and did
- not call tz.tzset . That is an issue for the programmer, though,
- since changing the timezone is worthless without that call.
-
- """
- self.lang = _getlang()
- self.__calc_weekday()
- self.__calc_month()
- self.__calc_am_pm()
- self.__calc_timezone()
- self.__calc_date_time()
- if _getlang() != self.lang:
- raise ValueError("locale changed during initialization")
-
- def __pad(self, seq, front):
- # Add '' to seq to either the front (is True), else the back.
- seq = list(seq)
- if front:
- seq.insert(0, '')
- else:
- seq.append('')
- return seq
-
- def __calc_weekday(self):
- # Set self.a_weekday and self.f_weekday using the calendar
- # module.
- a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
- f_weekday = [calendar.day_name[i].lower() for i in range(7)]
- self.a_weekday = a_weekday
- self.f_weekday = f_weekday
-
- def __calc_month(self):
- # Set self.f_month and self.a_month using the calendar module.
- a_month = [calendar.month_abbr[i].lower() for i in range(13)]
- f_month = [calendar.month_name[i].lower() for i in range(13)]
- self.a_month = a_month
- self.f_month = f_month
-
- def __calc_am_pm(self):
- # Set self.am_pm by using time.strftime().
-
- # The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
- # magical; just happened to have used it everywhere else where a
- # static date was needed.
- am_pm = []
- for hour in (01, 22):
- time_tuple = time.struct_time(
- (1999, 3, 17, hour, 44, 55, 2, 76, 0))
- am_pm.append(time.strftime("%p", time_tuple).lower())
- self.am_pm = am_pm
-
- def __calc_date_time(self):
- # Set self.date_time, self.date, & self.time by using
- # time.strftime().
-
- # Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
- # overloaded numbers is minimized. The order in which searches for
- # values within the format string is very important; it eliminates
- # possible ambiguity for what something represents.
- time_tuple = time.struct_time((1999, 3, 17, 22, 44, 55, 2, 76, 0))
- date_time = [None, None, None]
- date_time[0] = time.strftime("%c", time_tuple).lower()
- date_time[1] = time.strftime("%x", time_tuple).lower()
- date_time[2] = time.strftime("%X", time_tuple).lower()
- replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
- (self.f_month[3],
- '%B'), (self.a_weekday[2], '%a'),
- (self.a_month[3], '%b'), (self.am_pm[1], '%p'),
- ('1999', '%Y'), ('99', '%y'), ('22', '%H'),
- ('44', '%M'), ('55', '%S'), ('76', '%j'),
- ('17', '%d'), ('03', '%m'), ('3', '%m'),
- # '3' needed for when no leading zero.
- ('2', '%w'), ('10', '%I')]
- replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
- for tz in tz_values])
- for offset, directive in ((0, '%c'), (1, '%x'), (2, '%X')):
- current_format = date_time[offset]
- for old, new in replacement_pairs:
- # Must deal with possible lack of locale info
- # manifesting itself as the empty string (e.g., Swedish's
- # lack of AM/PM info) or a platform returning a tuple of empty
- # strings (e.g., MacOS 9 having timezone as ('','')).
- if old:
- current_format = current_format.replace(old, new)
- # If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
- # 2005-01-03 occurs before the first Monday of the year. Otherwise
- # %U is used.
- time_tuple = time.struct_time((1999, 1, 3, 1, 1, 1, 6, 3, 0))
- if '00' in time.strftime(directive, time_tuple):
- U_W = '%W'
- else:
- U_W = '%U'
- date_time[offset] = current_format.replace('11', U_W)
- self.LC_date_time = date_time[0]
- self.LC_date = date_time[1]
- self.LC_time = date_time[2]
-
- def __calc_timezone(self):
- # Set self.timezone by using time.tzname.
- # Do not worry about possibility of time.tzname[0] == timetzname[1]
- # and time.daylight; handle that in strptime .
- try:
- time.tzset()
- except AttributeError:
- pass
- no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()])
- if time.daylight:
- has_saving = frozenset([time.tzname[1].lower()])
- else:
- has_saving = frozenset()
- self.timezone = (no_saving, has_saving)
-
-
-class TimeRE(dict):
- """Handle conversion from format directives to regexes."""
-
- def __init__(self, locale_time=None):
- """Create keys/values.
-
- Order of execution is important for dependency reasons.
-
- """
- if locale_time:
- self.locale_time = locale_time
- else:
- self.locale_time = LocaleTime()
- base = super(TimeRE, self)
- base.__init__({
- # The " \d" part of the regex is to make %c from ANSI C work
- 'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
- 'f': r"(?P<f>[0-9]{1,9})",
- 'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
- 'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
- 'j': (r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|"
- r"[1-9]\d|0[1-9]|[1-9])"),
- 'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
- 'M': r"(?P<M>[0-5]\d|\d)",
- 'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
- 'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
- 'w': r"(?P<w>[0-6])",
- # W is set below by using 'U'
- 'y': r"(?P<y>\d\d)",
- #XXX: Does 'Y' need to worry about having less or more than
- # 4 digits?
- 'Y': r"(?P<Y>\d\d\d\d)",
- 'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
- 'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
- 'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
- 'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
- 'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
- 'Z': self.__seqToRE([tz for tz_names in self.locale_time.timezone
- for tz in tz_names],
- 'Z'),
- '%': '%'})
- base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
- base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
- base.__setitem__('x', self.pattern(self.locale_time.LC_date))
- base.__setitem__('X', self.pattern(self.locale_time.LC_time))
-
- def __seqToRE(self, to_convert, directive):
- """Convert a list to a regex string for matching a directive.
-
- Want possible matching values to be from longest to shortest. This
- prevents the possibility of a match occuring for a value that also
- a substring of a larger value that should have matched (e.g., 'abc'
- matching when 'abcdef' should have been the match).
-
- """
- to_convert = sorted(to_convert, key=len, reverse=True)
- for value in to_convert:
- if value != '':
- break
- else:
- return ''
- regex = '|'.join([re_escape(stuff) for stuff in to_convert])
- regex = '(?P<%s>%s' % (directive, regex)
- return '%s)' % regex
-
- def pattern(self, format):
- """Return regex pattern for the format string.
-
- Need to make sure that any characters that might be interpreted as
- regex syntax are escaped.
-
- """
- processed_format = ''
- # The sub() call escapes all characters that might be misconstrued
- # as regex syntax. Cannot use re.escape since we have to deal with
- # format directives (%m, etc.).
- regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
- format = regex_chars.sub(r"\\\1", format)
- whitespace_replacement = re_compile(r'\s+')
- format = whitespace_replacement.sub(r'\\s+', format)
- while '%' in format:
- directive_index = format.index('%') +1
- processed_format = "%s%s%s" % (processed_format,
- format[:directive_index -1],
- self[format[directive_index]])
- format = format[directive_index +1:]
- return "%s%s" % (processed_format, format)
-
- def compile(self, format):
- """Return a compiled re object for the format string."""
- return re_compile(self.pattern(format), IGNORECASE)
-
-_cache_lock = _thread_allocate_lock()
-# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
-# first!
-_TimeRE_cache = TimeRE()
-_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
-_regex_cache = {}
-
-cdef _calc_julian_from_U_or_W(int year, int week_of_year,
- int day_of_week, int week_starts_Mon):
- """Calculate the Julian day based on the year, week of the year, and day of
- the week, with week_start_day representing whether the week of the year
- assumes the week starts on Sunday or Monday (6 or 0)."""
-
- cdef:
- int first_weekday, week_0_length, days_to_week
-
- first_weekday = datetime_date(year, 1, 1).weekday()
- # If we are dealing with the %U directive (week starts on Sunday), it's
- # easier to just shift the view to Sunday being the first day of the
- # week.
- if not week_starts_Mon:
- first_weekday = (first_weekday + 1) % 7
- day_of_week = (day_of_week + 1) % 7
- # Need to watch out for a week 0 (when the first day of the year is not
- # the same as that specified by %U or %W).
- week_0_length = (7 - first_weekday) % 7
- if week_of_year == 0:
- return 1 + day_of_week - first_weekday
- else:
- days_to_week = week_0_length + (7 * (week_of_year - 1))
- return 1 + days_to_week + day_of_week
-
-# def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"):
-# return _strptime(data_string, format)[0]
diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx
new file mode 100644
index 0000000000000..20b24d6be9a58
--- /dev/null
+++ b/pandas/_libs/tslibs/strptime.pyx
@@ -0,0 +1,640 @@
+# -*- coding: utf-8 -*-
+# cython: profile=False
+"""Strptime-related classes and functions.
+"""
+import time
+import locale
+import calendar
+import re
+
+
+# Python 2 vs Python 3
+try:
+ from thread import allocate_lock as _thread_allocate_lock
+except:
+ try:
+ from _thread import allocate_lock as _thread_allocate_lock
+ except:
+ try:
+ from dummy_thread import allocate_lock as _thread_allocate_lock
+ except:
+ from _dummy_thread import allocate_lock as _thread_allocate_lock
+
+
+from cython cimport Py_ssize_t
+from cpython cimport PyFloat_Check
+
+cimport cython
+
+import numpy as np
+cimport numpy as np
+from numpy cimport ndarray, int64_t
+
+from datetime import date as datetime_date
+from datetime cimport datetime
+
+# This is src/datetime.pxd
+from datetime cimport (
+ PANDAS_FR_ns,
+ check_dts_bounds,
+ pandas_datetimestruct,
+ pandas_datetimestruct_to_datetime)
+
+from util cimport is_string_object, get_nat
+
+cdef int64_t NPY_NAT = get_nat()
+
+cdef set _nat_strings = set(['NaT', 'nat', 'NAT', 'nan', 'NaN', 'NAN'])
+
+
+# TODO: Consolidate with other implementations
+cdef inline bint _checknull_with_nat(object val):
+ """ utility to check if a value is a nat or not """
+ return (val is None or
+ (PyFloat_Check(val) and val != val) or
+ (isinstance(val, datetime) and not val == val))
+
+
+def array_strptime(ndarray[object] values, object fmt,
+ bint exact=True, errors='raise'):
+ """
+ Calculates the datetime structs represented by the passed array of strings
+
+ Parameters
+ ----------
+ values : ndarray of string-like objects
+ fmt : string-like regex
+ exact : matches must be exact if True, search if False
+ coerce : if invalid values found, coerce to NaT
+ """
+
+ cdef:
+ Py_ssize_t i, n = len(values)
+ pandas_datetimestruct dts
+ ndarray[int64_t] iresult
+ int year, month, day, minute, hour, second, weekday, julian, tz
+ int week_of_year, week_of_year_start
+ int64_t us, ns
+ object val, group_key, ampm, found
+ dict found_key
+ bint is_raise = errors=='raise'
+ bint is_ignore = errors=='ignore'
+ bint is_coerce = errors=='coerce'
+
+ assert is_raise or is_ignore or is_coerce
+
+ global _TimeRE_cache, _regex_cache
+ with _cache_lock:
+ if _getlang() != _TimeRE_cache.locale_time.lang:
+ _TimeRE_cache = TimeRE()
+ _regex_cache.clear()
+ if len(_regex_cache) > _CACHE_MAX_SIZE:
+ _regex_cache.clear()
+ locale_time = _TimeRE_cache.locale_time
+ format_regex = _regex_cache.get(fmt)
+ if not format_regex:
+ try:
+ format_regex = _TimeRE_cache.compile(fmt)
+ # KeyError raised when a bad format is found; can be specified as
+ # \\, in which case it was a stray % but with a space after it
+ except KeyError, err:
+ bad_directive = err.args[0]
+ if bad_directive == "\\":
+ bad_directive = "%"
+ del err
+ raise ValueError("'%s' is a bad directive in format '%s'" %
+ (bad_directive, fmt))
+ # IndexError only occurs when the format string is "%"
+ except IndexError:
+ raise ValueError("stray %% in format '%s'" % fmt)
+ _regex_cache[fmt] = format_regex
+
+ result = np.empty(n, dtype='M8[ns]')
+ iresult = result.view('i8')
+
+ dts.us = dts.ps = dts.as = 0
+
+ cdef dict _parse_code_table = {
+ 'y': 0,
+ 'Y': 1,
+ 'm': 2,
+ 'B': 3,
+ 'b': 4,
+ 'd': 5,
+ 'H': 6,
+ 'I': 7,
+ 'M': 8,
+ 'S': 9,
+ 'f': 10,
+ 'A': 11,
+ 'a': 12,
+ 'w': 13,
+ 'j': 14,
+ 'U': 15,
+ 'W': 16,
+ 'Z': 17,
+ 'p': 18 # just an additional key, works only with I
+ }
+ cdef int parse_code
+
+ for i in range(n):
+ val = values[i]
+ if is_string_object(val):
+ if val in _nat_strings:
+ iresult[i] = NPY_NAT
+ continue
+ else:
+ if _checknull_with_nat(val):
+ iresult[i] = NPY_NAT
+ continue
+ else:
+ val = str(val)
+
+ # exact matching
+ if exact:
+ found = format_regex.match(val)
+ if not found:
+ if is_coerce:
+ iresult[i] = NPY_NAT
+ continue
+ raise ValueError("time data %r does not match "
+ "format %r (match)" % (values[i], fmt))
+ if len(val) != found.end():
+ if is_coerce:
+ iresult[i] = NPY_NAT
+ continue
+ raise ValueError("unconverted data remains: %s" %
+ values[i][found.end():])
+
+ # search
+ else:
+ found = format_regex.search(val)
+ if not found:
+ if is_coerce:
+ iresult[i] = NPY_NAT
+ continue
+ raise ValueError("time data %r does not match format "
+ "%r (search)" % (values[i], fmt))
+
+ year = 1900
+ month = day = 1
+ hour = minute = second = ns = us = 0
+ tz = -1
+ # Default to -1 to signify that values not known; not critical to have,
+ # though
+ week_of_year = -1
+ week_of_year_start = -1
+ # weekday and julian defaulted to -1 so as to signal need to calculate
+ # values
+ weekday = julian = -1
+ found_dict = found.groupdict()
+ for group_key in found_dict.iterkeys():
+ # Directives not explicitly handled below:
+ # c, x, X
+ # handled by making out of other directives
+ # U, W
+ # worthless without day of the week
+ parse_code = _parse_code_table[group_key]
+
+ if parse_code == 0:
+ year = int(found_dict['y'])
+ # Open Group specification for strptime() states that a %y
+ #value in the range of [00, 68] is in the century 2000, while
+ #[69,99] is in the century 1900
+ if year <= 68:
+ year += 2000
+ else:
+ year += 1900
+ elif parse_code == 1:
+ year = int(found_dict['Y'])
+ elif parse_code == 2:
+ month = int(found_dict['m'])
+ elif parse_code == 3:
+ # elif group_key == 'B':
+ month = locale_time.f_month.index(found_dict['B'].lower())
+ elif parse_code == 4:
+ # elif group_key == 'b':
+ month = locale_time.a_month.index(found_dict['b'].lower())
+ elif parse_code == 5:
+ # elif group_key == 'd':
+ day = int(found_dict['d'])
+ elif parse_code == 6:
+ # elif group_key == 'H':
+ hour = int(found_dict['H'])
+ elif parse_code == 7:
+ hour = int(found_dict['I'])
+ ampm = found_dict.get('p', '').lower()
+ # If there was no AM/PM indicator, we'll treat this like AM
+ if ampm in ('', locale_time.am_pm[0]):
+ # We're in AM so the hour is correct unless we're
+ # looking at 12 midnight.
+ # 12 midnight == 12 AM == hour 0
+ if hour == 12:
+ hour = 0
+ elif ampm == locale_time.am_pm[1]:
+ # We're in PM so we need to add 12 to the hour unless
+ # we're looking at 12 noon.
+ # 12 noon == 12 PM == hour 12
+ if hour != 12:
+ hour += 12
+ elif parse_code == 8:
+ minute = int(found_dict['M'])
+ elif parse_code == 9:
+ second = int(found_dict['S'])
+ elif parse_code == 10:
+ s = found_dict['f']
+ # Pad to always return nanoseconds
+ s += "0" * (9 - len(s))
+ us = long(s)
+ ns = us % 1000
+ us = us / 1000
+ elif parse_code == 11:
+ weekday = locale_time.f_weekday.index(found_dict['A'].lower())
+ elif parse_code == 12:
+ weekday = locale_time.a_weekday.index(found_dict['a'].lower())
+ elif parse_code == 13:
+ weekday = int(found_dict['w'])
+ if weekday == 0:
+ weekday = 6
+ else:
+ weekday -= 1
+ elif parse_code == 14:
+ julian = int(found_dict['j'])
+ elif parse_code == 15 or parse_code == 16:
+ week_of_year = int(found_dict[group_key])
+ if group_key == 'U':
+ # U starts week on Sunday.
+ week_of_year_start = 6
+ else:
+ # W starts week on Monday.
+ week_of_year_start = 0
+ elif parse_code == 17:
+ # Since -1 is default value only need to worry about setting tz
+ # if it can be something other than -1.
+ found_zone = found_dict['Z'].lower()
+ for value, tz_values in enumerate(locale_time.timezone):
+ if found_zone in tz_values:
+ # Deal w/ bad locale setup where timezone names are the
+ # same and yet time.daylight is true; too ambiguous to
+ # be able to tell what timezone has daylight savings
+ if (time.tzname[0] == time.tzname[1] and
+ time.daylight and found_zone not in (
+ "utc", "gmt")):
+ break
+ else:
+ tz = value
+ break
+ # If we know the wk of the year and what day of that wk, we can figure
+ # out the Julian day of the year.
+ if julian == -1 and week_of_year != -1 and weekday != -1:
+ week_starts_Mon = True if week_of_year_start == 0 else False
+ julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
+ week_starts_Mon)
+ # Cannot pre-calculate datetime_date() since can change in Julian
+ # calculation and thus could have different value for the day of the wk
+ # calculation.
+ try:
+ if julian == -1:
+ # Need to add 1 to result since first day of the year is 1, not
+ # 0.
+ julian = datetime_date(year, month, day).toordinal() - \
+ datetime_date(year, 1, 1).toordinal() + 1
+ else: # Assume that if they bothered to include Julian day it will
+ # be accurate.
+ datetime_result = datetime_date.fromordinal(
+ (julian - 1) + datetime_date(year, 1, 1).toordinal())
+ year = datetime_result.year
+ month = datetime_result.month
+ day = datetime_result.day
+ except ValueError:
+ if is_coerce:
+ iresult[i] = NPY_NAT
+ continue
+ raise
+ if weekday == -1:
+ weekday = datetime_date(year, month, day).weekday()
+
+ dts.year = year
+ dts.month = month
+ dts.day = day
+ dts.hour = hour
+ dts.min = minute
+ dts.sec = second
+ dts.us = us
+ dts.ps = ns * 1000
+
+ iresult[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
+ if check_dts_bounds(&dts):
+ if is_coerce:
+ iresult[i] = NPY_NAT
+ continue
+ else:
+ from pandas._libs.tslib import OutOfBoundsDatetime
+ fmt = '%d-%.2d-%.2d %.2d:%.2d:%.2d' % (dts.year, dts.month,
+ dts.day, dts.hour,
+ dts.min, dts.sec)
+ raise OutOfBoundsDatetime(
+ 'Out of bounds nanosecond timestamp: %s' % fmt)
+
+ return result
+
+
+"""_getlang, LocaleTime, TimeRE, _calc_julian_from_U_or_W are vendored
+from the standard library, see
+https://github.com/python/cpython/blob/master/Lib/_strptime.py
+The original module-level docstring follows.
+
+Strptime-related classes and functions.
+CLASSES:
+ LocaleTime -- Discovers and stores locale-specific time information
+ TimeRE -- Creates regexes for pattern matching a string of text containing
+ time information
+FUNCTIONS:
+ _getlang -- Figure out what language is being used for the locale
+ strptime -- Calculates the time struct represented by the passed-in string
+"""
+
+
+def _getlang():
+ """Figure out what language is being used for the locale"""
+ return locale.getlocale(locale.LC_TIME)
+
+
+class LocaleTime(object):
+ """Stores and handles locale-specific information related to time.
+
+ ATTRIBUTES:
+ f_weekday -- full weekday names (7-item list)
+ a_weekday -- abbreviated weekday names (7-item list)
+ f_month -- full month names (13-item list; dummy value in [0], which
+ is added by code)
+ a_month -- abbreviated month names (13-item list, dummy value in
+ [0], which is added by code)
+ am_pm -- AM/PM representation (2-item list)
+ LC_date_time -- format string for date/time representation (string)
+ LC_date -- format string for date representation (string)
+ LC_time -- format string for time representation (string)
+ timezone -- daylight- and non-daylight-savings timezone representation
+ (2-item list of sets)
+ lang -- Language used by instance (2-item tuple)
+ """
+
+ def __init__(self):
+ """Set all attributes.
+
+ Order of methods called matters for dependency reasons.
+
+ The locale language is set at the offset and then checked again before
+ exiting. This is to make sure that the attributes were not set with a
+ mix of information from more than one locale. This would most likely
+ happen when using threads where one thread calls a locale-dependent
+ function while another thread changes the locale while the function in
+ the other thread is still running. Proper coding would call for
+ locks to prevent changing the locale while locale-dependent code is
+ running. The check here is done in case someone does not think about
+ doing this.
+
+ Only other possible issue is if someone changed the timezone and did
+ not call tz.tzset . That is an issue for the programmer, though,
+ since changing the timezone is worthless without that call.
+
+ """
+ self.lang = _getlang()
+ self.__calc_weekday()
+ self.__calc_month()
+ self.__calc_am_pm()
+ self.__calc_timezone()
+ self.__calc_date_time()
+ if _getlang() != self.lang:
+ raise ValueError("locale changed during initialization")
+
+ def __pad(self, seq, front):
+ # Add '' to seq to either the front (is True), else the back.
+ seq = list(seq)
+ if front:
+ seq.insert(0, '')
+ else:
+ seq.append('')
+ return seq
+
+ def __calc_weekday(self):
+ # Set self.a_weekday and self.f_weekday using the calendar
+ # module.
+ a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
+ f_weekday = [calendar.day_name[i].lower() for i in range(7)]
+ self.a_weekday = a_weekday
+ self.f_weekday = f_weekday
+
+ def __calc_month(self):
+ # Set self.f_month and self.a_month using the calendar module.
+ a_month = [calendar.month_abbr[i].lower() for i in range(13)]
+ f_month = [calendar.month_name[i].lower() for i in range(13)]
+ self.a_month = a_month
+ self.f_month = f_month
+
+ def __calc_am_pm(self):
+ # Set self.am_pm by using time.strftime().
+
+ # The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
+ # magical; just happened to have used it everywhere else where a
+ # static date was needed.
+ am_pm = []
+ for hour in (01, 22):
+ time_tuple = time.struct_time(
+ (1999, 3, 17, hour, 44, 55, 2, 76, 0))
+ am_pm.append(time.strftime("%p", time_tuple).lower())
+ self.am_pm = am_pm
+
+ def __calc_date_time(self):
+ # Set self.date_time, self.date, & self.time by using
+ # time.strftime().
+
+ # Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
+ # overloaded numbers is minimized. The order in which searches for
+ # values within the format string is very important; it eliminates
+ # possible ambiguity for what something represents.
+ time_tuple = time.struct_time((1999, 3, 17, 22, 44, 55, 2, 76, 0))
+ date_time = [None, None, None]
+ date_time[0] = time.strftime("%c", time_tuple).lower()
+ date_time[1] = time.strftime("%x", time_tuple).lower()
+ date_time[2] = time.strftime("%X", time_tuple).lower()
+ replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
+ (self.f_month[3],
+ '%B'), (self.a_weekday[2], '%a'),
+ (self.a_month[3], '%b'), (self.am_pm[1], '%p'),
+ ('1999', '%Y'), ('99', '%y'), ('22', '%H'),
+ ('44', '%M'), ('55', '%S'), ('76', '%j'),
+ ('17', '%d'), ('03', '%m'), ('3', '%m'),
+ # '3' needed for when no leading zero.
+ ('2', '%w'), ('10', '%I')]
+ replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
+ for tz in tz_values])
+ for offset, directive in ((0, '%c'), (1, '%x'), (2, '%X')):
+ current_format = date_time[offset]
+ for old, new in replacement_pairs:
+ # Must deal with possible lack of locale info
+ # manifesting itself as the empty string (e.g., Swedish's
+ # lack of AM/PM info) or a platform returning a tuple of empty
+ # strings (e.g., MacOS 9 having timezone as ('','')).
+ if old:
+ current_format = current_format.replace(old, new)
+ # If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
+ # 2005-01-03 occurs before the first Monday of the year. Otherwise
+ # %U is used.
+ time_tuple = time.struct_time((1999, 1, 3, 1, 1, 1, 6, 3, 0))
+ if '00' in time.strftime(directive, time_tuple):
+ U_W = '%W'
+ else:
+ U_W = '%U'
+ date_time[offset] = current_format.replace('11', U_W)
+ self.LC_date_time = date_time[0]
+ self.LC_date = date_time[1]
+ self.LC_time = date_time[2]
+
+ def __calc_timezone(self):
+ # Set self.timezone by using time.tzname.
+ # Do not worry about possibility of time.tzname[0] == timetzname[1]
+ # and time.daylight; handle that in strptime .
+ try:
+ time.tzset()
+ except AttributeError:
+ pass
+ no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()])
+ if time.daylight:
+ has_saving = frozenset([time.tzname[1].lower()])
+ else:
+ has_saving = frozenset()
+ self.timezone = (no_saving, has_saving)
+
+
+class TimeRE(dict):
+ """
+ Handle conversion from format directives to regexes.
+
+ Creates regexes for pattern matching a string of text containing
+ time information
+ """
+
+ def __init__(self, locale_time=None):
+ """Create keys/values.
+
+ Order of execution is important for dependency reasons.
+
+ """
+ if locale_time:
+ self.locale_time = locale_time
+ else:
+ self.locale_time = LocaleTime()
+ base = super(TimeRE, self)
+ base.__init__({
+ # The " \d" part of the regex is to make %c from ANSI C work
+ 'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
+ 'f': r"(?P<f>[0-9]{1,9})",
+ 'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
+ 'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
+ 'j': (r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|"
+ r"[1-9]\d|0[1-9]|[1-9])"),
+ 'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
+ 'M': r"(?P<M>[0-5]\d|\d)",
+ 'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
+ 'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
+ 'w': r"(?P<w>[0-6])",
+ # W is set below by using 'U'
+ 'y': r"(?P<y>\d\d)",
+ #XXX: Does 'Y' need to worry about having less or more than
+ # 4 digits?
+ 'Y': r"(?P<Y>\d\d\d\d)",
+ 'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
+ 'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
+ 'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
+ 'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
+ 'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
+ 'Z': self.__seqToRE([tz for tz_names in self.locale_time.timezone
+ for tz in tz_names],
+ 'Z'),
+ '%': '%'})
+ base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
+ base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
+ base.__setitem__('x', self.pattern(self.locale_time.LC_date))
+ base.__setitem__('X', self.pattern(self.locale_time.LC_time))
+
+ def __seqToRE(self, to_convert, directive):
+ """Convert a list to a regex string for matching a directive.
+
+ Want possible matching values to be from longest to shortest. This
+ prevents the possibility of a match occuring for a value that also
+ a substring of a larger value that should have matched (e.g., 'abc'
+ matching when 'abcdef' should have been the match).
+
+ """
+ to_convert = sorted(to_convert, key=len, reverse=True)
+ for value in to_convert:
+ if value != '':
+ break
+ else:
+ return ''
+ regex = '|'.join([re.escape(stuff) for stuff in to_convert])
+ regex = '(?P<%s>%s' % (directive, regex)
+ return '%s)' % regex
+
+ def pattern(self, format):
+ """Return regex pattern for the format string.
+
+ Need to make sure that any characters that might be interpreted as
+ regex syntax are escaped.
+
+ """
+ processed_format = ''
+ # The sub() call escapes all characters that might be misconstrued
+ # as regex syntax. Cannot use re.escape since we have to deal with
+ # format directives (%m, etc.).
+ regex_chars = re.compile(r"([\\.^$*+?\(\){}\[\]|])")
+ format = regex_chars.sub(r"\\\1", format)
+ whitespace_replacement = re.compile(r'\s+')
+ format = whitespace_replacement.sub(r'\\s+', format)
+ while '%' in format:
+ directive_index = format.index('%') +1
+ processed_format = "%s%s%s" % (processed_format,
+ format[:directive_index -1],
+ self[format[directive_index]])
+ format = format[directive_index +1:]
+ return "%s%s" % (processed_format, format)
+
+ def compile(self, format):
+ """Return a compiled re object for the format string."""
+ return re.compile(self.pattern(format), re.IGNORECASE)
+
+
+_cache_lock = _thread_allocate_lock()
+# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
+# first!
+_TimeRE_cache = TimeRE()
+_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
+_regex_cache = {}
+
+
+cdef _calc_julian_from_U_or_W(int year, int week_of_year,
+ int day_of_week, int week_starts_Mon):
+ """Calculate the Julian day based on the year, week of the year, and day of
+ the week, with week_start_day representing whether the week of the year
+ assumes the week starts on Sunday or Monday (6 or 0)."""
+
+ cdef:
+ int first_weekday, week_0_length, days_to_week
+
+ first_weekday = datetime_date(year, 1, 1).weekday()
+ # If we are dealing with the %U directive (week starts on Sunday), it's
+ # easier to just shift the view to Sunday being the first day of the
+ # week.
+ if not week_starts_Mon:
+ first_weekday = (first_weekday + 1) % 7
+ day_of_week = (day_of_week + 1) % 7
+
+ # Need to watch out for a week 0 (when the first day of the year is not
+ # the same as that specified by %U or %W).
+ week_0_length = (7 - first_weekday) % 7
+ if week_of_year == 0:
+ return 1 + day_of_week - first_weekday
+ else:
+ days_to_week = week_0_length + (7 * (week_of_year - 1))
+ return 1 + days_to_week + day_of_week
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 95fe3ab83c2ab..bf89509fd1746 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -3,6 +3,7 @@
from collections import MutableMapping
from pandas._libs import lib, tslib
+from pandas._libs.tslibs.strptime import array_strptime
from pandas._libs.tslibs.timezones import get_timezone
from pandas.core.dtypes.common import (
@@ -416,8 +417,8 @@ def _convert_listlike(arg, box, format, name=None, tz=tz):
# fallback
if result is None:
try:
- result = tslib.array_strptime(arg, format, exact=exact,
- errors=errors)
+ result = array_strptime(arg, format, exact=exact,
+ errors=errors)
except tslib.OutOfBoundsDatetime:
if errors == 'raise':
raise
diff --git a/setup.py b/setup.py
index 555cf9dc4a9b3..25a4924dad0bc 100755
--- a/setup.py
+++ b/setup.py
@@ -471,7 +471,6 @@ def pxd(name):
'pandas/_libs/src/datetime/np_datetime_strings.h',
'pandas/_libs/src/datetime.pxd']
-
# some linux distros require it
libraries = ['m'] if not is_platform_windows() else []
@@ -483,6 +482,10 @@ def pxd(name):
'pxdfiles': ['_libs/hashtable'],
'depends': (['pandas/_libs/src/klib/khash_python.h']
+ _pxi_dep['hashtable'])},
+ '_libs.tslibs.strptime': {'pyxfile': '_libs/tslibs/strptime',
+ 'depends': tseries_depends,
+ 'sources': ['pandas/_libs/src/datetime/np_datetime.c',
+ 'pandas/_libs/src/datetime/np_datetime_strings.c']},
'_libs.tslib': {'pyxfile': '_libs/tslib',
'pxdfiles': ['_libs/src/util', '_libs/lib'],
'depends': tseries_depends,
| This is the 2nd of an N part series of PRs to split `tslib` into independent modules.
At the moment there is a big chunk of code at the bottom of `tslib` that looks like it was pasted in from somewhere else. The header for that section of the file reads `# Don't even ask`. So I won't.
The new `tslibs.strptime` only used in one place: `array_strptime` is called in `tools.datetimes`. Other than that, nothing needs to be exposed, and nothing else in `tslib` relies on it.
The one function from `tslib` that `strptime` _does_ need is `_check_dts_bounds`. This (mostly) moves that up to datetime.pxd, which both `tslib` and `strptime` already import anyway.
This is _mostly_ a copy/paste of the existing functions+classes. I cleaned up a couple of places where variables used camelCase.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17342 | 2017-08-26T02:20:58Z | 2017-09-25T10:07:47Z | 2017-09-25T10:07:47Z | 2017-10-30T16:23:23Z |
CLN: remove total_seconds compat from json | diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index 4beaa3fd449df..1ee862b54cf0b 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -329,7 +329,7 @@ static Py_ssize_t get_attr_length(PyObject *obj, char *attr) {
return ret;
}
-npy_int64 get_long_attr(PyObject *o, const char *attr) {
+static npy_int64 get_long_attr(PyObject *o, const char *attr) {
npy_int64 long_val;
PyObject *value = PyObject_GetAttrString(o, attr);
long_val = (PyLong_Check(value) ?
@@ -338,15 +338,12 @@ npy_int64 get_long_attr(PyObject *o, const char *attr) {
return long_val;
}
-npy_float64 total_seconds(PyObject *td) {
- // Python 2.6 compat
- // TODO(anyone): remove this legacy workaround with a more
- // direct td.total_seconds()
- npy_int64 microseconds = get_long_attr(td, "microseconds");
- npy_int64 seconds = get_long_attr(td, "seconds");
- npy_int64 days = get_long_attr(td, "days");
- npy_int64 days_in_seconds = days * 24LL * 3600LL;
- return (microseconds + (seconds + days_in_seconds) * 1000000.0) / 1000000.0;
+static npy_float64 total_seconds(PyObject *td) {
+ npy_float64 double_val;
+ PyObject *value = PyObject_CallMethod(td, "total_seconds", NULL);
+ double_val = PyFloat_AS_DOUBLE(value);
+ Py_DECREF(value);
+ return double_val;
}
static PyObject *get_item(PyObject *obj, Py_ssize_t i) {
| - [x] closes #17340
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
cc @jbrockmendel
| https://api.github.com/repos/pandas-dev/pandas/pulls/17341 | 2017-08-25T22:26:46Z | 2017-08-29T10:06:30Z | 2017-08-29T10:06:30Z | 2017-08-29T10:06:36Z |
ENH - GH:17160, DataFrame.pivot accepts a list of values | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index f760d0b6359a2..243cae1ac79e5 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -129,7 +129,7 @@ Other Enhancements
- `read_*` methods can now infer compression from non-string paths, such as ``pathlib.Path`` objects (:issue:`17206`).
- :func:`pd.read_sas()` now recognizes much more of the most frequently used date (datetime) formats in SAS7BDAT files (:issue:`15871`).
- :func:`DataFrame.items` and :func:`Series.items` is now present in both Python 2 and 3 and is lazy in all cases (:issue:`13918`, :issue:`17213`)
-
+- :func:`DataFrame.pivot` now accepts a list of values (:issue:`17160`).
.. _whatsnew_0210.api_breaking:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b5b3df64d24c0..857ac43586432 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4102,7 +4102,7 @@ def pivot(self, index=None, columns=None, values=None):
existing index.
columns : string or object
Column name to use to make new frame's columns
- values : string or object, optional
+ values : string, object or a list of the previous, optional
Column name to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 455da9246783c..42f5cd2ae3c22 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -367,15 +367,17 @@ def pivot(self, index=None, columns=None, values=None):
cols = [columns] if index is None else [index, columns]
append = index is None
indexed = self.set_index(cols, append=append)
- return indexed.unstack(columns)
else:
- if index is None:
- index = self.index
+ index = self.index if index is None else self[index]
+ index = MultiIndex.from_arrays([index, self[columns]])
+ if isinstance(values, list):
+ indexed = DataFrame(self[values].values,
+ index=index,
+ columns=values)
else:
- index = self[index]
- indexed = Series(self[values].values,
- index=MultiIndex.from_arrays([index, self[columns]]))
- return indexed.unstack(columns)
+ indexed = Series(self[values].values,
+ index=index)
+ return indexed.unstack(columns)
def pivot_simple(index, columns, values):
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 879ac96680fbb..10519a0db96c2 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -351,6 +351,29 @@ def test_pivot_periods(self):
pv = df.pivot(index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
+ def test_pivot_with_multi_values(self):
+ df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
+ 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
+ 'baz': [1, 2, 3, 4, 5, 6],
+ 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
+
+ results = df.pivot(index='zoo', columns='foo', values=['bar', 'baz'])
+
+ data = [[None, 'A', None, 4],
+ [None, 'C', None, 6],
+ [None, 'B', None, 5],
+ ['A', None, 1, None],
+ ['B', None, 2, None],
+ ['C', None, 3, None]]
+ index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
+ columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
+ labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
+ names=[None, 'foo'])
+ expected = DataFrame(data=data, index=index,
+ columns=columns, dtype='object')
+
+ tm.assert_frame_equal(results, expected)
+
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
| - [x] closes #17160
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17339 | 2017-08-25T19:58:11Z | 2017-11-25T16:15:25Z | null | 2017-11-25T16:15:25Z |
Revise What's New for inferring compression from non-string paths | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 261e12b824509..32dbeb32154e6 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -137,7 +137,7 @@ Other Enhancements
- :func:`date_range` now accepts 'Y' in addition to 'A' as an alias for end of year (:issue:`9313`)
- Integration with `Apache Parquet <https://parquet.apache.org/>`__, including a new top-level :func:`read_parquet` and :func:`DataFrame.to_parquet` method, see :ref:`here <io.parquet>`. (:issue:`15838`, :issue:`17438`)
- :func:`DataFrame.add_prefix` and :func:`DataFrame.add_suffix` now accept strings containing the '%' character. (:issue:`17151`)
-- `read_*` methods can now infer compression from non-string paths, such as ``pathlib.Path`` objects (:issue:`17206`).
+- Read/write methods that infer compression (:func:`read_csv`, :func:`read_table`, :func:`read_pickle`, and :meth:`~DataFrame.to_pickle`) can now infer from non-string paths, such as ``pathlib.Path`` objects (:issue:`17206`).
- :func:`pd.read_sas()` now recognizes much more of the most frequently used date (datetime) formats in SAS7BDAT files (:issue:`15871`).
- :func:`DataFrame.items` and :func:`Series.items` is now present in both Python 2 and 3 and is lazy in all cases (:issue:`13918`, :issue:`17213`)
- :func:`Styler.where` has been implemented. It is as a convenience for :func:`Styler.applymap` and enables simple DataFrame styling on the Jupyter notebook (:issue:`17474`).
| This pull request improves the What's New message corresponding to https://github.com/pandas-dev/pandas/pull/17206, which updated `io.common._infer_compression` to infer compression from non-string paths.
Refs https://github.com/pandas-dev/pandas/issues/17262
Refs https://github.com/pandas-dev/pandas/pull/17206#issuecomment-322586996 | https://api.github.com/repos/pandas-dev/pandas/pulls/17338 | 2017-08-25T18:59:31Z | 2017-09-24T09:56:13Z | 2017-09-24T09:56:13Z | 2017-09-24T09:56:27Z |
Add dunder method - __ne__ as it should complement __eq__ | diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index dc2c56ea476f9..c0b91c7761be4 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -142,6 +142,9 @@ def __eq__(self, other):
return isinstance(other, CategoricalDtype)
+ def __ne__(self, other):
+ return not self == other
+
@classmethod
def construct_from_string(cls, string):
""" attempt to construct this type from a string, raise a TypeError if
@@ -268,6 +271,9 @@ def __eq__(self, other):
self.unit == other.unit and
str(self.tz) == str(other.tz))
+ def __ne__(self, other):
+ return not self == other
+
class PeriodDtypeType(type):
"""
@@ -364,6 +370,9 @@ def __eq__(self, other):
return isinstance(other, PeriodDtype) and self.freq == other.freq
+ def __ne__(self, other):
+ return not self == other
+
@classmethod
def is_dtype(cls, dtype):
"""
@@ -486,6 +495,9 @@ def __eq__(self, other):
return (isinstance(other, IntervalDtype) and
self.subtype == other.subtype)
+ def __ne__(self, other):
+ return not self == other
+
@classmethod
def is_dtype(cls, dtype):
"""
diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py
index 3c6b922178abf..5069592277d39 100644
--- a/pandas/core/indexes/frozen.py
+++ b/pandas/core/indexes/frozen.py
@@ -51,6 +51,9 @@ def __eq__(self, other):
other = list(other)
return super(FrozenList, self).__eq__(other)
+ def __ne__(self, other):
+ return not self == other
+
__req__ = __eq__
def __mul__(self, other):
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 82c80a13372d7..1cedba4af984e 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1826,6 +1826,9 @@ def __eq__(self, other):
return all([getattr(self, a, None) == getattr(other, a, None)
for a in ['name', 'cname', 'dtype', 'pos']])
+ def __ne__(self, other):
+ return not self == other
+
def set_data(self, data, dtype=None):
self.data = data
if data is not None:
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 253ed03c25db9..b4c9cd49d6e5d 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -805,6 +805,9 @@ def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.string == other.string and self.value == other.value)
+ def __ne__(self, other):
+ return not self == other
+
@classmethod
def get_base_missing_value(cls, dtype):
if dtype == np.int8:
diff --git a/scripts/gen_release_notes.py b/scripts/gen_release_notes.py
index 7e4ffca59a0ab..72fe1895f6ce7 100644
--- a/scripts/gen_release_notes.py
+++ b/scripts/gen_release_notes.py
@@ -16,6 +16,9 @@ def __eq__(self, other):
return self.number == other.number
return False
+ def __ne__(self, other):
+ return not self == other
+
class Issue(object):
@@ -32,6 +35,9 @@ def __eq__(self, other):
return self.number == other.number
return False
+ def __ne__(self, other):
+ return not self == other
+
def get_issues():
all_issues = []
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/17337 | 2017-08-25T18:59:19Z | 2017-08-26T10:29:17Z | null | 2023-05-11T01:16:17Z |
ENH: pd.DataFrame.info() to show line numbers GH17304 | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 034a56b2ac0cb..9891a39aa9713 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -66,6 +66,35 @@ Current Behavior:
result
+.. _whatsnew_0240.enhancements.output_formatting:
+
+Output Formatting Enhancements
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- :func:`DataFrame.info` now shows line numbers for the columns summary (:issue:`17304`)
+
+.. ipython:: python
+
+ df = pd.DataFrame({
+ 'int_col': [1, 2, 3, 4, 5],
+ 'text_col': ['alpha', 'beta', 'gamma', 'delta', 'epsilon'],
+ 'float_col': [0.0, 0.25, 0.5, 0.75, 1.0]})
+ df.info()
+
+Previous Behavior:
+
+.. code-block:: python
+
+ In [1]: df.info()
+ <class 'pandas.core.frame.DataFrame'>
+ RangeIndex: 5 entries, 0 to 4
+ Data columns (total 3 columns):
+ int_col 5 non-null int64
+ text_col 5 non-null object
+ float_col 5 non-null float64
+ dtypes: float64(1), int64(1), object(1)
+ memory usage: 200.0+ bytes
+
.. _whatsnew_0240.enhancements.other:
Other Enhancements
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 66f51cd0dae45..4d1663a2d3df8 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2121,9 +2121,11 @@ def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
- int_col 5 non-null int64
- text_col 5 non-null object
- float_col 5 non-null float64
+ #. Column Non-Null Count & Dtype
+ --- ------ ----------------------
+ 0 int_col 5 non-null int64
+ 1 text_col 5 non-null object
+ 2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 200.0+ bytes
@@ -2161,9 +2163,11 @@ def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
- column_1 1000000 non-null object
- column_2 1000000 non-null object
- column_3 1000000 non-null object
+ #. Column Non-Null Count & Dtype
+ --- ------ ----------------------
+ 0 column_1 1000000 non-null object
+ 1 column_2 1000000 non-null object
+ 2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
@@ -2171,9 +2175,11 @@ def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
- column_1 1000000 non-null object
- column_2 1000000 non-null object
- column_3 1000000 non-null object
+ #. Column Non-Null Count & Dtype
+ --- ------ ----------------------
+ 0 column_1 1000000 non-null object
+ 1 column_2 1000000 non-null object
+ 2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 188.8 MB
"""
@@ -2192,48 +2198,62 @@ def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
return
cols = self.columns
+ cols_count = len(cols)
# hack
if max_cols is None:
- max_cols = get_option('display.max_info_columns',
- len(self.columns) + 1)
+ max_cols = get_option('display.max_info_columns', cols_count + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
if null_counts is None:
- show_counts = ((len(self.columns) <= max_cols) and
+ show_counts = ((cols_count <= max_cols) and
(len(self) < max_rows))
else:
show_counts = null_counts
- exceeds_info_cols = len(self.columns) > max_cols
+ exceeds_info_cols = cols_count > max_cols
def _verbose_repr():
- lines.append('Data columns (total %d columns):' %
- len(self.columns))
- space = max(len(pprint_thing(k)) for k in self.columns) + 4
+ lines.append('Data columns (total '
+ '{count} columns):'.format(count=cols_count))
+ space = max(len(pprint_thing(k)) for k in cols)
+ len_column = len(pprint_thing('Column'))
+ space = max(space, len_column) + 4
+ space_num = len(pprint_thing(cols_count))
+ len_id = len(pprint_thing(' #.'))
+ space_num = max(space_num, len_id) + 2
counts = None
- tmpl = "{count}{dtype}"
+ header = _put_str(' #.', space_num) + _put_str('Column', space)
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError(
'Columns must equal counts '
- '({cols:d} != {counts:d})'.format(
- cols=len(cols), counts=len(counts)))
- tmpl = "{count} non-null {dtype}"
-
+ '({cols_count} != {count})'.format(
+ cols_count=cols_count, count=len(counts)))
+ col_header = 'Non-Null Count & Dtype'
+ tmpl = '{count} non-null {dtype}'
+ else:
+ col_header = 'Dtype'
+ tmpl = '{count}{dtype}'
+ header += col_header
+
+ lines.append(header)
+ lines.append(_put_str('-' * len_id, space_num) +
+ _put_str('-' * len_column, space) +
+ '-' * len(pprint_thing(col_header)))
dtypes = self.dtypes
- for i, col in enumerate(self.columns):
+ for i, col in enumerate(cols):
dtype = dtypes.iloc[i]
col = pprint_thing(col)
-
- count = ""
+ line_no = _put_str(' {num}'.format(num=i), space_num)
+ count = ''
if show_counts:
count = counts.iloc[i]
- lines.append(_put_str(col, space) + tmpl.format(count=count,
- dtype=dtype))
+ lines.append(line_no + _put_str(col, space) +
+ tmpl.format(count=count, dtype=dtype))
def _non_verbose_repr():
lines.append(self.columns._summary(name='Columns'))
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index 668613c494a47..ac6bb8d78c072 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -217,13 +217,33 @@ def test_info_memory(self):
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2 entries, 0 to 1
Data columns (total 1 columns):
- a 2 non-null int64
+ #. Column Non-Null Count & Dtype
+ --- ------ ----------------------
+ 0 a 2 non-null int64
dtypes: int64(1)
memory usage: {} bytes
""".format(bytes))
assert result == expected
+ def test_info_without_null_counts(self):
+ df = pd.DataFrame({'a': [1, 2]})
+ buf = StringIO()
+ df.info(buf=buf, null_counts=False)
+ buf.seek(0)
+ lines = buf.readlines()
+ result = ''.join(lines[:-1])
+ expected = textwrap.dedent('''\
+ <class 'pandas.core.frame.DataFrame'>
+ RangeIndex: 2 entries, 0 to 1
+ Data columns (total 1 columns):
+ #. Column Dtype
+ --- ------ -----
+ 0 a int64
+ dtypes: int64(1)
+ ''')
+ assert result == expected
+
def test_info_wide(self):
from pandas import set_option, reset_option
io = StringIO()
@@ -259,8 +279,8 @@ def test_info_duplicate_columns_shows_correct_dtypes(self):
frame.info(buf=io)
io.seek(0)
lines = io.readlines()
- assert 'a 1 non-null int64\n' == lines[3]
- assert 'a 1 non-null float64\n' == lines[4]
+ assert ' 0 a 1 non-null int64\n' == lines[5]
+ assert ' 1 a 1 non-null float64\n' == lines[6]
def test_info_shows_column_dtypes(self):
dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',
@@ -274,12 +294,13 @@ def test_info_shows_column_dtypes(self):
df.info(buf=buf)
res = buf.getvalue()
for i, dtype in enumerate(dtypes):
- name = '%d %d non-null %s' % (i, n, dtype)
+ name = '%s %d non-null %s' % (i, n, dtype)
+
assert name in res
def test_info_max_cols(self):
df = DataFrame(np.random.randn(10, 5))
- for len_, verbose in [(5, None), (5, False), (10, True)]:
+ for len_, verbose in [(5, None), (5, False), (12, True)]:
# For verbose always ^ setting ^ summarize ^ full output
with option_context('max_info_columns', 4):
buf = StringIO()
@@ -287,8 +308,7 @@ def test_info_max_cols(self):
res = buf.getvalue()
assert len(res.strip().split('\n')) == len_
- for len_, verbose in [(10, None), (5, False), (10, True)]:
-
+ for len_, verbose in [(12, None), (5, False), (12, True)]:
# max_cols no exceeded
with option_context('max_info_columns', 5):
buf = StringIO()
@@ -296,7 +316,7 @@ def test_info_max_cols(self):
res = buf.getvalue()
assert len(res.strip().split('\n')) == len_
- for len_, max_cols in [(10, 5), (5, 4)]:
+ for len_, max_cols in [(12, 5), (5, 4)]:
# setting truncates
with option_context('max_info_columns', 4):
buf = StringIO()
| - [x] closes #17304
- [x] tests updated and passed
- [x] passes flake8 diff
- [x] whatsnew entry
Refactored to `self.columns` and `len(self.columns)`
New output
```
>>> import pandas as pd
>>> df = pd.DataFrame(pd.np.random.rand(4, 10),
columns=['%s%s' % (x, pd.np.random.randint(2, 10)*'a') for x in range(10)])
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 4 entries, 0 to 3
Data columns (total 10 columns):
#. Column Non-Null Count & Dtype
--- ------ ----------------------
0 0aaaaaaaaa 4 non-null float64
1 1aaaaa 4 non-null float64
2 2aaaaaa 4 non-null float64
3 3aaaaaa 4 non-null float64
4 4aaaaaa 4 non-null float64
5 5aaaaaaa 4 non-null float64
6 6aa 4 non-null float64
7 7aaaaaaa 4 non-null float64
8 8aaaaaaa 4 non-null float64
9 9aaaaaaaaa 4 non-null float64
dtypes: float64(10)
memory usage: 392.0 bytes
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/17332 | 2017-08-25T08:42:20Z | 2019-02-27T23:31:54Z | null | 2019-09-09T17:02:16Z |
Remove property that re-computed microsecond | diff --git a/asv_bench/benchmarks/timestamp.py b/asv_bench/benchmarks/timestamp.py
new file mode 100644
index 0000000000000..066479b22739a
--- /dev/null
+++ b/asv_bench/benchmarks/timestamp.py
@@ -0,0 +1,60 @@
+from .pandas_vb_common import *
+from pandas import to_timedelta, Timestamp
+
+
+class TimestampProperties(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.ts = Timestamp('2017-08-25 08:16:14')
+
+ def time_tz(self):
+ self.ts.tz
+
+ def time_offset(self):
+ self.ts.offset
+
+ def time_dayofweek(self):
+ self.ts.dayofweek
+
+ def time_weekday_name(self):
+ self.ts.weekday_name
+
+ def time_dayofyear(self):
+ self.ts.dayofyear
+
+ def time_week(self):
+ self.ts.week
+
+ def time_quarter(self):
+ self.ts.quarter
+
+ def time_days_in_month(self):
+ self.ts.days_in_month
+
+ def time_freqstr(self):
+ self.ts.freqstr
+
+ def time_is_month_start(self):
+ self.ts.is_month_start
+
+ def time_is_month_end(self):
+ self.ts.is_month_end
+
+ def time_is_quarter_start(self):
+ self.ts.is_quarter_start
+
+ def time_is_quarter_end(self):
+ self.ts.is_quarter_end
+
+ def time_is_year_start(self):
+ self.ts.is_quarter_end
+
+ def time_is_year_end(self):
+ self.ts.is_quarter_end
+
+ def time_is_leap_year(self):
+ self.ts.is_quarter_end
+
+ def time_microsecond(self):
+ self.ts.microsecond
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index fcadd26156b1d..6fec3db91cc51 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -325,7 +325,7 @@ Performance Improvements
- Improved performance of instantiating :class:`SparseDataFrame` (:issue:`16773`)
- :attr:`Series.dt` no longer performs frequency inference, yielding a large speedup when accessing the attribute (:issue:`17210`)
-
+- :attr:`Timestamp.microsecond` no longer re-computes on attribute access (:issue:`17331`)
.. _whatsnew_0210.bug_fixes:
diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx
index a1d04fea89151..303823f41eb61 100644
--- a/pandas/_libs/period.pyx
+++ b/pandas/_libs/period.pyx
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
from datetime import datetime, date, timedelta
import operator
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index c4a38ec660a4c..4db2b0ba72832 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -548,10 +548,6 @@ class Timestamp(_Timestamp):
weekofyear = week
- @property
- def microsecond(self):
- return self._get_field('us')
-
@property
def quarter(self):
return self._get_field('q')
| Just accessing the already-existing attribute is about 50-130x faster than re-computing it. These results are from a 2011-era MBP.
Before:
```
In [1]: import pandas as pd
In [2]: ts = pd.Timestamp.now()
In [3]: %timeit ts.microsecond
The slowest run took 4.69 times longer than the fastest. This could mean that an intermediate result is being cached.
10000 loops, best of 3: 29.7 µs per loop
```
After:
```
In [1]: import pandas as pd
In [2]: ts = pd.Timestamp.now()
In [3]: %timeit ts.microsecond
The slowest run took 8.08 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 620 ns per loop
```
This accounts for the discrepancy discussed in #17234 (https://github.com/pandas-dev/pandas/pull/17297#pullrequestreview-57686247)
Per requests from @jreback, I'm refraining from moving forward with any other optimizations that this makes possible (e.g. removing the roundabout call to `convert_to_tsobject` from `Timestamp.to_pydatetime`).
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17331 | 2017-08-24T23:12:25Z | 2017-09-07T00:51:51Z | 2017-09-07T00:51:51Z | 2017-10-30T16:23:45Z |
BUG: Set index when reading stata file | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 33b7e128ef8bf..38727c2526bb5 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -293,6 +293,7 @@ Other API Changes
- :func:`Series.argmin` and :func:`Series.argmax` will now raise a ``TypeError`` when used with ``object`` dtypes, instead of a ``ValueError`` (:issue:`13595`)
- :class:`Period` is now immutable, and will now raise an ``AttributeError`` when a user tries to assign a new value to the ``ordinal`` or ``freq`` attributes (:issue:`17116`).
- :func:`to_datetime` when passed a tz-aware ``origin=`` kwarg will now raise a more informative ``ValueError`` rather than a ``TypeError`` (:issue:`16842`)
+- Renamed non-functional ``index`` to ``index_col`` in :func:`read_stata` to improve API consistency (:issue:`16342`)
.. _whatsnew_0210.deprecations:
@@ -370,6 +371,7 @@ I/O
- Bug in :func:`read_csv` when called with ``low_memory=False`` in which a CSV with at least one column > 2GB in size would incorrectly raise a ``MemoryError`` (:issue:`16798`).
- Bug in :func:`read_csv` when called with a single-element list ``header`` would return a ``DataFrame`` of all NaN values (:issue:`7757`)
- Bug in :func:`read_stata` where value labels could not be read when using an iterator (:issue:`16923`)
+- Bug in :func:`read_stata` where the index was not set (:issue:`16342`)
- Bug in :func:`read_html` where import check fails when run in multiple threads (:issue:`16928`)
Plotting
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 253ed03c25db9..b303b1000ff4d 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -9,31 +9,30 @@
You can find more information on http://presbrey.mit.edu/PyDTA and
http://www.statsmodels.org/devel/
"""
-import numpy as np
-import sys
+import datetime
import struct
-from dateutil.relativedelta import relativedelta
+import sys
-from pandas.core.dtypes.common import (
- is_categorical_dtype, is_datetime64_dtype,
- _ensure_object)
+import numpy as np
+from dateutil.relativedelta import relativedelta
+from pandas._libs.lib import max_len_string_array, infer_dtype
+from pandas._libs.tslib import NaT, Timestamp
+import pandas as pd
+from pandas import compat, to_timedelta, to_datetime, isna, DatetimeIndex
+from pandas.compat import (lrange, lmap, lzip, text_type, string_types, range,
+ zip, BytesIO)
from pandas.core.base import StringMixin
from pandas.core.categorical import Categorical
+from pandas.core.dtypes.common import (is_categorical_dtype, _ensure_object,
+ is_datetime64_dtype)
from pandas.core.frame import DataFrame
from pandas.core.series import Series
-import datetime
-from pandas import compat, to_timedelta, to_datetime, isna, DatetimeIndex
-from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \
- zip, BytesIO
-from pandas.util._decorators import Appender
-import pandas as pd
-
from pandas.io.common import (get_filepath_or_buffer, BaseIterator,
_stringify_path)
-from pandas._libs.lib import max_len_string_array, infer_dtype
-from pandas._libs.tslib import NaT, Timestamp
+from pandas.util._decorators import Appender
+from pandas.util._decorators import deprecate_kwarg
VALID_ENCODINGS = ('ascii', 'us-ascii', 'latin-1', 'latin_1', 'iso-8859-1',
'iso8859-1', '8859', 'cp819', 'latin', 'latin1', 'L1')
@@ -53,8 +52,8 @@
Encoding used to parse the files. None defaults to latin-1."""
_statafile_processing_params2 = """\
-index : identifier of index column
- identifier of column that should be used as index of the DataFrame
+index_col : string, optional, default: None
+ Column to set as index
convert_missing : boolean, defaults to False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nans.
@@ -159,15 +158,16 @@
@Appender(_read_stata_doc)
+@deprecate_kwarg(old_arg_name='index', new_arg_name='index_col')
def read_stata(filepath_or_buffer, convert_dates=True,
- convert_categoricals=True, encoding=None, index=None,
+ convert_categoricals=True, encoding=None, index_col=None,
convert_missing=False, preserve_dtypes=True, columns=None,
order_categoricals=True, chunksize=None, iterator=False):
reader = StataReader(filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
- index=index, convert_missing=convert_missing,
+ index_col=index_col, convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
@@ -944,8 +944,9 @@ def __init__(self, encoding):
class StataReader(StataParser, BaseIterator):
__doc__ = _stata_reader_doc
+ @deprecate_kwarg(old_arg_name='index', new_arg_name='index_col')
def __init__(self, path_or_buf, convert_dates=True,
- convert_categoricals=True, index=None,
+ convert_categoricals=True, index_col=None,
convert_missing=False, preserve_dtypes=True,
columns=None, order_categoricals=True,
encoding='latin-1', chunksize=None):
@@ -956,7 +957,7 @@ def __init__(self, path_or_buf, convert_dates=True,
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
- self._index = index
+ self._index_col = index_col
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
@@ -1460,8 +1461,9 @@ def get_chunk(self, size=None):
return self.read(nrows=size)
@Appender(_read_method_doc)
+ @deprecate_kwarg(old_arg_name='index', new_arg_name='index_col')
def read(self, nrows=None, convert_dates=None,
- convert_categoricals=None, index=None,
+ convert_categoricals=None, index_col=None,
convert_missing=None, preserve_dtypes=None,
columns=None, order_categoricals=None):
# Handle empty file or chunk. If reading incrementally raise
@@ -1486,6 +1488,8 @@ def read(self, nrows=None, convert_dates=None,
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
+ if index_col is None:
+ index_col = self._index_col
if nrows is None:
nrows = self.nobs
@@ -1524,14 +1528,14 @@ def read(self, nrows=None, convert_dates=None,
self._read_value_labels()
if len(data) == 0:
- data = DataFrame(columns=self.varlist, index=index)
+ data = DataFrame(columns=self.varlist)
else:
- data = DataFrame.from_records(data, index=index)
+ data = DataFrame.from_records(data)
data.columns = self.varlist
# If index is not specified, use actual row number rather than
# restarting at 0 for each chunk.
- if index is None:
+ if index_col is None:
ix = np.arange(self._lines_read - read_lines, self._lines_read)
data = data.set_index(ix)
@@ -1553,7 +1557,7 @@ def read(self, nrows=None, convert_dates=None,
cols_ = np.where(self.dtyplist)[0]
# Convert columns (if needed) to match input type
- index = data.index
+ ix = data.index
requires_type_conversion = False
data_formatted = []
for i in cols_:
@@ -1563,7 +1567,7 @@ def read(self, nrows=None, convert_dates=None,
if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
requires_type_conversion = True
data_formatted.append(
- (col, Series(data[col], index, self.dtyplist[i])))
+ (col, Series(data[col], ix, self.dtyplist[i])))
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
@@ -1606,6 +1610,9 @@ def read(self, nrows=None, convert_dates=None,
if convert:
data = DataFrame.from_items(retyped_data)
+ if index_col is not None:
+ data = data.set_index(data.pop(index_col))
+
return data
def _do_convert_missing(self, data, convert_missing):
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index a414928d318c4..94a0ac31e093e 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -476,7 +476,7 @@ def test_read_write_reread_dta15(self):
tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
- original = DataFrame([(1,)], columns=['var'])
+ original = DataFrame([(1,)], columns=['variable'])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
@@ -1309,3 +1309,12 @@ def test_value_labels_iterator(self, write_index):
dta_iter = pd.read_stata(path, iterator=True)
value_labels = dta_iter.value_labels()
assert value_labels == {'A': {0: 'A', 1: 'B', 2: 'C', 3: 'E'}}
+
+ def test_set_index(self):
+ # GH 17328
+ df = tm.makeDataFrame()
+ df.index.name = 'index'
+ with tm.ensure_clean() as path:
+ df.to_stata(path)
+ reread = pd.read_stata(path, index_col='index')
+ tm.assert_frame_equal(df, reread)
| Ensures index is set when requested when reading state dta file
closes #16342
- [x] closes #16342
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17328 | 2017-08-24T16:08:14Z | 2017-09-16T13:40:42Z | 2017-09-16T13:40:41Z | 2018-04-22T21:12:03Z |
Fix typo that causes several NaT methods to have incorrect docstrings | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index fcadd26156b1d..7eb4da981888f 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -423,3 +423,4 @@ Categorical
Other
^^^^^
- Bug in :func:`eval` where the ``inplace`` parameter was being incorrectly handled (:issue:`16732`)
+- Several ``NaT`` method docstrings (e.g. :func:`NaT.ctime`) were incorrect (:issue:`17327`)
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index c4a38ec660a4c..9df8cdfe91f9f 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
# cython: profile=False
import warnings
@@ -3925,7 +3926,7 @@ for _method_name in _nat_methods:
def f(*args, **kwargs):
return NaT
f.__name__ = func_name
- f.__doc__ = _get_docstring(_method_name)
+ f.__doc__ = _get_docstring(func_name)
return f
setattr(NaTType, _method_name, _make_nat_func(_method_name))
@@ -3937,7 +3938,7 @@ for _method_name in _nan_methods:
def f(*args, **kwargs):
return np.nan
f.__name__ = func_name
- f.__doc__ = _get_docstring(_method_name)
+ f.__doc__ = _get_docstring(func_name)
return f
setattr(NaTType, _method_name, _make_nan_func(_method_name))
@@ -3955,7 +3956,7 @@ for _maybe_method_name in dir(NaTType):
def f(*args, **kwargs):
raise ValueError("NaTType does not support " + func_name)
f.__name__ = func_name
- f.__doc__ = _get_docstring(_method_name)
+ f.__doc__ = _get_docstring(func_name)
return f
setattr(NaTType, _maybe_method_name,
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index 5f247cae1099b..6f852f2b394e1 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -247,3 +247,8 @@ def test_nat_arithmetic_index():
tm.assert_index_equal(right + left, exp)
tm.assert_index_equal(left - right, exp)
tm.assert_index_equal(right - left, exp)
+
+
+def test_nat_pinned_docstrings():
+ # GH17327
+ assert NaT.ctime.__doc__ == datetime.ctime.__doc__
| This block of code exists to make NaT raise ValueError for each of a bunch of datetime methods. This is supposed to attach the original docstring to the new function f. The typo _method_name instead of func_name means that a bunch of the docstrings are currently wrong.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17327 | 2017-08-24T15:47:46Z | 2017-08-29T17:04:08Z | 2017-08-29T17:04:08Z | 2017-10-30T16:23:46Z |
CLN: replace %s syntax with .format in missing.py, nanops.py, ops.py | diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 93281e20a2a96..8a6a870834c83 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -88,8 +88,8 @@ def clean_fill_method(method, allow_nearest=False):
valid_methods.append('nearest')
expecting = 'pad (ffill), backfill (bfill) or nearest'
if method not in valid_methods:
- msg = ('Invalid fill method. Expecting %s. Got %s' %
- (expecting, method))
+ msg = ('Invalid fill method. Expecting {expecting}. Got {method}'
+ .format(expecting=expecting, method=method))
raise ValueError(msg)
return method
@@ -104,8 +104,8 @@ def clean_interp_method(method, **kwargs):
raise ValueError("You must specify the order of the spline or "
"polynomial.")
if method not in valid:
- raise ValueError("method must be one of {0}."
- "Got '{1}' instead.".format(valid, method))
+ raise ValueError("method must be one of {valid}. Got '{method}' "
+ "instead.".format(valid=valid, method=method))
return method
@@ -146,8 +146,10 @@ def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
valid_limit_directions = ['forward', 'backward', 'both']
limit_direction = limit_direction.lower()
if limit_direction not in valid_limit_directions:
- raise ValueError('Invalid limit_direction: expecting one of %r, got '
- '%r.' % (valid_limit_directions, limit_direction))
+ msg = ('Invalid limit_direction: expecting one of {valid!r}, '
+ 'got {invalid!r}.')
+ raise ValueError(msg.format(valid=valid_limit_directions,
+ invalid=limit_direction))
from pandas import Series
ys = Series(yvalues)
@@ -248,7 +250,8 @@ def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
# TODO: Why is DatetimeIndex being imported here?
from pandas import DatetimeIndex # noqa
except ImportError:
- raise ImportError('{0} interpolation requires Scipy'.format(method))
+ raise ImportError('{method} interpolation requires SciPy'
+ .format(method=method))
new_x = np.asarray(new_x)
@@ -466,7 +469,8 @@ def pad_1d(values, limit=None, mask=None, dtype=None):
dtype = values.dtype
_method = None
if is_float_dtype(values):
- _method = getattr(algos, 'pad_inplace_%s' % dtype.name, None)
+ name = 'pad_inplace_{name}'.format(name=dtype.name)
+ _method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _pad_1d_datetime
elif is_integer_dtype(values):
@@ -476,7 +480,8 @@ def pad_1d(values, limit=None, mask=None, dtype=None):
_method = algos.pad_inplace_object
if _method is None:
- raise ValueError('Invalid dtype for pad_1d [%s]' % dtype.name)
+ raise ValueError('Invalid dtype for pad_1d [{name}]'
+ .format(name=dtype.name))
if mask is None:
mask = isna(values)
@@ -490,7 +495,8 @@ def backfill_1d(values, limit=None, mask=None, dtype=None):
dtype = values.dtype
_method = None
if is_float_dtype(values):
- _method = getattr(algos, 'backfill_inplace_%s' % dtype.name, None)
+ name = 'backfill_inplace_{name}'.format(name=dtype.name)
+ _method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _backfill_1d_datetime
elif is_integer_dtype(values):
@@ -500,7 +506,8 @@ def backfill_1d(values, limit=None, mask=None, dtype=None):
_method = algos.backfill_inplace_object
if _method is None:
- raise ValueError('Invalid dtype for backfill_1d [%s]' % dtype.name)
+ raise ValueError('Invalid dtype for backfill_1d [{name}]'
+ .format(name=dtype.name))
if mask is None:
mask = isna(values)
@@ -515,7 +522,8 @@ def pad_2d(values, limit=None, mask=None, dtype=None):
dtype = values.dtype
_method = None
if is_float_dtype(values):
- _method = getattr(algos, 'pad_2d_inplace_%s' % dtype.name, None)
+ name = 'pad_2d_inplace_{name}'.format(name=dtype.name)
+ _method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _pad_2d_datetime
elif is_integer_dtype(values):
@@ -525,7 +533,8 @@ def pad_2d(values, limit=None, mask=None, dtype=None):
_method = algos.pad_2d_inplace_object
if _method is None:
- raise ValueError('Invalid dtype for pad_2d [%s]' % dtype.name)
+ raise ValueError('Invalid dtype for pad_2d [{name}]'
+ .format(name=dtype.name))
if mask is None:
mask = isna(values)
@@ -544,7 +553,8 @@ def backfill_2d(values, limit=None, mask=None, dtype=None):
dtype = values.dtype
_method = None
if is_float_dtype(values):
- _method = getattr(algos, 'backfill_2d_inplace_%s' % dtype.name, None)
+ name = 'backfill_2d_inplace_{name}'.format(name=dtype.name)
+ _method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _backfill_2d_datetime
elif is_integer_dtype(values):
@@ -554,7 +564,8 @@ def backfill_2d(values, limit=None, mask=None, dtype=None):
_method = algos.backfill_2d_inplace_object
if _method is None:
- raise ValueError('Invalid dtype for backfill_2d [%s]' % dtype.name)
+ raise ValueError('Invalid dtype for backfill_2d [{name}]'
+ .format(name=dtype.name))
if mask is None:
mask = isna(values)
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index b2bbf1c75b7ea..858aed7fd3e23 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -70,9 +70,8 @@ def __call__(self, f):
def _f(*args, **kwargs):
obj_iter = itertools.chain(args, compat.itervalues(kwargs))
if any(self.check(obj) for obj in obj_iter):
- raise TypeError('reduction operation {0!r} not allowed for '
- 'this dtype'.format(
- f.__name__.replace('nan', '')))
+ msg = 'reduction operation {name!r} not allowed for this dtype'
+ raise TypeError(msg.format(name=f.__name__.replace('nan', '')))
try:
with np.errstate(invalid='ignore'):
return f(*args, **kwargs)
@@ -786,7 +785,8 @@ def _ensure_numeric(x):
try:
x = complex(x)
except Exception:
- raise TypeError('Could not convert %s to numeric' % str(x))
+ raise TypeError('Could not convert {value!s} to numeric'
+ .format(value=x))
return x
# NA-friendly array comparisons
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 82101414e4aa6..221f6ff8b92c6 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -63,9 +63,9 @@ def _create_methods(arith_method, comp_method, bool_method,
def names(x):
if x[-1] == "_":
- return "__%s_" % x
+ return "__{name}_".format(name=x)
else:
- return "__%s__" % x
+ return "__{name}__".format(name=x)
else:
names = lambda x: x
@@ -388,8 +388,8 @@ def _validate(self, lvalues, rvalues, name):
if name not in ('__div__', '__truediv__', '__mul__', '__rmul__'):
raise TypeError("can only operate on a timedelta and an "
"integer or a float for division and "
- "multiplication, but the operator [%s] was"
- "passed" % name)
+ "multiplication, but the operator [{name}] "
+ "was passed".format(name=name))
# 2 timedeltas
elif ((self.is_timedelta_lhs and
@@ -400,9 +400,9 @@ def _validate(self, lvalues, rvalues, name):
if name not in ('__div__', '__rdiv__', '__truediv__',
'__rtruediv__', '__add__', '__radd__', '__sub__',
'__rsub__'):
- raise TypeError("can only operate on a timedeltas for "
- "addition, subtraction, and division, but the"
- " operator [%s] was passed" % name)
+ raise TypeError("can only operate on a timedeltas for addition"
+ ", subtraction, and division, but the operator"
+ " [{name}] was passed".format(name=name))
# datetime and timedelta/DateOffset
elif (self.is_datetime_lhs and
@@ -411,23 +411,24 @@ def _validate(self, lvalues, rvalues, name):
if name not in ('__add__', '__radd__', '__sub__'):
raise TypeError("can only operate on a datetime with a rhs of "
"a timedelta/DateOffset for addition and "
- "subtraction, but the operator [%s] was "
- "passed" % name)
+ "subtraction, but the operator [{name}] was "
+ "passed".format(name=name))
elif (self.is_datetime_rhs and
(self.is_timedelta_lhs or self.is_offset_lhs)):
if name not in ('__add__', '__radd__', '__rsub__'):
raise TypeError("can only operate on a timedelta/DateOffset "
"with a rhs of a datetime for addition, "
- "but the operator [%s] was passed" % name)
+ "but the operator [{name}] was passed"
+ .format(name=name))
# 2 datetimes
elif self.is_datetime_lhs and self.is_datetime_rhs:
if name not in ('__sub__', '__rsub__'):
raise TypeError("can only operate on a datetimes for"
- " subtraction, but the operator [%s] was"
- " passed" % name)
+ " subtraction, but the operator [{name}] was"
+ " passed".format(name=name))
# if tz's must be equal (same or None)
if getattr(lvalues, 'tz', None) != getattr(rvalues, 'tz', None):
@@ -439,8 +440,8 @@ def _validate(self, lvalues, rvalues, name):
if name not in ('__add__', '__radd__'):
raise TypeError("can only operate on a timedelta/DateOffset "
- "and a datetime for addition, but the "
- "operator [%s] was passed" % name)
+ "and a datetime for addition, but the operator"
+ " [{name}] was passed".format(name=name))
else:
raise TypeError('cannot operate on a series without a rhs '
'of a series/ndarray of type datetime64[ns] '
@@ -498,7 +499,7 @@ def _convert_to_array(self, values, name=None, other=None):
values = values.to_timestamp().to_series()
elif name not in ('__truediv__', '__div__', '__mul__', '__rmul__'):
raise TypeError("incompatible type for a datetime/timedelta "
- "operation [{0}]".format(name))
+ "operation [{name}]".format(name=name))
elif inferred_type == 'floating':
if (isna(values).all() and
name in ('__add__', '__radd__', '__sub__', '__rsub__')):
@@ -508,8 +509,9 @@ def _convert_to_array(self, values, name=None, other=None):
elif self._is_offset(values):
return values
else:
- raise TypeError("incompatible type [{0}] for a datetime/timedelta"
- " operation".format(np.array(values).dtype))
+ raise TypeError("incompatible type [{dtype}] for a "
+ "datetime/timedelta operation"
+ .format(dtype=np.array(values).dtype))
return values
@@ -866,8 +868,8 @@ def wrapper(self, other, axis=None):
with np.errstate(all='ignore'):
res = na_op(values, other)
if is_scalar(res):
- raise TypeError('Could not compare %s type with Series' %
- type(other))
+ raise TypeError('Could not compare {typ} type with Series'
+ .format(typ=type(other)))
# always return a full value series here
res = _values_from_object(res)
@@ -906,9 +908,10 @@ def na_op(x, y):
y = bool(y)
result = lib.scalar_binop(x, y, op)
except:
- raise TypeError("cannot compare a dtyped [{0}] array with "
- "a scalar of type [{1}]".format(
- x.dtype, type(y).__name__))
+ msg = ("cannot compare a dtyped [{dtype}] array "
+ "with a scalar of type [{type}]"
+ ).format(dtype=x.dtype, type=type(y).__name__)
+ raise TypeError(msg)
return result
@@ -1140,14 +1143,17 @@ def _align_method_FRAME(left, right, axis):
""" convert rhs to meet lhs dims if input is list, tuple or np.ndarray """
def to_series(right):
- msg = 'Unable to coerce to Series, length must be {0}: given {1}'
+ msg = ('Unable to coerce to Series, length must be {req_len}: '
+ 'given {given_len}')
if axis is not None and left._get_axis_name(axis) == 'index':
if len(left.index) != len(right):
- raise ValueError(msg.format(len(left.index), len(right)))
+ raise ValueError(msg.format(req_len=len(left.index),
+ given_len=len(right)))
right = left._constructor_sliced(right, index=left.index)
else:
if len(left.columns) != len(right):
- raise ValueError(msg.format(len(left.columns), len(right)))
+ raise ValueError(msg.format(req_len=len(left.columns),
+ given_len=len(right)))
right = left._constructor_sliced(right, index=left.columns)
return right
@@ -1161,15 +1167,16 @@ def to_series(right):
elif right.ndim == 2:
if left.shape != right.shape:
- msg = ("Unable to coerce to DataFrame, "
- "shape must be {0}: given {1}")
- raise ValueError(msg.format(left.shape, right.shape))
+ msg = ("Unable to coerce to DataFrame, shape "
+ "must be {req_shape}: given {given_shape}"
+ ).format(req_shape=left.shape, given_shape=right.shape)
+ raise ValueError(msg)
right = left._constructor(right, index=left.index,
columns=left.columns)
else:
- msg = 'Unable to coerce to Series/DataFrame, dim must be <= 2: {0}'
- raise ValueError(msg.format(right.shape, ))
+ raise ValueError('Unable to coerce to Series/DataFrame, dim '
+ 'must be <= 2: {dim}'.format(dim=right.shape))
return right
@@ -1278,7 +1285,8 @@ def na_op(x, y):
return result
- @Appender('Wrapper for flexible comparison methods %s' % name)
+ @Appender('Wrapper for flexible comparison methods {name}'
+ .format(name=name))
def f(self, other, axis=default_axis, level=None):
other = _align_method_FRAME(self, other, axis)
@@ -1299,7 +1307,7 @@ def f(self, other, axis=default_axis, level=None):
def _comp_method_FRAME(func, name, str_rep, masker=False):
- @Appender('Wrapper for comparison method %s' % name)
+ @Appender('Wrapper for comparison method {name}'.format(name=name))
def f(self, other):
if isinstance(other, pd.DataFrame): # Another DataFrame
return self._compare_frame(other, func, str_rep)
@@ -1349,9 +1357,9 @@ def na_op(x, y):
# work only for scalars
def f(self, other):
if not is_scalar(other):
- raise ValueError('Simple arithmetic with %s can only be '
- 'done with scalar values' %
- self._constructor.__name__)
+ raise ValueError('Simple arithmetic with {name} can only be '
+ 'done with scalar values'
+ .format(name=self._constructor.__name__))
return self._combine(other, op)
@@ -1384,7 +1392,7 @@ def na_op(x, y):
return result
- @Appender('Wrapper for comparison method %s' % name)
+ @Appender('Wrapper for comparison method {name}'.format(name=name))
def f(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
@@ -1394,8 +1402,8 @@ def f(self, other, axis=None):
return self._compare_constructor(other, na_op, try_cast=False)
elif isinstance(other, (self._constructor_sliced, pd.DataFrame,
ABCSeries)):
- raise Exception("input needs alignment for this object [%s]" %
- self._constructor)
+ raise Exception("input needs alignment for this object [{object}]"
+ .format(object=self._constructor))
else:
return self._combine_const(other, na_op, try_cast=False)
| Progress towards #16130
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Replaced `%s` syntax with `.format` in `missing.py`, `nanops.py`, `ops.py`. Additionally, made some of the existing positional `.format` code more explicit. | https://api.github.com/repos/pandas-dev/pandas/pulls/17322 | 2017-08-24T05:51:14Z | 2017-08-24T09:50:19Z | 2017-08-24T09:50:19Z | 2017-08-24T14:09:30Z |
Bitesize offsets | diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py
index b7151ad2eaa99..779fc0bd20964 100644
--- a/asv_bench/benchmarks/timeseries.py
+++ b/asv_bench/benchmarks/timeseries.py
@@ -56,7 +56,7 @@ def setup(self):
self.no_freq = self.rng7[:50000].append(self.rng7[50002:])
self.d_freq = self.rng7[:50000].append(self.rng7[50000:])
- self.rng8 = date_range(start='1/1/1700', freq='B', periods=100000)
+ self.rng8 = date_range(start='1/1/1700', freq='B', periods=75000)
self.b_freq = self.rng8[:50000].append(self.rng8[50000:])
def time_add_timedelta(self):
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 085a3a784557b..b055c4b4cb27f 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
from datetime import timedelta
from pandas.compat import long, zip
from pandas import compat
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 452d30322b4cf..ea37434e3a8d9 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
from datetime import date, datetime, timedelta
from pandas.compat import range
from pandas import compat
@@ -323,37 +324,42 @@ def _params(self):
def __repr__(self):
className = getattr(self, '_outputName', type(self).__name__)
+
+ if abs(self.n) != 1:
+ plural = 's'
+ else:
+ plural = ''
+
+ n_str = ""
+ if self.n != 1:
+ n_str = "%s * " % self.n
+
+ out = '<%s' % n_str + className + plural + self._repr_attrs() + '>'
+ return out
+
+ # TODO: Combine this with BusinessMixin version by defining a whitelisted
+ # set of attributes on each object rather than the existing behavior of
+ # iterating over internal ``__dict__``
+ def _repr_attrs(self):
exclude = set(['n', 'inc', 'normalize'])
attrs = []
for attr in sorted(self.__dict__):
- if ((attr == 'kwds' and len(self.kwds) == 0) or
- attr.startswith('_')):
+ if attr.startswith('_'):
continue
- elif attr == 'kwds':
+ elif attr == 'kwds': # TODO: get rid of this
kwds_new = {}
for key in self.kwds:
if not hasattr(self, key):
kwds_new[key] = self.kwds[key]
if len(kwds_new) > 0:
- attrs.append('='.join((attr, repr(kwds_new))))
- else:
- if attr not in exclude:
- attrs.append('='.join((attr, repr(getattr(self, attr)))))
-
- plural = ''
- if abs(self.n) != 1:
- plural = 's'
-
- n_str = ''
- if self.n != 1:
- n_str = '{n} * '.format(n=self.n)
+ attrs.append('kwds=%s' % (kwds_new))
+ elif attr not in exclude:
+ value = getattr(self, attr)
+ attrs.append('%s=%s' % (attr, value))
- attrs_str = ''
+ out = ''
if attrs:
- attrs_str = ': ' + ', '.join(attrs)
-
- repr_content = ''.join([n_str, className, plural, attrs_str])
- out = '<{content}>'.format(content=repr_content)
+ out += ': ' + ', '.join(attrs)
return out
@property
@@ -507,8 +513,18 @@ def freqstr(self):
else:
fstr = code
+ try:
+ if self._offset:
+ fstr += self._offset_str()
+ except AttributeError:
+ # TODO: standardize `_offset` vs `offset` naming convention
+ pass
+
return fstr
+ def _offset_str(self):
+ return ''
+
@property
def nanos(self):
raise ValueError("{name} is a non-fixed frequency".format(name=self))
@@ -527,23 +543,11 @@ def _from_name(cls, suffix=None):
class BusinessMixin(object):
""" mixin to business types to provide related functions """
- # TODO: Combine this with DateOffset by defining a whitelisted set of
- # attributes on each object rather than the existing behavior of iterating
- # over internal ``__dict__``
- def __repr__(self):
- className = getattr(self, '_outputName', self.__class__.__name__)
-
- plural = ''
- if abs(self.n) != 1:
- plural = 's'
-
- n_str = ''
- if self.n != 1:
- n_str = '{n} * '.format(n=self.n)
-
- repr_content = ''.join([n_str, className, plural, self._repr_attrs()])
- out = '<{content}>'.format(content=repr_content)
- return out
+ @property
+ def offset(self):
+ """Alias for self._offset"""
+ # Alias for backward compat
+ return self._offset
def _repr_attrs(self):
if self.offset:
@@ -572,6 +576,11 @@ def __getstate__(self):
def __setstate__(self, state):
"""Reconstruct an instance from a pickled state"""
+ if 'offset' in state:
+ # Older versions have offset attribute instead of _offset
+ if '_offset' in state: # pragma: no cover
+ raise ValueError('Unexpected key `_offset`')
+ state['_offset'] = state.pop('offset')
self.__dict__ = state
if 'weekmask' in state and 'holidays' in state:
calendar, holidays = _get_calendar(weekmask=self.weekmask,
@@ -593,24 +602,7 @@ def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
- self.offset = kwds.get('offset', timedelta(0))
-
- @property
- def freqstr(self):
- try:
- code = self.rule_code
- except NotImplementedError:
- return repr(self)
-
- if self.n != 1:
- fstr = '{n}{code}'.format(n=self.n, code=code)
- else:
- fstr = code
-
- if self.offset:
- fstr += self._offset_str()
-
- return fstr
+ self._offset = kwds.get('offset', timedelta(0))
def _offset_str(self):
def get_str(td):
@@ -643,9 +635,6 @@ def get_str(td):
else:
return '+' + repr(self.offset)
- def isAnchored(self):
- return (self.n == 1)
-
@apply_wraps
def apply(self, other):
if isinstance(other, datetime):
@@ -709,7 +698,7 @@ def __init__(self, **kwds):
kwds['start'] = self._validate_time(kwds.get('start', '09:00'))
kwds['end'] = self._validate_time(kwds.get('end', '17:00'))
self.kwds = kwds
- self.offset = kwds.get('offset', timedelta(0))
+ self._offset = kwds.get('offset', timedelta(0))
self.start = kwds.get('start', '09:00')
self.end = kwds.get('end', '17:00')
@@ -776,7 +765,7 @@ def _get_business_hours_by_sec(self):
Return business hours in a day by seconds.
"""
if self._get_daytime_flag():
- # create dummy datetime to calcurate businesshours in a day
+ # create dummy datetime to calculate businesshours in a day
dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
until = datetime(2014, 4, 1, self.end.hour, self.end.minute)
return (until - dtstart).total_seconds()
@@ -811,7 +800,7 @@ def rollforward(self, dt):
@apply_wraps
def apply(self, other):
- # calcurate here because offset is not immutable
+ # calculate here because offset is not immutable
daytime = self._get_daytime_flag()
businesshours = self._get_business_hours_by_sec()
bhdelta = timedelta(seconds=businesshours)
@@ -860,7 +849,7 @@ def apply(self, other):
if n >= 0:
bday_edge = self._prev_opening_time(other)
bday_edge = bday_edge + bhdelta
- # calcurate remainder
+ # calculate remainder
bday_remain = result - bday_edge
result = self._next_opening_time(other)
result += bday_remain
@@ -898,7 +887,7 @@ def onOffset(self, dt):
def _onOffset(self, dt, businesshours):
"""
- Slight speedups using calcurated values
+ Slight speedups using calculated values
"""
# if self.normalize and not _is_normalized(dt):
# return False
@@ -975,7 +964,8 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
- self.offset = kwds.get('offset', timedelta(0))
+ self._offset = kwds.get('offset', timedelta(0))
+
calendar, holidays = _get_calendar(weekmask=weekmask,
holidays=holidays,
calendar=calendar)
@@ -1337,9 +1327,6 @@ def _apply_index_days(self, i, roll):
class BusinessMonthEnd(MonthOffset):
"""DateOffset increments between business EOM dates"""
- def isAnchored(self):
- return (self.n == 1)
-
@apply_wraps
def apply(self, other):
n = self.n
@@ -1425,7 +1412,7 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
- self.offset = kwds.get('offset', timedelta(0))
+ self._offset = kwds.get('offset', timedelta(0))
calendar, holidays = _get_calendar(weekmask=weekmask,
holidays=holidays,
@@ -1495,7 +1482,7 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
- self.offset = kwds.get('offset', timedelta(0))
+ self._offset = kwds.get('offset', timedelta(0))
# _get_calendar does validation and possible transformation
# of calendar and holidays.
@@ -1966,9 +1953,6 @@ class QuarterEnd(QuarterOffset):
_default_startingMonth = 3
_prefix = 'Q'
- def isAnchored(self):
- return (self.n == 1 and self.startingMonth is not None)
-
@apply_wraps
def apply(self, other):
n = self.n
@@ -2004,9 +1988,6 @@ class QuarterBegin(QuarterOffset):
_from_name_startingMonth = 1
_prefix = 'QS'
- def isAnchored(self):
- return (self.n == 1 and self.startingMonth is not None)
-
@apply_wraps
def apply(self, other):
n = self.n
| This is the first of several PRs cleaning up `tseries.offsets`. The ultimate goals of this series of PRs are:
- Fix slow implementation of `DateOffset.__eq__`, as that gets called by `Period.__eq__`.
- Make `DateOffset` immutable, since it is attached to a `Period` object which is supposed to be immutable (TODO: fill in the appropriate GH issue)
- Move `tseries.offsets` into cython so that `_libs.period` and `_libs.tslib` can import it guilt-free and not need to do run-time imports. See TODO comment in `_libs.__init__`:
```
# TODO
# period is directly dependent on tslib and imports python
# modules, so exposing Period as an alias is currently not possible
```
The biggest impediment to the immutability goal is the `kwds` attribute, which is just a `dict`. The first couple of steps in this sequence is focused on whittling down the number of attributes set at runtime.
This PR is mainly fixing typos and removing redundant methods.
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/17318 | 2017-08-23T19:07:13Z | 2017-09-23T17:36:29Z | 2017-09-23T17:36:29Z | 2017-10-30T16:23:27Z |
ENH: Better error message if usecols doesn't match columns | diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 1a08a1353a605..1cad0e68d0f25 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -76,6 +76,7 @@ Other Enhancements
- Improved wording of ``ValueError`` raised in :func:`to_datetime` when ``unit=`` is passed with a non-convertible value (:issue:`14350`)
- :func:`Series.fillna` now accepts a Series or a dict as a ``value`` for a categorical dtype (:issue:`17033`)
- :func:`pandas.read_clipboard` updated to use qtpy, falling back to PyQt5 and then PyQt4, adding compatibility with Python3 and multiple python-qt bindings (:issue:`17722`)
+- Improved wording of ``ValueError`` raised in :func:`read_csv` when the ``usecols`` argument cannot match all columns. (:issue:`17301`)
.. _whatsnew_0220.api_breaking:
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index fe50b551ea948..83b1d8ec1a070 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1141,6 +1141,38 @@ def _evaluate_usecols(usecols, names):
return usecols
+def _validate_usecols_names(usecols, names):
+ """
+ Validates that all usecols are present in a given
+ list of names. If not, raise a ValueError that
+ shows what usecols are missing.
+
+ Parameters
+ ----------
+ usecols : iterable of usecols
+ The columns to validate are present in names.
+ names : iterable of names
+ The column names to check against.
+
+ Returns
+ -------
+ usecols : iterable of usecols
+ The `usecols` parameter if the validation succeeds.
+
+ Raises
+ ------
+ ValueError : Columns were missing. Error message will list them.
+ """
+ missing = [c for c in usecols if c not in names]
+ if len(missing) > 0:
+ raise ValueError(
+ "Usecols do not match columns, "
+ "columns expected but not found: {missing}".format(missing=missing)
+ )
+
+ return usecols
+
+
def _validate_skipfooter_arg(skipfooter):
"""
Validate the 'skipfooter' parameter.
@@ -1753,14 +1785,14 @@ def __init__(self, src, **kwds):
# GH 14671
if (self.usecols_dtype == 'string' and
not set(usecols).issubset(self.orig_names)):
- raise ValueError("Usecols do not match names.")
+ _validate_usecols_names(usecols, self.orig_names)
if len(self.names) > len(usecols):
self.names = [n for i, n in enumerate(self.names)
if (i in usecols or n in usecols)]
if len(self.names) < len(usecols):
- raise ValueError("Usecols do not match names.")
+ _validate_usecols_names(usecols, self.names)
self._set_noconvert_columns()
@@ -2532,9 +2564,13 @@ def _handle_usecols(self, columns, usecols_key):
raise ValueError("If using multiple headers, usecols must "
"be integers.")
col_indices = []
+
for col in self.usecols:
if isinstance(col, string_types):
- col_indices.append(usecols_key.index(col))
+ try:
+ col_indices.append(usecols_key.index(col))
+ except ValueError:
+ _validate_usecols_names(self.usecols, usecols_key)
else:
col_indices.append(col)
else:
diff --git a/pandas/tests/io/parser/usecols.py b/pandas/tests/io/parser/usecols.py
index f582e5037ca07..0fa53e6288bda 100644
--- a/pandas/tests/io/parser/usecols.py
+++ b/pandas/tests/io/parser/usecols.py
@@ -480,10 +480,10 @@ def test_raise_on_usecols_names_mismatch(self):
# GH 14671
data = 'a,b,c,d\n1,2,3,4\n5,6,7,8'
- if self.engine == 'c':
- msg = 'Usecols do not match names'
- else:
- msg = 'is not in list'
+ msg = (
+ "Usecols do not match columns, "
+ "columns expected but not found: {missing}"
+ )
usecols = ['a', 'b', 'c', 'd']
df = self.read_csv(StringIO(data), usecols=usecols)
@@ -492,11 +492,16 @@ def test_raise_on_usecols_names_mismatch(self):
tm.assert_frame_equal(df, expected)
usecols = ['a', 'b', 'c', 'f']
- with tm.assert_raises_regex(ValueError, msg):
+ with tm.assert_raises_regex(ValueError, msg.format(missing="\['f'\]")):
self.read_csv(StringIO(data), usecols=usecols)
usecols = ['a', 'b', 'f']
- with tm.assert_raises_regex(ValueError, msg):
+ with tm.assert_raises_regex(ValueError, msg.format(missing="\['f'\]")):
+ self.read_csv(StringIO(data), usecols=usecols)
+
+ usecols = ['a', 'b', 'f', 'g']
+ with tm.assert_raises_regex(
+ ValueError, msg.format(missing="\[('f', 'g'|'g', 'f')\]")):
self.read_csv(StringIO(data), usecols=usecols)
names = ['A', 'B', 'C', 'D']
@@ -520,9 +525,9 @@ def test_raise_on_usecols_names_mismatch(self):
# tm.assert_frame_equal(df, expected)
usecols = ['A', 'B', 'C', 'f']
- with tm.assert_raises_regex(ValueError, msg):
+ with tm.assert_raises_regex(ValueError, msg.format(missing="\['f'\]")):
self.read_csv(StringIO(data), header=0, names=names,
usecols=usecols)
usecols = ['A', 'B', 'f']
- with tm.assert_raises_regex(ValueError, msg):
+ with tm.assert_raises_regex(ValueError, msg.format(missing="\['f'\]")):
self.read_csv(StringIO(data), names=names, usecols=usecols)
| GH17301: Improving the error message given when usecols
doesn't match with the columns provided.
- [x] closes #17301
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
NOTE: Do I need to add a whatsnew entry for something so small?
Happy to do so if needed. | https://api.github.com/repos/pandas-dev/pandas/pulls/17310 | 2017-08-22T16:03:14Z | 2017-12-03T15:26:51Z | 2017-12-03T15:26:51Z | 2017-12-21T17:00:00Z |
REF: Special case NumericIndex._append_same_dtype() | diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py
index 454d9ccdda102..7697c3b9d3840 100644
--- a/asv_bench/benchmarks/index_object.py
+++ b/asv_bench/benchmarks/index_object.py
@@ -219,3 +219,22 @@ def time_min(self):
def time_min_trivial(self):
self.idx_inc.min()
+
+
+class IndexOps(object):
+ goal_time = 0.2
+
+ def setup(self):
+ N = 10000
+ self.ridx = [RangeIndex(i * 100, (i + 1) * 100) for i in range(N)]
+ self.iidx = [idx.astype(int) for idx in self.ridx]
+ self.oidx = [idx.astype(str) for idx in self.iidx]
+
+ def time_concat_range(self):
+ self.ridx[0].append(self.ridx[1:])
+
+ def time_concat_int(self):
+ self.iidx[0].append(self.iidx[1:])
+
+ def time_concat_obj(self):
+ self.oidx[0].append(self.oidx[1:])
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index 93993fd0a0cab..4e15aa50e4319 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -467,6 +467,11 @@ def _concat_datetimetz(to_concat, name=None):
return to_concat[0]._simple_new(new_values, tz=tz, name=name)
+def _concat_index_same_dtype(indexes, klass=None):
+ klass = klass if klass is not None else indexes[0].__class__
+ return klass(np.concatenate([x._values for x in indexes]))
+
+
def _concat_index_asobject(to_concat, name=None):
"""
concat all inputs as object. DatetimeIndex, TimedeltaIndex and
@@ -581,16 +586,15 @@ def _concat_rangeindex_same_dtype(indexes):
elif step is None:
# First non-empty index had only one element
if obj._start == start:
- return _concat_index_asobject(indexes)
+ from pandas import Int64Index
+ return _concat_index_same_dtype(indexes, klass=Int64Index)
step = obj._start - start
non_consecutive = ((step != obj._step and len(obj) > 1) or
(next is not None and obj._start != next))
if non_consecutive:
- # Int64Index._append_same_dtype([ix.astype(int) for ix in indexes])
- # would be preferred... but it currently resorts to
- # _concat_index_asobject anyway.
- return _concat_index_asobject(indexes)
+ from pandas import Int64Index
+ return _concat_index_same_dtype(indexes, klass=Int64Index)
if step is not None:
next = obj[-1] + step
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 1f007b1961e06..b0703869948c2 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -17,6 +17,7 @@
from pandas.core.indexes.base import (
Index, InvalidIndexError, _index_shared_docs)
from pandas.util._decorators import Appender, cache_readonly
+import pandas.core.dtypes.concat as _concat
import pandas.core.indexes.base as ibase
@@ -96,6 +97,9 @@ def _assert_safe_casting(cls, data, subarr):
"""
pass
+ def _concat_same_dtype(self, indexes, name):
+ return _concat._concat_index_same_dtype(indexes).rename(name)
+
@property
def is_all_dates(self):
"""
| - [x] tests passed
- [x] passes `git diff master -u -- "*.py" | flake8 --diff`
Simple patch which results in a modest speedup (~8%) of the following:
```python
In [2]: idxes = [pd.Int64Index(range(i)) for i in range(1, 10)]*200
In [3]: %timeit idxes[0].append(idxes[1:])
```
I don't know whether this is worth the extra code... just inquiring so that I can finalize #16236. | https://api.github.com/repos/pandas-dev/pandas/pulls/17307 | 2017-08-22T07:50:09Z | 2017-10-28T15:26:12Z | 2017-10-28T15:26:12Z | 2017-10-28T17:06:15Z |
CLN: replace %s syntax with .format in core.tools, algorithms.py, base.py | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index f2359f3ff1a9d..5fa8563479a41 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -394,12 +394,12 @@ def isin(comps, values):
if not is_list_like(comps):
raise TypeError("only list-like objects are allowed to be passed"
- " to isin(), you passed a "
- "[{0}]".format(type(comps).__name__))
+ " to isin(), you passed a [{comps_type}]"
+ .format(comps_type=type(comps).__name__))
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
- " to isin(), you passed a "
- "[{0}]".format(type(values).__name__))
+ " to isin(), you passed a [{values_type}]"
+ .format(values_type=type(values).__name__))
if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)):
values = lib.list_to_object_array(list(values))
@@ -674,7 +674,7 @@ def mode(values):
try:
result = np.sort(result)
except TypeError as e:
- warn("Unable to sort modes: %s" % e)
+ warn("Unable to sort modes: {error}".format(error=e))
result = _reconstruct_data(result, original.dtype, original)
return Series(result)
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 4ae4736035793..a7c991dc8d257 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -342,24 +342,25 @@ def _obj_with_exclusions(self):
def __getitem__(self, key):
if self._selection is not None:
- raise Exception('Column(s) %s already selected' % self._selection)
+ raise Exception('Column(s) {selection} already selected'
+ .format(selection=self._selection))
if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass,
np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
- raise KeyError("Columns not found: %s"
- % str(bad_keys)[1:-1])
+ raise KeyError("Columns not found: {missing}"
+ .format(missing=str(bad_keys)[1:-1]))
return self._gotitem(list(key), ndim=2)
elif not getattr(self, 'as_index', False):
if key not in self.obj.columns:
- raise KeyError("Column not found: %s" % key)
+ raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
- raise KeyError("Column not found: %s" % key)
+ raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim, subset=None):
@@ -409,7 +410,7 @@ def _try_aggregate_string_function(self, arg, *args, **kwargs):
if f is not None:
return f(self, *args, **kwargs)
- raise ValueError("{} is an unknown string function".format(arg))
+ raise ValueError("{arg} is an unknown string function".format(arg=arg))
def _aggregate(self, arg, *args, **kwargs):
"""
@@ -484,9 +485,9 @@ def nested_renaming_depr(level=4):
is_nested_renamer = True
if k not in obj.columns:
- raise SpecificationError('cannot perform renaming '
- 'for {0} with a nested '
- 'dictionary'.format(k))
+ msg = ('cannot perform renaming for {key} with a '
+ 'nested dictionary').format(key=k)
+ raise SpecificationError(msg)
nested_renaming_depr(4 + (_level or 0))
elif isinstance(obj, ABCSeries):
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 6ff4302937d07..53f58660cabdb 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -46,7 +46,8 @@ def _infer(a, b):
if b and b.tzinfo:
if not (tslib.get_timezone(tz) == tslib.get_timezone(b.tzinfo)):
raise AssertionError('Inputs must both have the same timezone,'
- ' {0} != {1}'.format(tz, b.tzinfo))
+ ' {timezone1} != {timezone2}'
+ .format(timezone1=tz, timezone2=b.tzinfo))
return tz
tz = None
@@ -491,10 +492,10 @@ def _convert_listlike(arg, box, format, name=None, tz=tz):
offset = tslib.Timestamp(origin) - tslib.Timestamp(0)
except tslib.OutOfBoundsDatetime:
raise tslib.OutOfBoundsDatetime(
- "origin {} is Out of Bounds".format(origin))
+ "origin {origin} is Out of Bounds".format(origin=origin))
except ValueError:
- raise ValueError("origin {} cannot be converted "
- "to a Timestamp".format(origin))
+ raise ValueError("origin {origin} cannot be converted "
+ "to a Timestamp".format(origin=origin))
# convert the offset to the unit of the arg
# this should be lossless in terms of precision
@@ -590,16 +591,16 @@ def f(value):
required = ['year', 'month', 'day']
req = sorted(list(set(required) - set(unit_rev.keys())))
if len(req):
- raise ValueError("to assemble mappings requires at "
- "least that [year, month, day] be specified: "
- "[{0}] is missing".format(','.join(req)))
+ raise ValueError("to assemble mappings requires at least that "
+ "[year, month, day] be specified: [{required}] "
+ "is missing".format(required=','.join(req)))
# keys we don't recognize
excess = sorted(list(set(unit_rev.keys()) - set(_unit_map.values())))
if len(excess):
raise ValueError("extra keys have been passed "
"to the datetime assemblage: "
- "[{0}]".format(','.join(excess)))
+ "[{excess}]".format(','.join(excess=excess)))
def coerce(values):
# we allow coercion to if errors allows
@@ -617,7 +618,7 @@ def coerce(values):
values = to_datetime(values, format='%Y%m%d', errors=errors)
except (TypeError, ValueError) as e:
raise ValueError("cannot assemble the "
- "datetimes: {0}".format(e))
+ "datetimes: {error}".format(error=e))
for u in ['h', 'm', 's', 'ms', 'us', 'ns']:
value = unit_rev.get(u)
@@ -627,8 +628,8 @@ def coerce(values):
unit=u,
errors=errors)
except (TypeError, ValueError) as e:
- raise ValueError("cannot assemble the datetimes "
- "[{0}]: {1}".format(value, e))
+ raise ValueError("cannot assemble the datetimes [{value}]: "
+ "{error}".format(value=value, error=e))
return values
@@ -810,8 +811,10 @@ def _convert_listlike(arg, format):
times.append(datetime.strptime(element, format).time())
except (ValueError, TypeError):
if errors == 'raise':
- raise ValueError("Cannot convert %s to a time with "
- "given format %s" % (element, format))
+ msg = ("Cannot convert {element} to a time with given "
+ "format {format}").format(element=element,
+ format=format)
+ raise ValueError(msg)
elif errors == 'ignore':
return arg
else:
@@ -876,6 +879,7 @@ def ole2datetime(oledt):
# Excel has a bug where it thinks the date 2/29/1900 exists
# we just reject any date before 3/1/1900.
if val < 61:
- raise ValueError("Value is outside of acceptable range: %s " % val)
+ msg = "Value is outside of acceptable range: {value}".format(value=val)
+ raise ValueError(msg)
return OLE_TIME_ZERO + timedelta(days=val)
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index f2d99d26a87b8..d5132826bb93f 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -129,7 +129,8 @@ def _validate_timedelta_unit(arg):
except:
if arg is None:
return 'ns'
- raise ValueError("invalid timedelta unit {0} provided".format(arg))
+ raise ValueError("invalid timedelta unit {arg} provided"
+ .format(arg=arg))
def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'):
@@ -161,8 +162,8 @@ def _convert_listlike(arg, unit='ns', box=True, errors='raise', name=None):
if is_timedelta64_dtype(arg):
value = arg.astype('timedelta64[ns]')
elif is_integer_dtype(arg):
- value = arg.astype('timedelta64[{0}]'.format(
- unit)).astype('timedelta64[ns]', copy=False)
+ value = arg.astype('timedelta64[{unit}]'.format(unit=unit)).astype(
+ 'timedelta64[ns]', copy=False)
else:
try:
value = tslib.array_to_timedelta64(_ensure_object(arg),
| Progress towards #16130
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Replaced `%s` syntax with `.format` in core.tools, algorithms.py, base.py. Additionally, made some of the existing positional `.format` code more explicit. | https://api.github.com/repos/pandas-dev/pandas/pulls/17305 | 2017-08-22T03:37:38Z | 2017-08-22T13:57:53Z | 2017-08-22T13:57:53Z | 2017-08-23T02:19:07Z |
Update Performance Considerations section in docs | diff --git a/doc/source/io.rst b/doc/source/io.rst
index e338407361705..b027c7658f0e9 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -5208,85 +5208,112 @@ easy conversion to and from pandas.
Performance Considerations
--------------------------
-This is an informal comparison of various IO methods, using pandas 0.13.1.
+This is an informal comparison of various IO methods, using pandas
+0.20.3. Timings are machine dependent and small differences should be
+ignored.
.. code-block:: ipython
- In [1]: df = pd.DataFrame(randn(1000000,2),columns=list('AB'))
+ In [1]: sz = 1000000
+ In [2]: df = pd.DataFrame({'A': randn(sz), 'B': [1] * sz})
- In [2]: df.info()
+ In [3]: df.info()
<class 'pandas.core.frame.DataFrame'>
- Int64Index: 1000000 entries, 0 to 999999
+ RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 2 columns):
A 1000000 non-null float64
- B 1000000 non-null float64
- dtypes: float64(2)
- memory usage: 22.9 MB
+ B 1000000 non-null int64
+ dtypes: float64(1), int64(1)
+ memory usage: 15.3 MB
Writing
.. code-block:: ipython
In [14]: %timeit test_sql_write(df)
- 1 loops, best of 3: 6.24 s per loop
+ 2.37 s ± 36.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
In [15]: %timeit test_hdf_fixed_write(df)
- 1 loops, best of 3: 237 ms per loop
+ 194 ms ± 65.9 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
In [26]: %timeit test_hdf_fixed_write_compress(df)
- 1 loops, best of 3: 245 ms per loop
+ 119 ms ± 2.15 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
In [16]: %timeit test_hdf_table_write(df)
- 1 loops, best of 3: 901 ms per loop
+ 623 ms ± 125 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
In [27]: %timeit test_hdf_table_write_compress(df)
- 1 loops, best of 3: 952 ms per loop
+ 563 ms ± 23.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
In [17]: %timeit test_csv_write(df)
- 1 loops, best of 3: 3.44 s per loop
+ 3.13 s ± 49.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
+
+ In [30]: %timeit test_feather_write(df)
+ 103 ms ± 5.88 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
+
+ In [31]: %timeit test_pickle_write(df)
+ 109 ms ± 3.72 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
+
+ In [32]: %timeit test_pickle_write_compress(df)
+ 3.33 s ± 55.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
Reading
.. code-block:: ipython
In [18]: %timeit test_sql_read()
- 1 loops, best of 3: 766 ms per loop
+ 1.35 s ± 14.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
In [19]: %timeit test_hdf_fixed_read()
- 10 loops, best of 3: 19.1 ms per loop
+ 14.3 ms ± 438 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
In [28]: %timeit test_hdf_fixed_read_compress()
- 10 loops, best of 3: 36.3 ms per loop
+ 23.5 ms ± 672 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
In [20]: %timeit test_hdf_table_read()
- 10 loops, best of 3: 39 ms per loop
+ 35.4 ms ± 314 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
In [29]: %timeit test_hdf_table_read_compress()
- 10 loops, best of 3: 60.6 ms per loop
+ 42.6 ms ± 2.1 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
In [22]: %timeit test_csv_read()
- 1 loops, best of 3: 620 ms per loop
+ 516 ms ± 27.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
+
+ In [33]: %timeit test_feather_read()
+ 4.06 ms ± 115 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
+
+ In [34]: %timeit test_pickle_read()
+ 6.5 ms ± 172 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
+
+ In [35]: %timeit test_pickle_read_compress()
+ 588 ms ± 3.57 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
Space on disk (in bytes)
.. code-block:: none
- 25843712 Apr 8 14:11 test.sql
- 24007368 Apr 8 14:11 test_fixed.hdf
- 15580682 Apr 8 14:11 test_fixed_compress.hdf
- 24458444 Apr 8 14:11 test_table.hdf
- 16797283 Apr 8 14:11 test_table_compress.hdf
- 46152810 Apr 8 14:11 test.csv
+ 34816000 Aug 21 18:00 test.sql
+ 24009240 Aug 21 18:00 test_fixed.hdf
+ 7919610 Aug 21 18:00 test_fixed_compress.hdf
+ 24458892 Aug 21 18:00 test_table.hdf
+ 8657116 Aug 21 18:00 test_table_compress.hdf
+ 28520770 Aug 21 18:00 test.csv
+ 16000248 Aug 21 18:00 test.feather
+ 16000848 Aug 21 18:00 test.pkl
+ 7554108 Aug 21 18:00 test.pkl.compress
And here's the code
.. code-block:: python
- import sqlite3
import os
+ import pandas as pd
+ import sqlite3
+ from numpy.random import randn
from pandas.io import sql
- df = pd.DataFrame(randn(1000000,2),columns=list('AB'))
+ sz = 1000000
+ df = pd.DataFrame({'A': randn(sz), 'B': [1] * sz})
def test_sql_write(df):
if os.path.exists('test.sql'):
@@ -5329,3 +5356,21 @@ And here's the code
def test_csv_read():
pd.read_csv('test.csv',index_col=0)
+
+ def test_feather_write(df):
+ df.to_feather('test.feather')
+
+ def test_feather_read():
+ pd.read_feather('test.feather')
+
+ def test_pickle_write(df):
+ df.to_pickle('test.pkl')
+
+ def test_pickle_read():
+ pd.read_pickle('test.pkl')
+
+ def test_pickle_write_compress(df):
+ df.to_pickle('test.pkl.compress', compression='xz')
+
+ def test_pickle_read_compress():
+ pd.read_pickle('test.pkl.compress', compression='xz')
| * re-run all tests
* add tests for feather and pickle
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17303 | 2017-08-22T01:16:19Z | 2017-10-20T09:02:14Z | 2017-10-20T09:02:14Z | 2017-10-20T09:03:12Z |
BUG: Fixed regex in asv.conf.json | diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json
index ced4f2b12445f..9c333f62810f4 100644
--- a/asv_bench/asv.conf.json
+++ b/asv_bench/asv.conf.json
@@ -118,9 +118,9 @@
// skipped for the matching benchmark.
//
"regressions_first_commits": {
- "*": "v0.20.0"
+ ".*": "v0.20.0"
},
"regression_thresholds": {
- "*": 0.05
+ ".*": 0.05
}
}
| In https://github.com/pandas-dev/pandas/pull/17293 I messed up the syntax. I
used a glob instead of a regex. According to the docs at
http://asv.readthedocs.io/en/latest/asv.conf.json.html#regressions-thresholds we
want to use a regex. I've actually manually tested this change and verified that
it works. | https://api.github.com/repos/pandas-dev/pandas/pulls/17300 | 2017-08-21T15:24:07Z | 2017-08-21T19:39:51Z | 2017-08-21T19:39:51Z | 2017-08-23T18:58:02Z |
BUG: revert collision warning | diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 88e62b5d301a3..8474116c38082 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -269,21 +269,6 @@ new column. In 0.21.0 and later, this will raise a ``UserWarning``:
1 2.0
2 3.0
-Similarly, it is possible to create a column with a name which collides with one of Pandas's
-built-in methods or attributes, which can cause confusion later when attempting to access
-that column as an attribute. This behavior now warns:
-
-.. code-block:: ipython
-
- In[4]: df['sum'] = [5., 7., 9.]
- UserWarning: Column name 'sum' collides with a built-in method, which will cause unexpected attribute behavior
- In[5]: df.sum
- Out[5]:
- <bound method DataFrame.sum of one sum
- 0 1.0 5.0
- 1 2.0 7.0
- 2 3.0 9.0>
-
Slicing ranges
--------------
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 636bb2dc3e60e..fa00140fb4abd 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -67,8 +67,8 @@ Improved warnings when attempting to create columns
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
New users are often flummoxed by the relationship between column operations and attribute
-access on ``DataFrame`` instances (:issue:`5904` & :issue:`7175`). Two specific instances
-of this confusion include attempting to create a new column by setting into an attribute:
+access on ``DataFrame`` instances (:issue:`7175`). One specific instance
+of this confusion is attempting to create a new column by setting into an attribute:
.. code-block:: ipython
@@ -86,25 +86,7 @@ This does not raise any obvious exceptions, but also does not create a new colum
1 2.0
2 3.0
-The second source of confusion is creating a column whose name collides with a method or
-attribute already in the instance namespace:
-
-.. code-block:: ipython
-
- In[4]: df['sum'] = [5., 7., 9.]
-
-This does not permit that column to be accessed as an attribute:
-
-.. code-block:: ipython
-
- In[5]: df.sum
- Out[5]:
- <bound method DataFrame.sum of one sum
- 0 1.0 5.0
- 1 2.0 7.0
- 2 3.0 9.0>
-
-Both of these now raise a ``UserWarning`` about the potential for unexpected behavior. See :ref:`Attribute Access <indexing.attribute_access>`.
+Setting a list-like data structure into a new attribute now raise a ``UserWarning`` about the potential for unexpected behavior. See :ref:`Attribute Access <indexing.attribute_access>`.
.. _whatsnew_0210.enhancements.other:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index cdb08d8887e05..df5f1a8326acd 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1905,10 +1905,6 @@ def _slice(self, slobj, axis=0, kind=None):
return result
def _set_item(self, key, value):
- if isinstance(key, str) and callable(getattr(self, key, None)):
- warnings.warn("Column name '{key}' collides with a built-in "
- "method, which will cause unexpected attribute "
- "behavior".format(key=key), stacklevel=3)
self._data.set(key, value)
self._clear_item_cache()
@@ -3441,8 +3437,8 @@ def __setattr__(self, name, value):
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
if isinstance(self, ABCDataFrame) and (is_list_like(value)):
- warnings.warn("Pandas doesn't allow Series to be assigned "
- "into nonexistent columns - see "
+ warnings.warn("Pandas doesn't allow columns to be "
+ "created via a new attribute name - see "
"https://pandas.pydata.org/pandas-docs/"
"stable/indexing.html#attribute-access",
stacklevel=2)
diff --git a/pandas/tests/dtypes/test_generic.py b/pandas/tests/dtypes/test_generic.py
index 82444d6c94157..bd365f9c3281f 100644
--- a/pandas/tests/dtypes/test_generic.py
+++ b/pandas/tests/dtypes/test_generic.py
@@ -48,7 +48,6 @@ def test_abc_types(self):
def test_setattr_warnings():
- # GH5904 - Suggestion: Warning for DataFrame colname-methodname clash
# GH7175 - GOTCHA: You can't use dot notation to add a column...
d = {'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
'two': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
@@ -78,7 +77,3 @@ def test_setattr_warnings():
# warn when setting column to nonexistent name
df.four = df.two + 2
assert df.four.sum() > df.two.sum()
-
- with tm.assert_produces_warning(UserWarning):
- # warn when column has same name as method
- df['sum'] = df.two
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index b5ecc4d34cd08..9c488cb2389be 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -2011,7 +2011,7 @@ def check(obj, comparator):
df['string'] = 'foo'
df['float322'] = 1.
df['float322'] = df['float322'].astype('float32')
- df['boolean'] = df['float322'] > 0
+ df['bool'] = df['float322'] > 0
df['time1'] = Timestamp('20130101')
df['time2'] = Timestamp('20130102')
check(df, tm.assert_frame_equal)
@@ -2141,7 +2141,7 @@ def test_table_values_dtypes_roundtrip(self):
df1['string'] = 'foo'
df1['float322'] = 1.
df1['float322'] = df1['float322'].astype('float32')
- df1['boolean'] = df1['float32'] > 0
+ df1['bool'] = df1['float32'] > 0
df1['time1'] = Timestamp('20130101')
df1['time2'] = Timestamp('20130102')
| - [ ] closes #17268
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Reverts work in #16951 that warns when creating a column whose name collides with a method. | https://api.github.com/repos/pandas-dev/pandas/pulls/17298 | 2017-08-20T22:59:57Z | 2017-09-07T10:52:12Z | 2017-09-07T10:52:12Z | 2017-09-07T14:23:40Z |
Remove unnecessary usage of _TSObject | diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx
index e017d863e1907..6ba7ec0270f30 100644
--- a/pandas/_libs/period.pyx
+++ b/pandas/_libs/period.pyx
@@ -120,26 +120,6 @@ initialize_daytime_conversion_factor_matrix()
# Period logic
#----------------------------------------------------------------------
-cdef inline int64_t apply_mult(int64_t period_ord, int64_t mult):
- """
- Get freq+multiple ordinal value from corresponding freq-only ordinal value.
- For example, 5min ordinal will be 1/5th the 1min ordinal (rounding down to
- integer).
- """
- if mult == 1:
- return period_ord
-
- return (period_ord - 1) // mult
-
-cdef inline int64_t remove_mult(int64_t period_ord_w_mult, int64_t mult):
- """
- Get freq-only ordinal value from corresponding freq+multiple ordinal.
- """
- if mult == 1:
- return period_ord_w_mult
-
- return period_ord_w_mult * mult + 1;
-
@cython.wraparound(False)
@cython.boundscheck(False)
diff --git a/pandas/_libs/src/datetime.pxd b/pandas/_libs/src/datetime.pxd
index 2267c8282ec14..23620e790c132 100644
--- a/pandas/_libs/src/datetime.pxd
+++ b/pandas/_libs/src/datetime.pxd
@@ -88,11 +88,6 @@ cdef extern from "datetime/np_datetime.h":
int cmp_pandas_datetimestruct(pandas_datetimestruct *a,
pandas_datetimestruct *b)
- int convert_pydatetime_to_datetimestruct(PyObject *obj,
- pandas_datetimestruct *out,
- PANDAS_DATETIMEUNIT *out_bestunit,
- int apply_tzinfo)
-
npy_datetime pandas_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT fr,
pandas_datetimestruct *d) nogil
void pandas_datetime_to_datetimestruct(npy_datetime val,
@@ -112,12 +107,6 @@ cdef extern from "datetime/np_datetime_strings.h":
PANDAS_DATETIMEUNIT *out_bestunit,
npy_bool *out_special)
- int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen,
- int local, PANDAS_DATETIMEUNIT base, int tzoffset,
- NPY_CASTING casting)
-
- int get_datetime_iso_8601_strlen(int local, PANDAS_DATETIMEUNIT base)
-
# int parse_python_string(object obj, pandas_datetimestruct *out) except -1
@@ -152,16 +141,6 @@ cdef inline int _cstring_to_dts(char *val, int length,
return result
-cdef inline object _datetime64_to_datetime(int64_t val):
- cdef pandas_datetimestruct dts
- pandas_datetime_to_datetimestruct(val, PANDAS_FR_ns, &dts)
- return _dts_to_pydatetime(&dts)
-
-cdef inline object _dts_to_pydatetime(pandas_datetimestruct *dts):
- return <object> PyDateTime_FromDateAndTime(dts.year, dts.month,
- dts.day, dts.hour,
- dts.min, dts.sec, dts.us)
-
cdef inline int64_t _pydatetime_to_dts(object val, pandas_datetimestruct *dts):
dts.year = PyDateTime_GET_YEAR(val)
dts.month = PyDateTime_GET_MONTH(val)
@@ -173,17 +152,6 @@ cdef inline int64_t _pydatetime_to_dts(object val, pandas_datetimestruct *dts):
dts.ps = dts.as = 0
return pandas_datetimestruct_to_datetime(PANDAS_FR_ns, dts)
-cdef inline int64_t _dtlike_to_datetime64(object val,
- pandas_datetimestruct *dts):
- dts.year = val.year
- dts.month = val.month
- dts.day = val.day
- dts.hour = val.hour
- dts.min = val.minute
- dts.sec = val.second
- dts.us = val.microsecond
- dts.ps = dts.as = 0
- return pandas_datetimestruct_to_datetime(PANDAS_FR_ns, dts)
cdef inline int64_t _date_to_datetime64(object val,
pandas_datetimestruct *dts):
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 32b8c92a50269..c4a38ec660a4c 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -705,7 +705,6 @@ class Timestamp(_Timestamp):
pandas_datetimestruct dts
int64_t value
object _tzinfo, result, k, v
- _TSObject ts
# set to naive if needed
_tzinfo = self.tzinfo
@@ -1009,10 +1008,6 @@ def unique_deltas(ndarray[int64_t] arr):
return result
-cdef inline bint _is_multiple(int64_t us, int64_t mult):
- return us % mult == 0
-
-
cdef inline bint _cmp_scalar(int64_t lhs, int64_t rhs, int op) except -1:
if op == Py_EQ:
return lhs == rhs
@@ -4694,7 +4689,6 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
field and return an array of these values.
"""
cdef:
- _TSObject ts
Py_ssize_t i, count = 0
ndarray[int32_t] out
ndarray[int32_t, ndim=2] _month_offset
@@ -4876,7 +4870,6 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
(defined by frequency).
"""
cdef:
- _TSObject ts
Py_ssize_t i
int count = 0
bint is_business = 0
@@ -4925,9 +4918,8 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
- ts = convert_to_tsobject(dtindex[i], None, None, 0, 0)
dom = dts.day
- dow = ts_dayofweek(ts)
+ dow = dayofweek(dts.year, dts.month, dts.day)
if (dom == 1 and dow < 5) or (dom <= 3 and dow == 0):
out[i] = 1
@@ -4951,13 +4943,12 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
- ts = convert_to_tsobject(dtindex[i], None, None, 0, 0)
isleap = is_leapyear(dts.year)
mo_off = _month_offset[isleap, dts.month - 1]
dom = dts.day
doy = mo_off + dom
ldom = _month_offset[isleap, dts.month]
- dow = ts_dayofweek(ts)
+ dow = dayofweek(dts.year, dts.month, dts.day)
if (ldom == doy and dow < 5) or (
dow == 4 and (ldom - doy <= 2)):
@@ -4986,9 +4977,8 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
- ts = convert_to_tsobject(dtindex[i], None, None, 0, 0)
dom = dts.day
- dow = ts_dayofweek(ts)
+ dow = dayofweek(dts.year, dts.month, dts.day)
if ((dts.month - start_month) % 3 == 0) and (
(dom == 1 and dow < 5) or (dom <= 3 and dow == 0)):
@@ -5013,13 +5003,12 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
- ts = convert_to_tsobject(dtindex[i], None, None, 0, 0)
isleap = is_leapyear(dts.year)
mo_off = _month_offset[isleap, dts.month - 1]
dom = dts.day
doy = mo_off + dom
ldom = _month_offset[isleap, dts.month]
- dow = ts_dayofweek(ts)
+ dow = dayofweek(dts.year, dts.month, dts.day)
if ((dts.month - end_month) % 3 == 0) and (
(ldom == doy and dow < 5) or (
@@ -5049,9 +5038,8 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
- ts = convert_to_tsobject(dtindex[i], None, None, 0, 0)
dom = dts.day
- dow = ts_dayofweek(ts)
+ dow = dayofweek(dts.year, dts.month, dts.day)
if (dts.month == start_month) and (
(dom == 1 and dow < 5) or (dom <= 3 and dow == 0)):
@@ -5076,12 +5064,11 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
- ts = convert_to_tsobject(dtindex[i], None, None, 0, 0)
isleap = is_leapyear(dts.year)
dom = dts.day
mo_off = _month_offset[isleap, dts.month - 1]
doy = mo_off + dom
- dow = ts_dayofweek(ts)
+ dow = dayofweek(dts.year, dts.month, dts.day)
ldom = _month_offset[isleap, dts.month]
if (dts.month == end_month) and (
@@ -5095,7 +5082,6 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field,
pandas_datetime_to_datetimestruct(
dtindex[i], PANDAS_FR_ns, &dts)
- ts = convert_to_tsobject(dtindex[i], None, None, 0, 0)
isleap = is_leapyear(dts.year)
mo_off = _month_offset[isleap, dts.month - 1]
dom = dts.day
@@ -5117,7 +5103,6 @@ def get_date_name_field(ndarray[int64_t] dtindex, object field):
name based on requested field (e.g. weekday_name)
"""
cdef:
- _TSObject ts
Py_ssize_t i, count = 0
ndarray[object] out
pandas_datetimestruct dts
@@ -5143,10 +5128,6 @@ def get_date_name_field(ndarray[int64_t] dtindex, object field):
raise ValueError("Field %s not supported" % field)
-cdef inline int m8_weekday(int64_t val):
- ts = convert_to_tsobject(val, None, None, 0, 0)
- return ts_dayofweek(ts)
-
cdef int64_t DAY_NS = 86400000000000LL
@@ -5156,11 +5137,9 @@ def date_normalize(ndarray[int64_t] stamps, tz=None):
cdef:
Py_ssize_t i, n = len(stamps)
pandas_datetimestruct dts
- _TSObject tso
ndarray[int64_t] result = np.empty(n, dtype=np.int64)
if tz is not None:
- tso = _TSObject()
tz = maybe_get_tz(tz)
result = _normalize_local(stamps, tz)
else:
@@ -5305,8 +5284,6 @@ def monthrange(int64_t year, int64_t month):
return (dayofweek(year, month, 1), days)
-cdef inline int64_t ts_dayofweek(_TSObject ts):
- return dayofweek(ts.dts.year, ts.dts.month, ts.dts.day)
cdef inline int days_in_month(pandas_datetimestruct dts) nogil:
return days_per_month_table[is_leapyear(dts.year)][dts.month -1]
| This is part 2 in an N-part series of PRs to disentangle inter-dependent pieces of tslib.pyx (and by extension, lib.pyx and period.pyx).
`tslib` has a `_TSObject` class that is used as a container during conversion steps. In a number of the places where it is currently used, it is not needed. All this PR does is remove it in cases where it is either unused or unneeded.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17297 | 2017-08-20T21:25:17Z | 2017-08-21T23:49:18Z | 2017-08-21T23:49:18Z | 2017-08-22T20:34:21Z |
API: warning to raise KeyError in the future if not all elements of a list are selected via .loc | diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst
index cfdb53ec7e4b1..44358593793bc 100644
--- a/doc/source/advanced.rst
+++ b/doc/source/advanced.rst
@@ -1009,7 +1009,7 @@ The different indexing operation can potentially change the dtype of a ``Series`
series1 = pd.Series([1, 2, 3])
series1.dtype
- res = series1[[0,4]]
+ res = series1.reindex([0, 4])
res.dtype
res
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index edbc4e6d7fd22..415f3fd702c43 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -333,8 +333,15 @@ Selection By Label
dfl.loc['20130102':'20130104']
+.. warning::
+
+ Starting in 0.21.0, pandas will show a ``FutureWarning`` if indexing with a list with missing labels. In the future
+ this will raise a ``KeyError``. See :ref:`list-like Using loc with missing keys in a list is Deprecated <indexing.deprecate_loc_reindex_listlike>`
+
pandas provides a suite of methods in order to have **purely label based indexing**. This is a strict inclusion based protocol.
-**At least 1** of the labels for which you ask, must be in the index or a ``KeyError`` will be raised! When slicing, both the start bound **AND** the stop bound are *included*, if present in the index. Integers are valid labels, but they refer to the label **and not the position**.
+All of the labels for which you ask, must be in the index or a ``KeyError`` will be raised!
+When slicing, both the start bound **AND** the stop bound are *included*, if present in the index.
+Integers are valid labels, but they refer to the label **and not the position**.
The ``.loc`` attribute is the primary access method. The following are valid inputs:
@@ -635,6 +642,107 @@ For getting *multiple* indexers, using ``.get_indexer``
dfd.iloc[[0, 2], dfd.columns.get_indexer(['A', 'B'])]
+.. _indexing.deprecate_loc_reindex_listlike:
+
+Indexing with list with missing labels is Deprecated
+----------------------------------------------------
+
+.. warning::
+
+ Starting in 0.21.0, using ``.loc`` or ``[]`` with a list with one or more missing labels, is deprecated, in favor of ``.reindex``.
+
+In prior versions, using ``.loc[list-of-labels]`` would work as long as *at least 1* of the keys was found (otherwise it
+would raise a ``KeyError``). This behavior is deprecated and will show a warning message pointing to this section. The
+recommeded alternative is to use ``.reindex()``.
+
+For example.
+
+.. ipython:: python
+
+ s = pd.Series([1, 2, 3])
+ s
+
+Selection with all keys found is unchanged.
+
+.. ipython:: python
+
+ s.loc[[1, 2]]
+
+Previous Behavior
+
+.. code-block:: ipython
+
+ In [4]: s.loc[[1, 2, 3]]
+ Out[4]:
+ 1 2.0
+ 2 3.0
+ 3 NaN
+ dtype: float64
+
+
+Current Behavior
+
+.. code-block:: ipython
+
+ In [4]: s.loc[[1, 2, 3]]
+ Passing list-likes to .loc with any non-matching elements will raise
+ KeyError in the future, you can use .reindex() as an alternative.
+
+ See the documentation here:
+ http://pandas.pydata.org/pandas-docs/stable/indexing.html#deprecate-loc-reindex-listlike
+
+ Out[4]:
+ 1 2.0
+ 2 3.0
+ 3 NaN
+ dtype: float64
+
+
+Reindexing
+~~~~~~~~~~
+
+The idiomatic way to achieve selecting potentially not-found elmenents is via ``.reindex()``. See also the section on :ref:`reindexing <basics.reindexing>`.
+
+.. ipython:: python
+
+ s.reindex([1, 2, 3])
+
+Alternatively, if you want to select only *valid* keys, the following is idiomatic and efficient; it is guaranteed to preserve the dtype of the selection.
+
+.. ipython:: python
+
+ labels = [1, 2, 3]
+ s.loc[s.index.intersection(labels)]
+
+Having a duplicated index will raise for a ``.reindex()``:
+
+.. ipython:: python
+
+ s = pd.Series(np.arange(4), index=['a', 'a', 'b', 'c'])
+ labels = ['c', 'd']
+
+.. code-block:: ipython
+
+ In [17]: s.reindex(labels)
+ ValueError: cannot reindex from a duplicate axis
+
+Generally, you can interesect the desired labels with the current
+axis, and then reindex.
+
+.. ipython:: python
+
+ s.loc[s.index.intersection(labels)].reindex(labels)
+
+However, this would *still* raise if your resulting index is duplicated.
+
+.. code-block:: ipython
+
+ In [41]: labels = ['a', 'd']
+
+ In [42]: s.loc[s.index.intersection(labels)].reindex(labels)
+ ValueError: cannot reindex from a duplicate axis
+
+
.. _indexing.basics.partial_setting:
Selecting Random Samples
@@ -852,7 +960,7 @@ when you don't know which of the sought labels are in fact present:
s[s.index.isin([2, 4, 6])]
# compare it to the following
- s[[2, 4, 6]]
+ s.reindex([2, 4, 6])
In addition to that, ``MultiIndex`` allows selecting a separate level to use
in the membership check:
diff --git a/doc/source/whatsnew/v0.15.0.txt b/doc/source/whatsnew/v0.15.0.txt
index 6282f15b6faeb..e44bc6e9e91e0 100644
--- a/doc/source/whatsnew/v0.15.0.txt
+++ b/doc/source/whatsnew/v0.15.0.txt
@@ -676,10 +676,19 @@ Other notable API changes:
Both will now return a frame reindex by [1,3]. E.g.
- .. ipython:: python
+ .. code-block:: ipython
- df.loc[[1,3]]
- df.loc[[1,3],:]
+ In [3]: df.loc[[1,3]]
+ Out[3]:
+ 0
+ 1 a
+ 3 NaN
+
+ In [4]: df.loc[[1,3],:]
+ Out[4]:
+ 0
+ 1 a
+ 3 NaN
This can also be seen in multi-axis indexing with a ``Panel``.
@@ -693,9 +702,14 @@ Other notable API changes:
The following would raise ``KeyError`` prior to 0.15.0:
- .. ipython:: python
+ .. code-block:: ipython
- p.loc[['ItemA','ItemD'],:,'D']
+ In [5]:
+ Out[5]:
+ ItemA ItemD
+ 1 3 NaN
+ 2 7 NaN
+ 3 11 NaN
Furthermore, ``.loc`` will raise If no values are found in a multi-index with a list-like indexer:
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index f4ec8a5f2ad24..0d4eaa90d7ab3 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -300,6 +300,64 @@ If installed, we now require:
| Bottleneck | 1.0.0 | |
+--------------+-----------------+----------+
+.. _whatsnew_0210.api_breaking.loc:
+
+Indexing with a list with missing labels is Deprecated
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Previously, selecting with a list of labels, where one or more labels were missing would always succeed, returning ``NaN`` for missing labels.
+This will now show a ``FutureWarning``, in the future this will raise a ``KeyError`` (:issue:`15747`).
+This warning will trigger on a ``DataFrame`` or a ``Series`` for using ``.loc[]`` or ``[[]]`` when passing a list-of-labels with at least 1 missing label.
+See the :ref:`deprecation docs <indexing.deprecate_loc_reindex_listlike>`.
+
+
+.. ipython:: python
+
+ s = pd.Series([1, 2, 3])
+ s
+
+Previous Behavior
+
+.. code-block:: ipython
+
+ In [4]: s.loc[[1, 2, 3]]
+ Out[4]:
+ 1 2.0
+ 2 3.0
+ 3 NaN
+ dtype: float64
+
+
+Current Behavior
+
+.. code-block:: ipython
+
+ In [4]: s.loc[[1, 2, 3]]
+ Passing list-likes to .loc or [] with any missing label will raise
+ KeyError in the future, you can use .reindex() as an alternative.
+
+ See the documentation here:
+ http://pandas.pydata.org/pandas-docs/stable/indexing.html#deprecate-loc-reindex-listlike
+
+ Out[4]:
+ 1 2.0
+ 2 3.0
+ 3 NaN
+ dtype: float64
+
+The idiomatic way to achieve selecting potentially not-found elmenents is via ``.reindex()``
+
+.. ipython:: python
+
+ s.reindex([1, 2, 3])
+
+Selection with all keys found is unchanged.
+
+.. ipython:: python
+
+ s.loc[[1, 2]]
+
+
.. _whatsnew_0210.api_breaking.pandas_eval:
Improved error handling during item assignment in pd.eval
@@ -607,6 +665,7 @@ Deprecations
- ``pd.TimeGrouper`` is deprecated in favor of :class:`pandas.Grouper` (:issue:`16747`)
- ``cdate_range`` has been deprecated in favor of :func:`bdate_range`, which has gained ``weekmask`` and ``holidays`` parameters for building custom frequency date ranges. See the :ref:`documentation <timeseries.custom-freq-ranges>` for more details (:issue:`17596`)
- passing ``categories`` or ``ordered`` kwargs to :func:`Series.astype` is deprecated, in favor of passing a :ref:`CategoricalDtype <whatsnew_0210.enhancements.categorical_dtype>` (:issue:`17636`)
+- Passing a non-existant column in ``.to_excel(..., columns=)`` is deprecated and will raise a ``KeyError`` in the future (:issue:`17295`)
.. _whatsnew_0210.deprecations.argmin_min:
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 2ea1b8a238913..e977e84702982 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1419,13 +1419,33 @@ def _has_valid_type(self, key, axis):
if isinstance(key, tuple) and isinstance(ax, MultiIndex):
return True
- # TODO: don't check the entire key unless necessary
- if (not is_iterator(key) and len(key) and
- np.all(ax.get_indexer_for(key) < 0)):
+ if not is_iterator(key) and len(key):
- raise KeyError(u"None of [{key}] are in the [{axis}]"
- .format(key=key,
- axis=self.obj._get_axis_name(axis)))
+ # True indicates missing values
+ missing = ax.get_indexer_for(key) < 0
+
+ if np.any(missing):
+ if len(key) == 1 or np.all(missing):
+ raise KeyError(
+ u"None of [{key}] are in the [{axis}]".format(
+ key=key, axis=self.obj._get_axis_name(axis)))
+ else:
+
+ # we skip the warning on Categorical/Interval
+ # as this check is actually done (check for
+ # non-missing values), but a bit later in the
+ # code, so we want to avoid warning & then
+ # just raising
+ _missing_key_warning = textwrap.dedent("""
+ Passing list-likes to .loc or [] with any missing label will raise
+ KeyError in the future, you can use .reindex() as an alternative.
+
+ See the documentation here:
+ http://pandas.pydata.org/pandas-docs/stable/indexing.html#deprecate-loc-reindex-listlike""") # noqa
+
+ if not (ax.is_categorical() or ax.is_interval()):
+ warnings.warn(_missing_key_warning,
+ FutureWarning, stacklevel=5)
return True
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 97f39a680c8c9..58cac46f63d7e 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -691,7 +691,7 @@ def _get_with(self, key):
if key_type == 'integer':
if self.index.is_integer() or self.index.is_floating():
- return self.reindex(key)
+ return self.loc[key]
else:
return self._get_values(key)
elif key_type == 'boolean':
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 51668bb6b0895..9e888c38edaa7 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -356,7 +356,21 @@ def __init__(self, df, na_rep='', float_format=None, cols=None,
self.styler = None
self.df = df
if cols is not None:
- self.df = df.loc[:, cols]
+
+ # all missing, raise
+ if not len(Index(cols) & df.columns):
+ raise KeyError(
+ "passes columns are not ALL present dataframe")
+
+ # deprecatedin gh-17295
+ # 1 missing is ok (for now)
+ if len(Index(cols) & df.columns) != len(cols):
+ warnings.warn(
+ "Not all names specified in 'columns' are found; "
+ "this will raise a KeyError in the future",
+ FutureWarning)
+
+ self.df = df.reindex(columns=cols)
self.columns = self.df.columns
self.float_format = float_format
self.index = index
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 6874fedaa705f..2f01eced364a3 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -111,7 +111,8 @@ def test_loc_listlike(self):
assert_frame_equal(result, expected, check_index_type=True)
# not all labels in the categories
- pytest.raises(KeyError, lambda: self.df2.loc[['a', 'd']])
+ with pytest.raises(KeyError):
+ self.df2.loc[['a', 'd']]
def test_loc_listlike_dtypes(self):
# GH 11586
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index ddac80fbc4693..617757c888eb5 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -223,7 +223,9 @@ def test_series_partial_set_datetime(self):
Timestamp('2011-01-03')]
exp = Series([np.nan, 0.2, np.nan],
index=pd.DatetimeIndex(keys, name='idx'), name='s')
- tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
def test_series_partial_set_period(self):
# GH 11497
@@ -248,5 +250,7 @@ def test_series_partial_set_period(self):
pd.Period('2011-01-03', freq='D')]
exp = Series([np.nan, 0.2, np.nan],
index=pd.PeriodIndex(keys, name='idx'), name='s')
- result = ser.loc[keys]
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = ser.loc[keys]
tm.assert_series_equal(result, exp)
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 39569f0b0cb38..c8e320f9d9c77 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -617,7 +617,8 @@ def test_iloc_non_unique_indexing(self):
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
- result = df2.loc[idx]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_iloc_empty_list_indexer_is_ok(self):
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index f1f51f26df55c..d64ed98243d72 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -176,7 +176,8 @@ def test_dups_fancy_indexing(self):
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
- result = df.loc[rows]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
@@ -186,7 +187,8 @@ def test_dups_fancy_indexing(self):
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
- result = df.loc[rows]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
@@ -203,12 +205,14 @@ def test_dups_fancy_indexing(self):
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
- result = df.loc[[0, 8, 0]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = df.loc[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
- result = df.loc[[0, 8, 0]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = df.loc[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
@@ -216,7 +220,8 @@ def test_dups_fancy_indexing(self):
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
- result = df.loc[['A', 'A', 'E']]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = df.loc[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
@@ -227,7 +232,8 @@ def test_dups_fancy_indexing(self):
expected = pd.concat(
[df.loc[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
- result = df.loc[:, ['A', 'B', 'C']]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = df.loc[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 95d6a24e68425..c6f38aeba9e87 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -152,15 +152,29 @@ def test_loc_getitem_label_list(self):
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
+ def test_loc_getitem_label_list_with_missing(self):
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
- self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
- typs=['ints', 'uints'], axes=0, fails=KeyError)
- self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
- typs=['ints', 'uints'], axes=1, fails=KeyError)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
+ typs=['ints', 'uints'], axes=0, fails=KeyError)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
+ typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
+ def test_getitem_label_list_with_missing(self):
+ s = pd.Series(range(3), index=['a', 'b', 'c'])
+
+ # consistency
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ s[['a', 'd']]
+
+ s = pd.Series(range(3))
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ s[[0, 3]]
+
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
@@ -249,7 +263,9 @@ def test_loc_to_fail(self):
pytest.raises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
- result = s.loc[[-1, -2]]
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
@@ -277,6 +293,23 @@ def f():
pytest.raises(KeyError, f)
+ def test_loc_getitem_list_with_fail(self):
+ # 15747
+ # should KeyError if *any* missing labels
+
+ s = Series([1, 2, 3])
+
+ s.loc[[2]]
+
+ with pytest.raises(KeyError):
+ s.loc[[3]]
+
+ # a non-match and a match
+ with tm.assert_produces_warning(FutureWarning):
+ expected = s.loc[[2, 3]]
+ result = s.reindex([2, 3])
+ tm.assert_series_equal(result, expected)
+
def test_loc_getitem_label_slice(self):
# label slices (with ints)
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 93a85e247a787..41ddfe934a131 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -222,13 +222,21 @@ def test_series_partial_set(self):
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
- # loc
+ # loc equiv to .reindex
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
- result = ser.loc[[3, 2, 3]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ser.loc[[3, 2, 3]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ result = ser.reindex([3, 2, 3])
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
- result = ser.loc[[3, 2, 3, 'x']]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ser.loc[[3, 2, 3, 'x']]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ result = ser.reindex([3, 2, 3, 'x'])
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
@@ -236,38 +244,71 @@ def test_series_partial_set(self):
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
- result = ser.loc[[2, 2, 'x', 1]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ser.loc[[2, 2, 'x', 1]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ result = ser.reindex([2, 2, 'x', 1])
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing in in the index
pytest.raises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
- result = ser.loc[[2, 2, 3]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
+ result = ser.reindex([2, 2, 3])
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ s = Series([0.1, 0.2, 0.3], index=[1, 2, 3])
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
- result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = s.loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
+ result = s.reindex([3, 4, 4])
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ s = Series([0.1, 0.2, 0.3, 0.4],
+ index=[1, 2, 3, 4])
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
- result = Series([0.1, 0.2, 0.3, 0.4],
- index=[1, 2, 3, 4]).loc[[5, 3, 3]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = s.loc[[5, 3, 3]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ result = s.reindex([5, 3, 3])
tm.assert_series_equal(result, expected, check_index_type=True)
+ s = Series([0.1, 0.2, 0.3, 0.4],
+ index=[1, 2, 3, 4])
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
- result = Series([0.1, 0.2, 0.3, 0.4],
- index=[1, 2, 3, 4]).loc[[5, 4, 4]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = s.loc[[5, 4, 4]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ result = s.reindex([5, 4, 4])
tm.assert_series_equal(result, expected, check_index_type=True)
+ s = Series([0.1, 0.2, 0.3, 0.4],
+ index=[4, 5, 6, 7])
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
- result = Series([0.1, 0.2, 0.3, 0.4],
- index=[4, 5, 6, 7]).loc[[7, 2, 2]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = s.loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
+ result = s.reindex([7, 2, 2])
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ s = Series([0.1, 0.2, 0.3, 0.4],
+ index=[1, 2, 3, 4])
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
- result = Series([0.1, 0.2, 0.3, 0.4],
- index=[1, 2, 3, 4]).loc[[4, 5, 5]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = s.loc[[4, 5, 5]]
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ result = s.reindex([4, 5, 5])
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
@@ -284,13 +325,15 @@ def test_series_partial_set_with_name(self):
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
- result = ser.loc[[3, 2, 3]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
- result = ser.loc[[3, 2, 3, 'x']]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
@@ -300,7 +343,8 @@ def test_series_partial_set_with_name(self):
exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
- result = ser.loc[[2, 2, 'x', 1]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing in in the index
@@ -308,41 +352,49 @@ def test_series_partial_set_with_name(self):
exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
- result = ser.loc[[2, 2, 3]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3], dtype='int64', name='idx')
- result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = Series([0.1, 0.2, 0.3],
+ index=idx,
+ name='s').loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 3, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
- result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
- name='s').loc[[5, 3, 3]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
+ name='s').loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 4, 4], dtype='int64', name='idx')
expected = Series([np.nan, 0.4, 0.4], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
- result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
- name='s').loc[[5, 4, 4]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
+ name='s').loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([7, 2, 2], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([4, 5, 6, 7], dtype='int64', name='idx')
- result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
- name='s').loc[[7, 2, 2]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
+ name='s').loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([4, 5, 5], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
- result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
- name='s').loc[[4, 5, 5]]
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
+ name='s').loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index 6a399f41975e5..4e25fe0371718 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -1808,8 +1808,10 @@ def test_invalid_columns(self):
write_frame = DataFrame({'A': [1, 1, 1],
'B': [2, 2, 2]})
- write_frame.to_excel(path, 'test1', columns=['B', 'C'])
- expected = write_frame.loc[:, ['B', 'C']]
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ write_frame.to_excel(path, 'test1', columns=['B', 'C'])
+ expected = write_frame.reindex(columns=['B', 'C'])
read_frame = read_excel(path, 'test1')
tm.assert_frame_equal(expected, read_frame)
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index 6e646f9b29442..65d58a196d1eb 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -1222,7 +1222,7 @@ def test_handle_empty_objects(self):
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0)
- expected = df.loc[:, ['a', 'b', 'c', 'd', 'foo']]
+ expected = df.reindex(columns=['a', 'b', 'c', 'd', 'foo'])
expected['foo'] = expected['foo'].astype('O')
expected.loc[0:4, 'foo'] = 'bar'
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py
index 272e8c7de5e49..86211612a5955 100644
--- a/pandas/tests/series/test_indexing.py
+++ b/pandas/tests/series/test_indexing.py
@@ -590,8 +590,13 @@ def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .loc internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
- expected = s.loc[['foo', 'bar', 'bah', 'bam']]
- result = s[['foo', 'bar', 'bah', 'bam']]
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ expected = s.loc[['foo', 'bar', 'bah', 'bam']]
+
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
diff --git a/pandas/tests/sparse/test_indexing.py b/pandas/tests/sparse/test_indexing.py
index 382cff4b9d0ac..edbac8f09241b 100644
--- a/pandas/tests/sparse/test_indexing.py
+++ b/pandas/tests/sparse/test_indexing.py
@@ -121,8 +121,8 @@ def test_loc(self):
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
- result = sparse.loc[[1, 3, 4, 5]]
- exp = orig.loc[[1, 3, 4, 5]].to_sparse()
+ result = sparse.reindex([1, 3, 4, 5])
+ exp = orig.reindex([1, 3, 4, 5]).to_sparse()
tm.assert_sp_series_equal(result, exp)
# padded with NaN
assert np.isnan(result[-1])
@@ -677,8 +677,8 @@ def test_loc(self):
tm.assert_sp_frame_equal(result, exp)
# exceeds the bounds
- result = sparse.loc[[1, 3, 4, 5]]
- exp = orig.loc[[1, 3, 4, 5]].to_sparse()
+ result = sparse.reindex([1, 3, 4, 5])
+ exp = orig.reindex([1, 3, 4, 5]).to_sparse()
tm.assert_sp_frame_equal(result, exp)
# dense array
| closes #15747
| https://api.github.com/repos/pandas-dev/pandas/pulls/17295 | 2017-08-20T19:58:26Z | 2017-10-03T10:55:51Z | 2017-10-03T10:55:50Z | 2017-10-04T11:29:21Z |
PERF: Update ASV publish config | diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json
index 59c05400d06b0..ced4f2b12445f 100644
--- a/asv_bench/asv.conf.json
+++ b/asv_bench/asv.conf.json
@@ -117,8 +117,10 @@
// with results. If the commit is `null`, regression detection is
// skipped for the matching benchmark.
//
- // "regressions_first_commits": {
- // "some_benchmark": "352cdf", // Consider regressions only after this commit
- // "another_benchmark": null, // Skip regression detection altogether
- // }
+ "regressions_first_commits": {
+ "*": "v0.20.0"
+ },
+ "regression_thresholds": {
+ "*": 0.05
+ }
}
| Stricter cutoffs for considering regressions
[ci skip] | https://api.github.com/repos/pandas-dev/pandas/pulls/17293 | 2017-08-20T12:31:28Z | 2017-08-20T21:25:43Z | 2017-08-20T21:25:43Z | 2017-08-23T18:58:04Z |
TST: parameterize consistency tests for rolling/expanding windows | diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index 21a9b05d48126..1cc0ad8bb4041 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -2009,6 +2009,15 @@ def no_nans(x):
_consistency_data = _create_consistency_data()
+def _rolling_consistency_cases():
+ for window in [1, 2, 3, 10, 20]:
+ for min_periods in set([0, 1, 2, 3, 4, window]):
+ if min_periods and (min_periods > window):
+ continue
+ for center in [False, True]:
+ yield window, min_periods, center
+
+
class TestMomentsConsistency(Base):
base_functions = [
(lambda v: Series(v).count(), None, 'count'),
@@ -2177,7 +2186,11 @@ def _non_null_values(x):
(mean_x * mean_y))
@pytest.mark.slow
- def test_ewm_consistency(self):
+ @pytest.mark.parametrize(
+ 'min_periods, adjust, ignore_na', product([0, 1, 2, 3, 4],
+ [True, False],
+ [False, True]))
+ def test_ewm_consistency(self, min_periods, adjust, ignore_na):
def _weights(s, com, adjust, ignore_na):
if isinstance(s, DataFrame):
if not len(s.columns):
@@ -2231,52 +2244,51 @@ def _ewma(s, com, min_periods, adjust, ignore_na):
return result
com = 3.
- for min_periods, adjust, ignore_na in product([0, 1, 2, 3, 4],
- [True, False],
- [False, True]):
- # test consistency between different ewm* moments
- self._test_moments_consistency(
- min_periods=min_periods,
- count=lambda x: x.expanding().count(),
- mean=lambda x: x.ewm(com=com, min_periods=min_periods,
- adjust=adjust,
- ignore_na=ignore_na).mean(),
- mock_mean=lambda x: _ewma(x, com=com,
- min_periods=min_periods,
- adjust=adjust,
- ignore_na=ignore_na),
- corr=lambda x, y: x.ewm(com=com, min_periods=min_periods,
- adjust=adjust,
- ignore_na=ignore_na).corr(y),
- var_unbiased=lambda x: (
- x.ewm(com=com, min_periods=min_periods,
- adjust=adjust,
- ignore_na=ignore_na).var(bias=False)),
- std_unbiased=lambda x: (
- x.ewm(com=com, min_periods=min_periods,
- adjust=adjust, ignore_na=ignore_na)
- .std(bias=False)),
- cov_unbiased=lambda x, y: (
- x.ewm(com=com, min_periods=min_periods,
- adjust=adjust, ignore_na=ignore_na)
- .cov(y, bias=False)),
- var_biased=lambda x: (
- x.ewm(com=com, min_periods=min_periods,
- adjust=adjust, ignore_na=ignore_na)
- .var(bias=True)),
- std_biased=lambda x: x.ewm(com=com, min_periods=min_periods,
- adjust=adjust,
- ignore_na=ignore_na).std(bias=True),
- cov_biased=lambda x, y: (
- x.ewm(com=com, min_periods=min_periods,
- adjust=adjust, ignore_na=ignore_na)
- .cov(y, bias=True)),
- var_debiasing_factors=lambda x: (
- _variance_debiasing_factors(x, com=com, adjust=adjust,
- ignore_na=ignore_na)))
+ # test consistency between different ewm* moments
+ self._test_moments_consistency(
+ min_periods=min_periods,
+ count=lambda x: x.expanding().count(),
+ mean=lambda x: x.ewm(com=com, min_periods=min_periods,
+ adjust=adjust,
+ ignore_na=ignore_na).mean(),
+ mock_mean=lambda x: _ewma(x, com=com,
+ min_periods=min_periods,
+ adjust=adjust,
+ ignore_na=ignore_na),
+ corr=lambda x, y: x.ewm(com=com, min_periods=min_periods,
+ adjust=adjust,
+ ignore_na=ignore_na).corr(y),
+ var_unbiased=lambda x: (
+ x.ewm(com=com, min_periods=min_periods,
+ adjust=adjust,
+ ignore_na=ignore_na).var(bias=False)),
+ std_unbiased=lambda x: (
+ x.ewm(com=com, min_periods=min_periods,
+ adjust=adjust, ignore_na=ignore_na)
+ .std(bias=False)),
+ cov_unbiased=lambda x, y: (
+ x.ewm(com=com, min_periods=min_periods,
+ adjust=adjust, ignore_na=ignore_na)
+ .cov(y, bias=False)),
+ var_biased=lambda x: (
+ x.ewm(com=com, min_periods=min_periods,
+ adjust=adjust, ignore_na=ignore_na)
+ .var(bias=True)),
+ std_biased=lambda x: x.ewm(com=com, min_periods=min_periods,
+ adjust=adjust,
+ ignore_na=ignore_na).std(bias=True),
+ cov_biased=lambda x, y: (
+ x.ewm(com=com, min_periods=min_periods,
+ adjust=adjust, ignore_na=ignore_na)
+ .cov(y, bias=True)),
+ var_debiasing_factors=lambda x: (
+ _variance_debiasing_factors(x, com=com, adjust=adjust,
+ ignore_na=ignore_na)))
@pytest.mark.slow
- def test_expanding_consistency(self):
+ @pytest.mark.parametrize(
+ 'min_periods', [0, 1, 2, 3, 4])
+ def test_expanding_consistency(self, min_periods):
# suppress warnings about empty slices, as we are deliberately testing
# with empty/0-length Series/DataFrames
@@ -2285,72 +2297,72 @@ def test_expanding_consistency(self):
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning)
- for min_periods in [0, 1, 2, 3, 4]:
-
- # test consistency between different expanding_* moments
- self._test_moments_consistency(
- min_periods=min_periods,
- count=lambda x: x.expanding().count(),
- mean=lambda x: x.expanding(
- min_periods=min_periods).mean(),
- mock_mean=lambda x: x.expanding(
- min_periods=min_periods).sum() / x.expanding().count(),
- corr=lambda x, y: x.expanding(
- min_periods=min_periods).corr(y),
- var_unbiased=lambda x: x.expanding(
- min_periods=min_periods).var(),
- std_unbiased=lambda x: x.expanding(
- min_periods=min_periods).std(),
- cov_unbiased=lambda x, y: x.expanding(
- min_periods=min_periods).cov(y),
- var_biased=lambda x: x.expanding(
- min_periods=min_periods).var(ddof=0),
- std_biased=lambda x: x.expanding(
- min_periods=min_periods).std(ddof=0),
- cov_biased=lambda x, y: x.expanding(
- min_periods=min_periods).cov(y, ddof=0),
- var_debiasing_factors=lambda x: (
- x.expanding().count() /
- (x.expanding().count() - 1.)
- .replace(0., np.nan)))
-
- # test consistency between expanding_xyz() and either (a)
- # expanding_apply of Series.xyz(), or (b) expanding_apply of
- # np.nanxyz()
- for (x, is_constant, no_nans) in self.data:
- functions = self.base_functions
-
- # GH 8269
- if no_nans:
- functions = self.base_functions + self.no_nan_functions
- for (f, require_min_periods, name) in functions:
- expanding_f = getattr(
- x.expanding(min_periods=min_periods), name)
-
- if (require_min_periods and
- (min_periods is not None) and
- (min_periods < require_min_periods)):
- continue
-
- if name == 'count':
- expanding_f_result = expanding_f()
- expanding_apply_f_result = x.expanding(
- min_periods=0).apply(func=f)
+ # test consistency between different expanding_* moments
+ self._test_moments_consistency(
+ min_periods=min_periods,
+ count=lambda x: x.expanding().count(),
+ mean=lambda x: x.expanding(
+ min_periods=min_periods).mean(),
+ mock_mean=lambda x: x.expanding(
+ min_periods=min_periods).sum() / x.expanding().count(),
+ corr=lambda x, y: x.expanding(
+ min_periods=min_periods).corr(y),
+ var_unbiased=lambda x: x.expanding(
+ min_periods=min_periods).var(),
+ std_unbiased=lambda x: x.expanding(
+ min_periods=min_periods).std(),
+ cov_unbiased=lambda x, y: x.expanding(
+ min_periods=min_periods).cov(y),
+ var_biased=lambda x: x.expanding(
+ min_periods=min_periods).var(ddof=0),
+ std_biased=lambda x: x.expanding(
+ min_periods=min_periods).std(ddof=0),
+ cov_biased=lambda x, y: x.expanding(
+ min_periods=min_periods).cov(y, ddof=0),
+ var_debiasing_factors=lambda x: (
+ x.expanding().count() /
+ (x.expanding().count() - 1.)
+ .replace(0., np.nan)))
+
+ # test consistency between expanding_xyz() and either (a)
+ # expanding_apply of Series.xyz(), or (b) expanding_apply of
+ # np.nanxyz()
+ for (x, is_constant, no_nans) in self.data:
+ functions = self.base_functions
+
+ # GH 8269
+ if no_nans:
+ functions = self.base_functions + self.no_nan_functions
+ for (f, require_min_periods, name) in functions:
+ expanding_f = getattr(
+ x.expanding(min_periods=min_periods), name)
+
+ if (require_min_periods and
+ (min_periods is not None) and
+ (min_periods < require_min_periods)):
+ continue
+
+ if name == 'count':
+ expanding_f_result = expanding_f()
+ expanding_apply_f_result = x.expanding(
+ min_periods=0).apply(func=f)
+ else:
+ if name in ['cov', 'corr']:
+ expanding_f_result = expanding_f(
+ pairwise=False)
else:
- if name in ['cov', 'corr']:
- expanding_f_result = expanding_f(
- pairwise=False)
- else:
- expanding_f_result = expanding_f()
- expanding_apply_f_result = x.expanding(
- min_periods=min_periods).apply(func=f)
-
- if not tm._incompat_bottleneck_version(name):
- assert_equal(expanding_f_result,
- expanding_apply_f_result)
+ expanding_f_result = expanding_f()
+ expanding_apply_f_result = x.expanding(
+ min_periods=min_periods).apply(func=f)
+
+ if not tm._incompat_bottleneck_version(name):
+ assert_equal(expanding_f_result,
+ expanding_apply_f_result)
@pytest.mark.slow
- def test_rolling_consistency(self):
+ @pytest.mark.parametrize(
+ 'window,min_periods,center', list(_rolling_consistency_cases()))
+ def test_rolling_consistency(self, window, min_periods, center):
# suppress warnings about empty slices, as we are deliberately testing
# with empty/0-length Series/DataFrames
@@ -2359,100 +2371,91 @@ def test_rolling_consistency(self):
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning)
- def cases():
- for window in [1, 2, 3, 10, 20]:
- for min_periods in set([0, 1, 2, 3, 4, window]):
- if min_periods and (min_periods > window):
- continue
- for center in [False, True]:
- yield window, min_periods, center
-
- for window, min_periods, center in cases():
- # test consistency between different rolling_* moments
- self._test_moments_consistency(
- min_periods=min_periods,
- count=lambda x: (
- x.rolling(window=window, center=center)
- .count()),
- mean=lambda x: (
- x.rolling(window=window, min_periods=min_periods,
- center=center).mean()),
- mock_mean=lambda x: (
- x.rolling(window=window,
- min_periods=min_periods,
- center=center).sum()
- .divide(x.rolling(window=window,
- min_periods=min_periods,
- center=center).count())),
- corr=lambda x, y: (
- x.rolling(window=window, min_periods=min_periods,
- center=center).corr(y)),
-
- var_unbiased=lambda x: (
- x.rolling(window=window, min_periods=min_periods,
- center=center).var()),
-
- std_unbiased=lambda x: (
- x.rolling(window=window, min_periods=min_periods,
- center=center).std()),
-
- cov_unbiased=lambda x, y: (
- x.rolling(window=window, min_periods=min_periods,
- center=center).cov(y)),
-
- var_biased=lambda x: (
- x.rolling(window=window, min_periods=min_periods,
- center=center).var(ddof=0)),
-
- std_biased=lambda x: (
- x.rolling(window=window, min_periods=min_periods,
- center=center).std(ddof=0)),
-
- cov_biased=lambda x, y: (
- x.rolling(window=window, min_periods=min_periods,
- center=center).cov(y, ddof=0)),
- var_debiasing_factors=lambda x: (
- x.rolling(window=window, center=center).count()
- .divide((x.rolling(window=window, center=center)
- .count() - 1.)
- .replace(0., np.nan))))
-
- # test consistency between rolling_xyz() and either (a)
- # rolling_apply of Series.xyz(), or (b) rolling_apply of
- # np.nanxyz()
- for (x, is_constant, no_nans) in self.data:
- functions = self.base_functions
-
- # GH 8269
- if no_nans:
- functions = self.base_functions + self.no_nan_functions
- for (f, require_min_periods, name) in functions:
- rolling_f = getattr(
- x.rolling(window=window, center=center,
- min_periods=min_periods), name)
-
- if require_min_periods and (
- min_periods is not None) and (
- min_periods < require_min_periods):
- continue
+ # test consistency between different rolling_* moments
+ self._test_moments_consistency(
+ min_periods=min_periods,
+ count=lambda x: (
+ x.rolling(window=window, center=center)
+ .count()),
+ mean=lambda x: (
+ x.rolling(window=window, min_periods=min_periods,
+ center=center).mean()),
+ mock_mean=lambda x: (
+ x.rolling(window=window,
+ min_periods=min_periods,
+ center=center).sum()
+ .divide(x.rolling(window=window,
+ min_periods=min_periods,
+ center=center).count())),
+ corr=lambda x, y: (
+ x.rolling(window=window, min_periods=min_periods,
+ center=center).corr(y)),
- if name == 'count':
- rolling_f_result = rolling_f()
- rolling_apply_f_result = x.rolling(
- window=window, min_periods=0,
- center=center).apply(func=f)
+ var_unbiased=lambda x: (
+ x.rolling(window=window, min_periods=min_periods,
+ center=center).var()),
+
+ std_unbiased=lambda x: (
+ x.rolling(window=window, min_periods=min_periods,
+ center=center).std()),
+
+ cov_unbiased=lambda x, y: (
+ x.rolling(window=window, min_periods=min_periods,
+ center=center).cov(y)),
+
+ var_biased=lambda x: (
+ x.rolling(window=window, min_periods=min_periods,
+ center=center).var(ddof=0)),
+
+ std_biased=lambda x: (
+ x.rolling(window=window, min_periods=min_periods,
+ center=center).std(ddof=0)),
+
+ cov_biased=lambda x, y: (
+ x.rolling(window=window, min_periods=min_periods,
+ center=center).cov(y, ddof=0)),
+ var_debiasing_factors=lambda x: (
+ x.rolling(window=window, center=center).count()
+ .divide((x.rolling(window=window, center=center)
+ .count() - 1.)
+ .replace(0., np.nan))))
+
+ # test consistency between rolling_xyz() and either (a)
+ # rolling_apply of Series.xyz(), or (b) rolling_apply of
+ # np.nanxyz()
+ for (x, is_constant, no_nans) in self.data:
+ functions = self.base_functions
+
+ # GH 8269
+ if no_nans:
+ functions = self.base_functions + self.no_nan_functions
+ for (f, require_min_periods, name) in functions:
+ rolling_f = getattr(
+ x.rolling(window=window, center=center,
+ min_periods=min_periods), name)
+
+ if require_min_periods and (
+ min_periods is not None) and (
+ min_periods < require_min_periods):
+ continue
+
+ if name == 'count':
+ rolling_f_result = rolling_f()
+ rolling_apply_f_result = x.rolling(
+ window=window, min_periods=0,
+ center=center).apply(func=f)
+ else:
+ if name in ['cov', 'corr']:
+ rolling_f_result = rolling_f(
+ pairwise=False)
else:
- if name in ['cov', 'corr']:
- rolling_f_result = rolling_f(
- pairwise=False)
- else:
- rolling_f_result = rolling_f()
- rolling_apply_f_result = x.rolling(
- window=window, min_periods=min_periods,
- center=center).apply(func=f)
- if not tm._incompat_bottleneck_version(name):
- assert_equal(rolling_f_result,
- rolling_apply_f_result)
+ rolling_f_result = rolling_f()
+ rolling_apply_f_result = x.rolling(
+ window=window, min_periods=min_periods,
+ center=center).apply(func=f)
+ if not tm._incompat_bottleneck_version(name):
+ assert_equal(rolling_f_result,
+ rolling_apply_f_result)
# binary moments
def test_rolling_cov(self):
| https://api.github.com/repos/pandas-dev/pandas/pulls/17292 | 2017-08-19T16:35:40Z | 2017-08-19T21:55:35Z | 2017-08-19T21:55:35Z | 2017-08-19T21:56:28Z | |
BUG: Fix strange behaviour of Series.iloc on MultiIndex Series (#17148) | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 85685ed7b430d..ab5071e8e9ff7 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -329,6 +329,7 @@ Indexing
- Fixes ``DataFrame.loc`` for setting with alignment and tz-aware ``DatetimeIndex`` (:issue:`16889`)
- Avoids ``IndexError`` when passing an Index or Series to ``.iloc`` with older numpy (:issue:`17193`)
- Allow unicode empty strings as placeholders in multilevel columns in Python 2 (:issue:`17099`)
+- Bug in ``.iloc`` when used with inplace addition or assignment and an int indexer on a ``MultiIndex`` causing the wrong indexes to be read from and written to (:issue:`17148`)
I/O
^^^
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 109183827de4e..757608128a73a 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -146,7 +146,8 @@ def _get_setitem_indexer(self, key):
return self._convert_tuple(key, is_setter=True)
axis = self.obj._get_axis(0)
- if isinstance(axis, MultiIndex):
+
+ if isinstance(axis, MultiIndex) and self.name != 'iloc':
try:
return axis.get_loc(key)
except Exception:
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 31fee303a41e2..39569f0b0cb38 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -269,6 +269,35 @@ def test_iloc_setitem(self):
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
+ @pytest.mark.parametrize(
+ 'data, indexes, values, expected_k', [
+ # test without indexer value in first level of MultiIndex
+ ([[2, 22, 5], [2, 33, 6]], [0, -1, 1], [2, 3, 1], [7, 10]),
+ # test like code sample 1 in the issue
+ ([[1, 22, 555], [1, 33, 666]], [0, -1, 1], [200, 300, 100],
+ [755, 1066]),
+ # test like code sample 2 in the issue
+ ([[1, 3, 7], [2, 4, 8]], [0, -1, 1], [10, 10, 1000], [17, 1018]),
+ # test like code sample 3 in the issue
+ ([[1, 11, 4], [2, 22, 5], [3, 33, 6]], [0, -1, 1], [4, 7, 10],
+ [8, 15, 13])
+ ])
+ def test_iloc_setitem_int_multiindex_series(
+ self, data, indexes, values, expected_k):
+ # GH17148
+ df = pd.DataFrame(
+ data=data,
+ columns=['i', 'j', 'k'])
+ df = df.set_index(['i', 'j'])
+
+ series = df.k.copy()
+ for i, v in zip(indexes, values):
+ series.iloc[i] += v
+
+ df['k'] = expected_k
+ expected = df.k
+ tm.assert_series_equal(series, expected)
+
def test_iloc_setitem_list(self):
# setitem with an iloc list
| - [x] closes #17148
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17291 | 2017-08-19T15:06:51Z | 2017-08-22T14:31:15Z | 2017-08-22T14:31:15Z | 2017-08-22T14:31:18Z |
CLN: replace %s syntax with .format in pandas.tseries | diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index aa33a3849acb3..7f34bcaf52926 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -409,16 +409,17 @@ def _get_freq_str(base, mult=1):
need_suffix = ['QS', 'BQ', 'BQS', 'YS', 'AS', 'BY', 'BA', 'BYS', 'BAS']
for __prefix in need_suffix:
for _m in tslib._MONTHS:
- _offset_to_period_map['%s-%s' % (__prefix, _m)] = \
- _offset_to_period_map[__prefix]
+ _alias = '{prefix}-{month}'.format(prefix=__prefix, month=_m)
+ _offset_to_period_map[_alias] = _offset_to_period_map[__prefix]
for __prefix in ['A', 'Q']:
for _m in tslib._MONTHS:
- _alias = '%s-%s' % (__prefix, _m)
+ _alias = '{prefix}-{month}'.format(prefix=__prefix, month=_m)
_offset_to_period_map[_alias] = _alias
_days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for _d in _days:
- _offset_to_period_map['W-%s' % _d] = 'W-%s' % _d
+ _alias = 'W-{day}'.format(day=_d)
+ _offset_to_period_map[_alias] = _alias
def get_period_alias(offset_str):
@@ -587,7 +588,7 @@ def _base_and_stride(freqstr):
groups = opattern.match(freqstr)
if not groups:
- raise ValueError("Could not evaluate %s" % freqstr)
+ raise ValueError("Could not evaluate {freq}".format(freq=freqstr))
stride = groups.group(1)
@@ -775,8 +776,8 @@ def infer_freq(index, warn=True):
if not (is_datetime64_dtype(values) or
is_timedelta64_dtype(values) or
values.dtype == object):
- raise TypeError("cannot infer freq from a non-convertible "
- "dtype on a Series of {0}".format(index.dtype))
+ raise TypeError("cannot infer freq from a non-convertible dtype "
+ "on a Series of {dtype}".format(dtype=index.dtype))
index = values
if is_period_arraylike(index):
@@ -789,7 +790,7 @@ def infer_freq(index, warn=True):
if isinstance(index, pd.Index) and not isinstance(index, pd.DatetimeIndex):
if isinstance(index, (pd.Int64Index, pd.Float64Index)):
raise TypeError("cannot infer freq from a non-convertible index "
- "type {0}".format(type(index)))
+ "type {type}".format(type=type(index)))
index = index.values
if not isinstance(index, pd.DatetimeIndex):
@@ -956,15 +957,17 @@ def _infer_daily_rule(self):
if annual_rule:
nyears = self.ydiffs[0]
month = _month_aliases[self.rep_stamp.month]
- return _maybe_add_count('%s-%s' % (annual_rule, month), nyears)
+ alias = '{prefix}-{month}'.format(prefix=annual_rule, month=month)
+ return _maybe_add_count(alias, nyears)
quarterly_rule = self._get_quarterly_rule()
if quarterly_rule:
nquarters = self.mdiffs[0] / 3
mod_dict = {0: 12, 2: 11, 1: 10}
month = _month_aliases[mod_dict[self.rep_stamp.month % 3]]
- return _maybe_add_count('%s-%s' % (quarterly_rule, month),
- nquarters)
+ alias = '{prefix}-{month}'.format(prefix=quarterly_rule,
+ month=month)
+ return _maybe_add_count(alias, nquarters)
monthly_rule = self._get_monthly_rule()
if monthly_rule:
@@ -974,8 +977,8 @@ def _infer_daily_rule(self):
days = self.deltas[0] / _ONE_DAY
if days % 7 == 0:
# Weekly
- alias = _weekday_rule_aliases[self.rep_stamp.weekday()]
- return _maybe_add_count('W-%s' % alias, days / 7)
+ day = _weekday_rule_aliases[self.rep_stamp.weekday()]
+ return _maybe_add_count('W-{day}'.format(day=day), days / 7)
else:
return _maybe_add_count('D', days)
@@ -1048,7 +1051,7 @@ def _get_wom_rule(self):
week = week_of_months[0] + 1
wd = _weekday_rule_aliases[weekdays[0]]
- return 'WOM-%d%s' % (week, wd)
+ return 'WOM-{week}{weekday}'.format(week=week, weekday=wd)
class _TimedeltaFrequencyInferer(_FrequencyInferer):
@@ -1058,15 +1061,16 @@ def _infer_daily_rule(self):
days = self.deltas[0] / _ONE_DAY
if days % 7 == 0:
# Weekly
- alias = _weekday_rule_aliases[self.rep_stamp.weekday()]
- return _maybe_add_count('W-%s' % alias, days / 7)
+ wd = _weekday_rule_aliases[self.rep_stamp.weekday()]
+ alias = 'W-{weekday}'.format(weekday=wd)
+ return _maybe_add_count(alias, days / 7)
else:
return _maybe_add_count('D', days)
def _maybe_add_count(base, count):
if count != 1:
- return '%d%s' % (count, base)
+ return '{count}{base}'.format(count=int(count), base=base)
else:
return base
diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py
index 9acb52ebe0e9f..d8bfa3013f8f7 100644
--- a/pandas/tseries/holiday.py
+++ b/pandas/tseries/holiday.py
@@ -174,16 +174,16 @@ class from pandas.tseries.offsets
def __repr__(self):
info = ''
if self.year is not None:
- info += 'year=%s, ' % self.year
- info += 'month=%s, day=%s, ' % (self.month, self.day)
+ info += 'year={year}, '.format(year=self.year)
+ info += 'month={mon}, day={day}, '.format(mon=self.month, day=self.day)
if self.offset is not None:
- info += 'offset=%s' % self.offset
+ info += 'offset={offset}'.format(offset=self.offset)
if self.observance is not None:
- info += 'observance=%s' % self.observance
+ info += 'observance={obs}'.format(obs=self.observance)
- repr = 'Holiday: %s (%s)' % (self.name, info)
+ repr = 'Holiday: {name} ({info})'.format(name=self.name, info=info)
return repr
def dates(self, start_date, end_date, return_name=False):
@@ -374,8 +374,8 @@ def holidays(self, start=None, end=None, return_name=False):
DatetimeIndex of holidays
"""
if self.rules is None:
- raise Exception('Holiday Calendar %s does not have any '
- 'rules specified' % self.name)
+ raise Exception('Holiday Calendar {name} does not have any '
+ 'rules specified'.format(name=self.name))
if start is None:
start = AbstractHolidayCalendar.start_date
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 56ef703e67ca0..29cdda5548896 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -261,10 +261,10 @@ def apply_index(self, i):
"""
if not type(self) is DateOffset:
- raise NotImplementedError("DateOffset subclass %s "
+ raise NotImplementedError("DateOffset subclass {name} "
"does not have a vectorized "
- "implementation"
- % (self.__class__.__name__,))
+ "implementation".format(
+ name=self.__class__.__name__))
relativedelta_fast = set(['years', 'months', 'weeks',
'days', 'hours', 'minutes',
'seconds', 'microseconds'])
@@ -295,10 +295,10 @@ def apply_index(self, i):
return i + (self._offset * self.n)
else:
# relativedelta with other keywords
+ kwd = set(self.kwds) - relativedelta_fast
raise NotImplementedError("DateOffset with relativedelta "
- "keyword(s) %s not able to be "
- "applied vectorized" %
- (set(self.kwds) - relativedelta_fast),)
+ "keyword(s) {kwd} not able to be "
+ "applied vectorized".format(kwd=kwd))
def isAnchored(self):
return (self.n == 1)
@@ -339,19 +339,20 @@ def __repr__(self):
if attr not in exclude:
attrs.append('='.join((attr, repr(getattr(self, attr)))))
+ plural = ''
if abs(self.n) != 1:
plural = 's'
- else:
- plural = ''
- n_str = ""
+ n_str = ''
if self.n != 1:
- n_str = "%s * " % self.n
+ n_str = '{n} * '.format(n=self.n)
- out = '<%s' % n_str + className + plural
+ attrs_str = ''
if attrs:
- out += ': ' + ', '.join(attrs)
- out += '>'
+ attrs_str = ': ' + ', '.join(attrs)
+
+ repr_content = ''.join([n_str, className, plural, attrs_str])
+ out = '<{content}>'.format(content=repr_content)
return out
@property
@@ -501,7 +502,7 @@ def freqstr(self):
return repr(self)
if self.n != 1:
- fstr = '%d%s' % (self.n, code)
+ fstr = '{n}{code}'.format(n=self.n, code=code)
else:
fstr = code
@@ -509,7 +510,7 @@ def freqstr(self):
@property
def nanos(self):
- raise ValueError("{0} is a non-fixed frequency".format(self))
+ raise ValueError("{name} is a non-fixed frequency".format(name=self))
class SingleConstructorOffset(DateOffset):
@@ -518,7 +519,7 @@ class SingleConstructorOffset(DateOffset):
def _from_name(cls, suffix=None):
# default _from_name calls cls with no args
if suffix:
- raise ValueError("Bad freq suffix %s" % suffix)
+ raise ValueError("Bad freq suffix {suffix}".format(suffix=suffix))
return cls()
@@ -531,21 +532,21 @@ class BusinessMixin(object):
def __repr__(self):
className = getattr(self, '_outputName', self.__class__.__name__)
+ plural = ''
if abs(self.n) != 1:
plural = 's'
- else:
- plural = ''
- n_str = ""
+ n_str = ''
if self.n != 1:
- n_str = "%s * " % self.n
+ n_str = '{n} * '.format(n=self.n)
- out = '<%s' % n_str + className + plural + self._repr_attrs() + '>'
+ repr_content = ''.join([n_str, className, plural, self._repr_attrs()])
+ out = '<{content}>'.format(content=repr_content)
return out
def _repr_attrs(self):
if self.offset:
- attrs = ['offset=%s' % repr(self.offset)]
+ attrs = ['offset={offset!r}'.format(offset=self.offset)]
else:
attrs = None
out = ''
@@ -601,7 +602,7 @@ def freqstr(self):
return repr(self)
if self.n != 1:
- fstr = '%d%s' % (self.n, code)
+ fstr = '{n}{code}'.format(n=self.n, code=code)
else:
fstr = code
@@ -1109,7 +1110,8 @@ def name(self):
if self.isAnchored:
return self.rule_code
else:
- return "%s-%s" % (self.rule_code, _int_to_month[self.n])
+ return "{code}-{month}".format(code=self.rule_code,
+ month=_int_to_month[self.n])
class MonthEnd(MonthOffset):
@@ -1176,9 +1178,9 @@ def __init__(self, n=1, day_of_month=None, normalize=False, **kwds):
else:
self.day_of_month = int(day_of_month)
if not self._min_day_of_month <= self.day_of_month <= 27:
- raise ValueError('day_of_month must be '
- '{}<=day_of_month<=27, got {}'.format(
- self._min_day_of_month, self.day_of_month))
+ msg = 'day_of_month must be {min}<=day_of_month<=27, got {day}'
+ raise ValueError(msg.format(min=self._min_day_of_month,
+ day=self.day_of_month))
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
@@ -1190,7 +1192,7 @@ def _from_name(cls, suffix=None):
@property
def rule_code(self):
- suffix = '-{}'.format(self.day_of_month)
+ suffix = '-{day_of_month}'.format(day_of_month=self.day_of_month)
return self._prefix + suffix
@apply_wraps
@@ -1576,8 +1578,8 @@ def __init__(self, n=1, normalize=False, **kwds):
if self.weekday is not None:
if self.weekday < 0 or self.weekday > 6:
- raise ValueError('Day must be 0<=day<=6, got %d' %
- self.weekday)
+ raise ValueError('Day must be 0<=day<=6, got {day}'
+ .format(day=self.weekday))
self._inc = timedelta(weeks=1)
self.kwds = kwds
@@ -1630,7 +1632,7 @@ def onOffset(self, dt):
def rule_code(self):
suffix = ''
if self.weekday is not None:
- suffix = '-%s' % (_int_to_weekday[self.weekday])
+ suffix = '-{weekday}'.format(weekday=_int_to_weekday[self.weekday])
return self._prefix + suffix
@classmethod
@@ -1696,11 +1698,11 @@ def __init__(self, n=1, normalize=False, **kwds):
raise ValueError('N cannot be 0')
if self.weekday < 0 or self.weekday > 6:
- raise ValueError('Day must be 0<=day<=6, got %d' %
- self.weekday)
+ raise ValueError('Day must be 0<=day<=6, got {day}'
+ .format(day=self.weekday))
if self.week < 0 or self.week > 3:
- raise ValueError('Week must be 0<=day<=3, got %d' %
- self.week)
+ raise ValueError('Week must be 0<=week<=3, got {week}'
+ .format(week=self.week))
self.kwds = kwds
@@ -1746,15 +1748,18 @@ def onOffset(self, dt):
@property
def rule_code(self):
- return '%s-%d%s' % (self._prefix, self.week + 1,
- _int_to_weekday.get(self.weekday, ''))
+ weekday = _int_to_weekday.get(self.weekday, '')
+ return '{prefix}-{week}{weekday}'.format(prefix=self._prefix,
+ week=self.week + 1,
+ weekday=weekday)
_prefix = 'WOM'
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
- raise ValueError("Prefix %r requires a suffix." % (cls._prefix))
+ raise ValueError("Prefix {prefix!r} requires a suffix."
+ .format(prefix=cls._prefix))
# TODO: handle n here...
# only one digit weeks (1 --> week 0, 2 --> week 1, etc.)
week = int(suffix[0]) - 1
@@ -1789,8 +1794,8 @@ def __init__(self, n=1, normalize=False, **kwds):
raise ValueError('N cannot be 0')
if self.weekday < 0 or self.weekday > 6:
- raise ValueError('Day must be 0<=day<=6, got %d' %
- self.weekday)
+ raise ValueError('Day must be 0<=day<=6, got {day}'
+ .format(day=self.weekday))
self.kwds = kwds
@@ -1829,14 +1834,17 @@ def onOffset(self, dt):
@property
def rule_code(self):
- return '%s-%s' % (self._prefix, _int_to_weekday.get(self.weekday, ''))
+ weekday = _int_to_weekday.get(self.weekday, '')
+ return '{prefix}-{weekday}'.format(prefix=self._prefix,
+ weekday=weekday)
_prefix = 'LWOM'
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
- raise ValueError("Prefix %r requires a suffix." % (cls._prefix))
+ raise ValueError("Prefix {prefix!r} requires a suffix."
+ .format(prefix=cls._prefix))
# TODO: handle n here...
weekday = _weekday_to_int[suffix]
return cls(weekday=weekday)
@@ -1876,7 +1884,8 @@ def _from_name(cls, suffix=None):
@property
def rule_code(self):
- return '%s-%s' % (self._prefix, _int_to_month[self.startingMonth])
+ month = _int_to_month[self.startingMonth]
+ return '{prefix}-{month}'.format(prefix=self._prefix, month=month)
class BQuarterEnd(QuarterOffset):
@@ -2045,8 +2054,7 @@ def apply(self, other):
@apply_index_wraps
def apply_index(self, i):
freq_month = 12 if self.startingMonth == 1 else self.startingMonth - 1
- # freq_month = self.startingMonth
- freqstr = 'Q-%s' % (_int_to_month[freq_month],)
+ freqstr = 'Q-{month}'.format(month=_int_to_month[freq_month])
return self._beg_apply_index(i, freqstr)
@@ -2071,7 +2079,8 @@ def _from_name(cls, suffix=None):
@property
def rule_code(self):
- return '%s-%s' % (self._prefix, _int_to_month[self.month])
+ month = _int_to_month[self.month]
+ return '{prefix}-{month}'.format(prefix=self._prefix, month=month)
class BYearEnd(YearOffset):
@@ -2246,7 +2255,7 @@ def _rollf(date):
@apply_index_wraps
def apply_index(self, i):
freq_month = 12 if self.month == 1 else self.month - 1
- freqstr = 'A-%s' % (_int_to_month[freq_month],)
+ freqstr = 'A-{month}'.format(month=_int_to_month[freq_month])
return self._beg_apply_index(i, freqstr)
def onOffset(self, dt):
@@ -2312,7 +2321,8 @@ def __init__(self, n=1, normalize=False, **kwds):
raise ValueError('N cannot be 0')
if self.variation not in ["nearest", "last"]:
- raise ValueError('%s is not a valid variation' % self.variation)
+ raise ValueError('{variation} is not a valid variation'
+ .format(variation=self.variation))
if self.variation == "nearest":
weekday_offset = weekday(self.weekday)
@@ -2438,8 +2448,9 @@ def _get_year_end_last(self, dt):
@property
def rule_code(self):
+ prefix = self._get_prefix()
suffix = self.get_rule_code_suffix()
- return "%s-%s" % (self._get_prefix(), suffix)
+ return "{prefix}-{suffix}".format(prefix=prefix, suffix=suffix)
def _get_prefix(self):
return self._prefix
@@ -2451,9 +2462,11 @@ def _get_suffix_prefix(self):
return self._suffix_prefix_last
def get_rule_code_suffix(self):
- return '%s-%s-%s' % (self._get_suffix_prefix(),
- _int_to_month[self.startingMonth],
- _int_to_weekday[self.weekday])
+ prefix = self._get_suffix_prefix()
+ month = _int_to_month[self.startingMonth]
+ weekday = _int_to_weekday[self.weekday]
+ return '{prefix}-{month}-{weekday}'.format(prefix=prefix, month=month,
+ weekday=weekday)
@classmethod
def _parse_suffix(cls, varion_code, startingMonth_code, weekday_code):
@@ -2463,7 +2476,7 @@ def _parse_suffix(cls, varion_code, startingMonth_code, weekday_code):
variation = "last"
else:
raise ValueError(
- "Unable to parse varion_code: %s" % (varion_code,))
+ "Unable to parse varion_code: {code}".format(code=varion_code))
startingMonth = _month_to_int[startingMonth_code]
weekday = _weekday_to_int[weekday_code]
@@ -2628,8 +2641,9 @@ def onOffset(self, dt):
@property
def rule_code(self):
suffix = self._offset.get_rule_code_suffix()
- return "%s-%s" % (self._prefix,
- "%s-%d" % (suffix, self.qtr_with_extra_week))
+ qtr = self.qtr_with_extra_week
+ return "{prefix}-{suffix}-{qtr}".format(prefix=self._prefix,
+ suffix=suffix, qtr=qtr)
@classmethod
def _from_name(cls, *args):
@@ -2712,8 +2726,8 @@ def __add__(self, other):
except ApplyTypeError:
return NotImplemented
except OverflowError:
- raise OverflowError("the add operation between {} and {} "
- "will overflow".format(self, other))
+ raise OverflowError("the add operation between {self} and {other} "
+ "will overflow".format(self=self, other=other))
def __eq__(self, other):
if isinstance(other, compat.string_types):
@@ -2771,7 +2785,8 @@ def apply(self, other):
elif isinstance(other, type(self)):
return type(self)(self.n + other.n)
- raise ApplyTypeError('Unhandled type: %s' % type(other).__name__)
+ raise ApplyTypeError('Unhandled type: {type_str}'
+ .format(type_str=type(other).__name__))
_prefix = 'undefined'
@@ -2921,7 +2936,8 @@ def generate_range(start=None, end=None, periods=None,
# faster than cur + offset
next_date = offset.apply(cur)
if next_date <= cur:
- raise ValueError('Offset %s did not increment date' % offset)
+ raise ValueError('Offset {offset} did not increment date'
+ .format(offset=offset))
cur = next_date
else:
while cur >= end:
@@ -2930,7 +2946,8 @@ def generate_range(start=None, end=None, periods=None,
# faster than cur + offset
next_date = offset.apply(cur)
if next_date >= cur:
- raise ValueError('Offset %s did not decrement date' % offset)
+ raise ValueError('Offset {offset} did not decrement date'
+ .format(offset=offset))
cur = next_date
| Progress towards #16130
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Replaced `%s` syntax with `.format` in pandas.tseries. Additionally, made some of the existing positional `.format` code more explicit.
| https://api.github.com/repos/pandas-dev/pandas/pulls/17290 | 2017-08-18T22:58:47Z | 2017-08-19T16:51:06Z | 2017-08-19T16:51:06Z | 2017-08-19T18:51:26Z |
Replace usage of total_seconds compat func with timedelta method | diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx
index e017d863e1907..6a23703146dc3 100644
--- a/pandas/_libs/period.pyx
+++ b/pandas/_libs/period.pyx
@@ -10,9 +10,6 @@ from numpy cimport (int8_t, int32_t, int64_t, import_array, ndarray,
NPY_INT64, NPY_DATETIME, NPY_TIMEDELTA)
import numpy as np
-cdef extern from "datetime_helper.h":
- double total_seconds(object)
-
from libc.stdlib cimport free
from pandas import compat
@@ -570,7 +567,7 @@ cdef _reso_local(ndarray[int64_t] stamps, object tz):
&dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz)
- delta = int(total_seconds(_get_utcoffset(tz, dt))) * 1000000000
+ delta = int(_get_utcoffset(tz, dt).total_seconds()) * 1000000000
pandas_datetime_to_datetimestruct(stamps[i] + delta,
PANDAS_FR_ns, &dts)
curr_reso = _reso_stamp(&dts)
@@ -637,7 +634,7 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps,
&dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz)
- delta = int(total_seconds(_get_utcoffset(tz, dt))) * 1000000000
+ delta = int(_get_utcoffset(tz, dt).total_seconds()) * 1000000000
pandas_datetime_to_datetimestruct(stamps[i] + delta,
PANDAS_FR_ns, &dts)
result[i] = get_period_ordinal(dts.year, dts.month, dts.day,
diff --git a/pandas/_libs/src/datetime_helper.h b/pandas/_libs/src/datetime_helper.h
deleted file mode 100644
index 8023285f85b9b..0000000000000
--- a/pandas/_libs/src/datetime_helper.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
-Copyright (c) 2016, PyData Development Team
-All rights reserved.
-
-Distributed under the terms of the BSD Simplified License.
-
-The full license is in the LICENSE file, distributed with this software.
-*/
-
-#ifndef PANDAS__LIBS_SRC_DATETIME_HELPER_H_
-#define PANDAS__LIBS_SRC_DATETIME_HELPER_H_
-
-#include <stdio.h>
-#include "datetime.h"
-#include "numpy/arrayobject.h"
-#include "numpy/arrayscalars.h"
-
-npy_int64 get_long_attr(PyObject *o, const char *attr) {
- npy_int64 long_val;
- PyObject *value = PyObject_GetAttrString(o, attr);
- long_val = (PyLong_Check(value) ?
- PyLong_AsLongLong(value) : PyInt_AS_LONG(value));
- Py_DECREF(value);
- return long_val;
-}
-
-npy_float64 total_seconds(PyObject *td) {
- // Python 2.6 compat
- npy_int64 microseconds = get_long_attr(td, "microseconds");
- npy_int64 seconds = get_long_attr(td, "seconds");
- npy_int64 days = get_long_attr(td, "days");
- npy_int64 days_in_seconds = days * 24LL * 3600LL;
- return (microseconds + (seconds + days_in_seconds) * 1000000.0) / 1000000.0;
-}
-
-#endif // PANDAS__LIBS_SRC_DATETIME_HELPER_H_
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index f2c0b18d35131..4beaa3fd449df 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -47,9 +47,9 @@ Numeric decoder derived from from TCL library
#include <numpy_helper.h> // NOLINT(build/include_order)
#include <stdio.h> // NOLINT(build/include_order)
#include <ultrajson.h> // NOLINT(build/include_order)
-#include <datetime_helper.h> // NOLINT(build/include_order)
#include <np_datetime.h> // NOLINT(build/include_order)
#include <np_datetime_strings.h> // NOLINT(build/include_order)
+#include "datetime.h"
static PyObject *type_decimal;
@@ -329,6 +329,26 @@ static Py_ssize_t get_attr_length(PyObject *obj, char *attr) {
return ret;
}
+npy_int64 get_long_attr(PyObject *o, const char *attr) {
+ npy_int64 long_val;
+ PyObject *value = PyObject_GetAttrString(o, attr);
+ long_val = (PyLong_Check(value) ?
+ PyLong_AsLongLong(value) : PyInt_AS_LONG(value));
+ Py_DECREF(value);
+ return long_val;
+}
+
+npy_float64 total_seconds(PyObject *td) {
+ // Python 2.6 compat
+ // TODO(anyone): remove this legacy workaround with a more
+ // direct td.total_seconds()
+ npy_int64 microseconds = get_long_attr(td, "microseconds");
+ npy_int64 seconds = get_long_attr(td, "seconds");
+ npy_int64 days = get_long_attr(td, "days");
+ npy_int64 days_in_seconds = days * 24LL * 3600LL;
+ return (microseconds + (seconds + days_in_seconds) * 1000000.0) / 1000000.0;
+}
+
static PyObject *get_item(PyObject *obj, Py_ssize_t i) {
PyObject *tmp = PyInt_FromSsize_t(i);
PyObject *ret;
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 32b8c92a50269..8725f38537b5e 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -26,9 +26,6 @@ from cpython cimport (
cdef extern from "Python.h":
cdef PyTypeObject *Py_TYPE(object)
-cdef extern from "datetime_helper.h":
- double total_seconds(object)
-
# this is our datetime.pxd
from libc.stdlib cimport free
@@ -1644,7 +1641,7 @@ cdef inline void _localize_tso(_TSObject obj, object tz):
pandas_datetime_to_datetimestruct(obj.value, PANDAS_FR_ns, &obj.dts)
dt = datetime(obj.dts.year, obj.dts.month, obj.dts.day, obj.dts.hour,
obj.dts.min, obj.dts.sec, obj.dts.us, tz)
- delta = int(total_seconds(_get_utcoffset(tz, dt))) * 1000000000
+ delta = int(_get_utcoffset(tz, dt).total_seconds()) * 1000000000
if obj.value != NPY_NAT:
pandas_datetime_to_datetimestruct(obj.value + delta,
PANDAS_FR_ns, &obj.dts)
@@ -4141,7 +4138,7 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
pandas_datetime_to_datetimestruct(v, PANDAS_FR_ns, &dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz1)
- delta = (int(total_seconds(_get_utcoffset(tz1, dt)))
+ delta = (int(_get_utcoffset(tz1, dt).total_seconds())
* 1000000000)
utc_dates[i] = v - delta
else:
@@ -4181,8 +4178,8 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2):
pandas_datetime_to_datetimestruct(v, PANDAS_FR_ns, &dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz2)
- delta = int(total_seconds(
- _get_utcoffset(tz2, dt))) * 1000000000
+ delta = (int(_get_utcoffset(tz2, dt).total_seconds())
+ * 1000000000)
result[i] = v + delta
return result
@@ -4248,7 +4245,7 @@ def tz_convert_single(int64_t val, object tz1, object tz2):
pandas_datetime_to_datetimestruct(val, PANDAS_FR_ns, &dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz1)
- delta = int(total_seconds(_get_utcoffset(tz1, dt))) * 1000000000
+ delta = int(_get_utcoffset(tz1, dt).total_seconds()) * 1000000000
utc_date = val - delta
elif _get_zone(tz1) != 'UTC':
trans, deltas, typ = _get_dst_info(tz1)
@@ -4266,7 +4263,7 @@ def tz_convert_single(int64_t val, object tz1, object tz2):
pandas_datetime_to_datetimestruct(val, PANDAS_FR_ns, &dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz2)
- delta = int(total_seconds(_get_utcoffset(tz2, dt))) * 1000000000
+ delta = int(_get_utcoffset(tz2, dt).total_seconds()) * 1000000000
return utc_date + delta
# Convert UTC to other timezone
@@ -4338,7 +4335,7 @@ cdef object _get_dst_info(object tz):
"""
cache_key = _tz_cache_key(tz)
if cache_key is None:
- num = int(total_seconds(_get_utcoffset(tz, None))) * 1000000000
+ num = int(_get_utcoffset(tz, None).total_seconds()) * 1000000000
return (np.array([NPY_NAT + 1], dtype=np.int64),
np.array([num], dtype=np.int64),
None)
@@ -4385,7 +4382,7 @@ cdef object _get_dst_info(object tz):
else:
# static tzinfo
trans = np.array([NPY_NAT + 1], dtype=np.int64)
- num = int(total_seconds(_get_utcoffset(tz, None))) * 1000000000
+ num = int(_get_utcoffset(tz, None).total_seconds()) * 1000000000
deltas = np.array([num], dtype=np.int64)
typ = 'static'
@@ -4408,9 +4405,6 @@ cdef object _get_utc_trans_times_from_dateutil_tz(object tz):
return new_trans
-def tot_seconds(td):
- return total_seconds(td)
-
cpdef ndarray _unbox_utcoffsets(object transinfo):
cdef:
Py_ssize_t i, sz
@@ -4420,7 +4414,7 @@ cpdef ndarray _unbox_utcoffsets(object transinfo):
arr = np.empty(sz, dtype='i8')
for i in range(sz):
- arr[i] = int(total_seconds(transinfo[i][0])) * 1000000000
+ arr[i] = int(transinfo[i][0].total_seconds()) * 1000000000
return arr
@@ -4463,7 +4457,7 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
pandas_datetime_to_datetimestruct(v, PANDAS_FR_ns, &dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz)
- delta = int(total_seconds(_get_utcoffset(tz, dt))) * 1000000000
+ delta = int(_get_utcoffset(tz, dt).total_seconds()) * 1000000000
result[i] = v - delta
return result
@@ -5202,7 +5196,7 @@ cdef _normalize_local(ndarray[int64_t] stamps, object tz):
pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
dt = datetime(dts.year, dts.month, dts.day, dts.hour,
dts.min, dts.sec, dts.us, tz)
- delta = int(total_seconds(_get_utcoffset(tz, dt))) * 1000000000
+ delta = int(_get_utcoffset(tz, dt).total_seconds()) * 1000000000
pandas_datetime_to_datetimestruct(stamps[i] + delta,
PANDAS_FR_ns, &dts)
result[i] = _normalized_stamp(&dts)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 82c80a13372d7..712e9e9903f0a 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -4381,7 +4381,7 @@ def _get_tz(tz):
""" for a tz-aware type, return an encoded zone """
zone = tslib.get_timezone(tz)
if zone is None:
- zone = tslib.tot_seconds(tz.utcoffset())
+ zone = tz.utcoffset().total_seconds()
return zone
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 56ef703e67ca0..4e3327e71ce35 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -777,12 +777,12 @@ def _get_business_hours_by_sec(self):
# create dummy datetime to calcurate businesshours in a day
dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
until = datetime(2014, 4, 1, self.end.hour, self.end.minute)
- return tslib.tot_seconds(until - dtstart)
+ return (until - dtstart).total_seconds()
else:
self.daytime = False
dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
until = datetime(2014, 4, 2, self.end.hour, self.end.minute)
- return tslib.tot_seconds(until - dtstart)
+ return (until - dtstart).total_seconds()
@apply_wraps
def rollback(self, dt):
@@ -906,7 +906,7 @@ def _onOffset(self, dt, businesshours):
op = self._prev_opening_time(dt)
else:
op = self._next_opening_time(dt)
- span = tslib.tot_seconds(dt - op)
+ span = (dt - op).total_seconds()
if span <= businesshours:
return True
else:
diff --git a/setup.py b/setup.py
index a912b25328954..07c6f1af43afe 100755
--- a/setup.py
+++ b/setup.py
@@ -467,7 +467,6 @@ def pxd(name):
tseries_depends = ['pandas/_libs/src/datetime/np_datetime.h',
'pandas/_libs/src/datetime/np_datetime_strings.h',
- 'pandas/_libs/src/datetime_helper.h',
'pandas/_libs/src/period_helper.h',
'pandas/_libs/src/datetime.pxd']
@@ -597,7 +596,6 @@ def pxd(name):
ujson_ext = Extension('pandas._libs.json',
depends=['pandas/_libs/src/ujson/lib/ultrajson.h',
- 'pandas/_libs/src/datetime_helper.h',
'pandas/_libs/src/numpy_helper.h'],
sources=['pandas/_libs/src/ujson/python/ujson.c',
'pandas/_libs/src/ujson/python/objToJSON.c',
| `total_seconds` and `tot_seconds` functions were apparently made for python 2.6 compat. This removes them and replaces their usage with `timedelta.total_seconds` method.
There is one remaining use of this function in a C file. I expect it will be easy to remove for someone who knows what they're doing.
See discussion https://github.com/pandas-dev/pandas/pull/17274#issuecomment-323363823
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17289 | 2017-08-18T22:16:31Z | 2017-08-25T20:29:58Z | 2017-08-25T20:29:58Z | 2017-10-30T16:23:55Z |
BUG: clip should handle null values | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index c5fe89282bf52..d0932118612ad 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -128,11 +128,11 @@ Other Enhancements
- :func:`DataFrame.add_prefix` and :func:`DataFrame.add_suffix` now accept strings containing the '%' character. (:issue:`17151`)
- `read_*` methods can now infer compression from non-string paths, such as ``pathlib.Path`` objects (:issue:`17206`).
- :func:`pd.read_sas()` now recognizes much more of the most frequently used date (datetime) formats in SAS7BDAT files (:issue:`15871`).
+- :func:`Series.clip()` and :func:`DataFrame.clip()` now treat NA values for upper and lower arguments as None instead of raising `ValueError` (:issue:`17276`).
- :func:`DataFrame.items` and :func:`Series.items` is now present in both Python 2 and 3 and is lazy in all cases (:issue:`13918`, :issue:`17213`)
-
.. _whatsnew_0210.api_breaking:
Backwards incompatible API changes
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c83b1073afc8e..7a3c642109c36 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4741,9 +4741,6 @@ def _clip_with_one_bound(self, threshold, method, axis, inplace):
if axis is not None:
axis = self._get_axis_number(axis)
- if np.any(isna(threshold)):
- raise ValueError("Cannot use an NA value as a clip threshold")
-
# method is self.le for upper bound and self.ge for lower bound
if is_scalar(threshold) and is_number(threshold):
if method.__name__ == 'le':
@@ -4823,6 +4820,12 @@ def clip(self, lower=None, upper=None, axis=None, inplace=False,
axis = nv.validate_clip_with_axis(axis, args, kwargs)
+ # GH 17276
+ if np.any(pd.isnull(lower)):
+ lower = None
+ if np.any(pd.isnull(upper)):
+ upper = None
+
# GH 2747 (arguments were reversed)
if lower is not None and upper is not None:
if is_scalar(lower) and is_scalar(upper):
@@ -4839,7 +4842,6 @@ def clip(self, lower=None, upper=None, axis=None, inplace=False,
if upper is not None:
if inplace:
result = self
-
result = result.clip_upper(upper, axis, inplace=inplace)
return result
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 484a09f11b58a..93514a8a42215 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -1931,22 +1931,16 @@ def test_clip_against_frame(self, axis):
tm.assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
tm.assert_frame_equal(clipped_df[mask], df[mask])
- def test_clip_na(self):
- msg = "Cannot use an NA"
- with tm.assert_raises_regex(ValueError, msg):
- self.frame.clip(lower=np.nan)
-
- with tm.assert_raises_regex(ValueError, msg):
- self.frame.clip(lower=[np.nan])
-
- with tm.assert_raises_regex(ValueError, msg):
- self.frame.clip(upper=np.nan)
-
- with tm.assert_raises_regex(ValueError, msg):
- self.frame.clip(upper=[np.nan])
-
- with tm.assert_raises_regex(ValueError, msg):
- self.frame.clip(lower=np.nan, upper=np.nan)
+ def test_clip_with_na_args(self):
+ """Should process np.nan argument as None """
+ # GH # 17276
+ tm.assert_frame_equal(self.frame.clip(np.nan), self.frame)
+ tm.assert_frame_equal(self.frame.clip(upper=[1, 2, np.nan]),
+ self.frame)
+ tm.assert_frame_equal(self.frame.clip(lower=[1, np.nan, 3]),
+ self.frame)
+ tm.assert_frame_equal(self.frame.clip(upper=np.nan, lower=np.nan),
+ self.frame)
# Matrix-like
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 44da0968d7024..f1d044f7a1132 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -1000,6 +1000,17 @@ def test_clip_types_and_nulls(self):
assert list(isna(s)) == list(isna(l))
assert list(isna(s)) == list(isna(u))
+ def test_clip_with_na_args(self):
+ """Should process np.nan argument as None """
+ # GH # 17276
+ s = Series([1, 2, 3])
+
+ assert_series_equal(s.clip(np.nan), Series([1, 2, 3]))
+ assert_series_equal(s.clip(upper=[1, 1, np.nan]), Series([1, 2, 3]))
+ assert_series_equal(s.clip(lower=[1, np.nan, 1]), Series([1, 2, 3]))
+ assert_series_equal(s.clip(upper=np.nan, lower=np.nan),
+ Series([1, 2, 3]))
+
def test_clip_against_series(self):
# GH #6966
| - [ x] closes #17276
- [ x] tests added / passed
- [ x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17288 | 2017-08-18T21:07:21Z | 2017-08-21T23:53:51Z | null | 2017-08-21T23:53:57Z |
BUG: fillna returns frame when inplace=True if value is a dict (#16156) | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 85685ed7b430d..93d5c191a1d63 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -314,7 +314,7 @@ Conversion
- Fix :func:`DataFrame.memory_usage` to support PyPy. Objects on PyPy do not have a fixed size, so an approximation is used instead (:issue:`17228`)
- Fixed the return type of ``IntervalIndex.is_non_overlapping_monotonic`` to be a Python ``bool`` for consistency with similar attributes/methods. Previously returned a ``numpy.bool_``. (:issue:`17237`)
- Bug in ``IntervalIndex.is_non_overlapping_monotonic`` when intervals are closed on both sides and overlap at a point (:issue:`16560`)
-
+- Bug in :func:`Series.fillna` returns frame when ``inplace=True`` and ``value`` is dict (:issue:`16156`)
Indexing
^^^^^^^^
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 5a7f37bba91aa..6480d75a61859 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4054,7 +4054,8 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
continue
obj = result[k]
obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
- return result
+ return result if not inplace else None
+
elif not is_list_like(value):
new_data = self._data.fillna(value=value, limit=limit,
inplace=inplace,
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 77f0357685cab..ebd15b3180a33 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -407,6 +407,9 @@ def test_fillna_inplace(self):
df.fillna(value=0, inplace=True)
tm.assert_frame_equal(df, expected)
+ expected = df.fillna(value={0: 0}, inplace=True)
+ assert expected is None
+
df[1][:4] = np.nan
df[3][-4:] = np.nan
expected = df.fillna(method='ffill')
| - [x] closes #16156
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17279 | 2017-08-18T09:29:05Z | 2017-08-21T23:55:11Z | 2017-08-21T23:55:11Z | 2017-08-21T23:55:14Z |
Remove inline declarations in pxd files | diff --git a/pandas/_libs/hashtable.pxd b/pandas/_libs/hashtable.pxd
index 014da22df3382..b6b3c82c06862 100644
--- a/pandas/_libs/hashtable.pxd
+++ b/pandas/_libs/hashtable.pxd
@@ -38,7 +38,7 @@ cdef class MultiIndexHashTable(HashTable):
cpdef get_item(self, object val)
cpdef set_item(self, object key, Py_ssize_t val)
- cdef inline void _check_for_collision(self, Py_ssize_t loc, object label)
+ cdef void _check_for_collision(self, Py_ssize_t loc, object label)
cdef class StringHashTable(HashTable):
@@ -58,5 +58,5 @@ cdef class Int64Vector:
cdef resize(self)
cpdef to_array(self)
- cdef inline void append(self, int64_t x)
+ cdef void append(self, int64_t x)
cdef extend(self, int64_t[:] x)
diff --git a/pandas/_libs/src/khash.pxd b/pandas/_libs/src/khash.pxd
index adb0fe285dbb8..ba9a3c70097b2 100644
--- a/pandas/_libs/src/khash.pxd
+++ b/pandas/_libs/src/khash.pxd
@@ -11,13 +11,13 @@ cdef extern from "khash_python.h":
PyObject **keys
size_t *vals
- inline kh_pymap_t* kh_init_pymap()
- inline void kh_destroy_pymap(kh_pymap_t*)
- inline void kh_clear_pymap(kh_pymap_t*)
- inline khint_t kh_get_pymap(kh_pymap_t*, PyObject*)
- inline void kh_resize_pymap(kh_pymap_t*, khint_t)
- inline khint_t kh_put_pymap(kh_pymap_t*, PyObject*, int*)
- inline void kh_del_pymap(kh_pymap_t*, khint_t)
+ kh_pymap_t* kh_init_pymap()
+ void kh_destroy_pymap(kh_pymap_t*)
+ void kh_clear_pymap(kh_pymap_t*)
+ khint_t kh_get_pymap(kh_pymap_t*, PyObject*)
+ void kh_resize_pymap(kh_pymap_t*, khint_t)
+ khint_t kh_put_pymap(kh_pymap_t*, PyObject*, int*)
+ void kh_del_pymap(kh_pymap_t*, khint_t)
bint kh_exist_pymap(kh_pymap_t*, khiter_t)
@@ -27,13 +27,13 @@ cdef extern from "khash_python.h":
PyObject **keys
size_t *vals
- inline kh_pyset_t* kh_init_pyset()
- inline void kh_destroy_pyset(kh_pyset_t*)
- inline void kh_clear_pyset(kh_pyset_t*)
- inline khint_t kh_get_pyset(kh_pyset_t*, PyObject*)
- inline void kh_resize_pyset(kh_pyset_t*, khint_t)
- inline khint_t kh_put_pyset(kh_pyset_t*, PyObject*, int*)
- inline void kh_del_pyset(kh_pyset_t*, khint_t)
+ kh_pyset_t* kh_init_pyset()
+ void kh_destroy_pyset(kh_pyset_t*)
+ void kh_clear_pyset(kh_pyset_t*)
+ khint_t kh_get_pyset(kh_pyset_t*, PyObject*)
+ void kh_resize_pyset(kh_pyset_t*, khint_t)
+ khint_t kh_put_pyset(kh_pyset_t*, PyObject*, int*)
+ void kh_del_pyset(kh_pyset_t*, khint_t)
bint kh_exist_pyset(kh_pyset_t*, khiter_t)
@@ -45,13 +45,13 @@ cdef extern from "khash_python.h":
kh_cstr_t *keys
size_t *vals
- inline kh_str_t* kh_init_str() nogil
- inline void kh_destroy_str(kh_str_t*) nogil
- inline void kh_clear_str(kh_str_t*) nogil
- inline khint_t kh_get_str(kh_str_t*, kh_cstr_t) nogil
- inline void kh_resize_str(kh_str_t*, khint_t) nogil
- inline khint_t kh_put_str(kh_str_t*, kh_cstr_t, int*) nogil
- inline void kh_del_str(kh_str_t*, khint_t) nogil
+ kh_str_t* kh_init_str() nogil
+ void kh_destroy_str(kh_str_t*) nogil
+ void kh_clear_str(kh_str_t*) nogil
+ khint_t kh_get_str(kh_str_t*, kh_cstr_t) nogil
+ void kh_resize_str(kh_str_t*, khint_t) nogil
+ khint_t kh_put_str(kh_str_t*, kh_cstr_t, int*) nogil
+ void kh_del_str(kh_str_t*, khint_t) nogil
bint kh_exist_str(kh_str_t*, khiter_t) nogil
@@ -61,13 +61,13 @@ cdef extern from "khash_python.h":
int64_t *keys
size_t *vals
- inline kh_int64_t* kh_init_int64() nogil
- inline void kh_destroy_int64(kh_int64_t*) nogil
- inline void kh_clear_int64(kh_int64_t*) nogil
- inline khint_t kh_get_int64(kh_int64_t*, int64_t) nogil
- inline void kh_resize_int64(kh_int64_t*, khint_t) nogil
- inline khint_t kh_put_int64(kh_int64_t*, int64_t, int*) nogil
- inline void kh_del_int64(kh_int64_t*, khint_t) nogil
+ kh_int64_t* kh_init_int64() nogil
+ void kh_destroy_int64(kh_int64_t*) nogil
+ void kh_clear_int64(kh_int64_t*) nogil
+ khint_t kh_get_int64(kh_int64_t*, int64_t) nogil
+ void kh_resize_int64(kh_int64_t*, khint_t) nogil
+ khint_t kh_put_int64(kh_int64_t*, int64_t, int*) nogil
+ void kh_del_int64(kh_int64_t*, khint_t) nogil
bint kh_exist_int64(kh_int64_t*, khiter_t) nogil
@@ -79,13 +79,13 @@ cdef extern from "khash_python.h":
khuint64_t *keys
size_t *vals
- inline kh_uint64_t* kh_init_uint64() nogil
- inline void kh_destroy_uint64(kh_uint64_t*) nogil
- inline void kh_clear_uint64(kh_uint64_t*) nogil
- inline khint_t kh_get_uint64(kh_uint64_t*, int64_t) nogil
- inline void kh_resize_uint64(kh_uint64_t*, khint_t) nogil
- inline khint_t kh_put_uint64(kh_uint64_t*, int64_t, int*) nogil
- inline void kh_del_uint64(kh_uint64_t*, khint_t) nogil
+ kh_uint64_t* kh_init_uint64() nogil
+ void kh_destroy_uint64(kh_uint64_t*) nogil
+ void kh_clear_uint64(kh_uint64_t*) nogil
+ khint_t kh_get_uint64(kh_uint64_t*, int64_t) nogil
+ void kh_resize_uint64(kh_uint64_t*, khint_t) nogil
+ khint_t kh_put_uint64(kh_uint64_t*, int64_t, int*) nogil
+ void kh_del_uint64(kh_uint64_t*, khint_t) nogil
bint kh_exist_uint64(kh_uint64_t*, khiter_t) nogil
@@ -95,13 +95,13 @@ cdef extern from "khash_python.h":
float64_t *keys
size_t *vals
- inline kh_float64_t* kh_init_float64() nogil
- inline void kh_destroy_float64(kh_float64_t*) nogil
- inline void kh_clear_float64(kh_float64_t*) nogil
- inline khint_t kh_get_float64(kh_float64_t*, float64_t) nogil
- inline void kh_resize_float64(kh_float64_t*, khint_t) nogil
- inline khint_t kh_put_float64(kh_float64_t*, float64_t, int*) nogil
- inline void kh_del_float64(kh_float64_t*, khint_t) nogil
+ kh_float64_t* kh_init_float64() nogil
+ void kh_destroy_float64(kh_float64_t*) nogil
+ void kh_clear_float64(kh_float64_t*) nogil
+ khint_t kh_get_float64(kh_float64_t*, float64_t) nogil
+ void kh_resize_float64(kh_float64_t*, khint_t) nogil
+ khint_t kh_put_float64(kh_float64_t*, float64_t, int*) nogil
+ void kh_del_float64(kh_float64_t*, khint_t) nogil
bint kh_exist_float64(kh_float64_t*, khiter_t) nogil
@@ -111,13 +111,13 @@ cdef extern from "khash_python.h":
int32_t *keys
size_t *vals
- inline kh_int32_t* kh_init_int32() nogil
- inline void kh_destroy_int32(kh_int32_t*) nogil
- inline void kh_clear_int32(kh_int32_t*) nogil
- inline khint_t kh_get_int32(kh_int32_t*, int32_t) nogil
- inline void kh_resize_int32(kh_int32_t*, khint_t) nogil
- inline khint_t kh_put_int32(kh_int32_t*, int32_t, int*) nogil
- inline void kh_del_int32(kh_int32_t*, khint_t) nogil
+ kh_int32_t* kh_init_int32() nogil
+ void kh_destroy_int32(kh_int32_t*) nogil
+ void kh_clear_int32(kh_int32_t*) nogil
+ khint_t kh_get_int32(kh_int32_t*, int32_t) nogil
+ void kh_resize_int32(kh_int32_t*, khint_t) nogil
+ khint_t kh_put_int32(kh_int32_t*, int32_t, int*) nogil
+ void kh_del_int32(kh_int32_t*, khint_t) nogil
bint kh_exist_int32(kh_int32_t*, khiter_t) nogil
@@ -129,12 +129,12 @@ cdef extern from "khash_python.h":
kh_cstr_t *keys
PyObject **vals
- inline kh_strbox_t* kh_init_strbox() nogil
- inline void kh_destroy_strbox(kh_strbox_t*) nogil
- inline void kh_clear_strbox(kh_strbox_t*) nogil
- inline khint_t kh_get_strbox(kh_strbox_t*, kh_cstr_t) nogil
- inline void kh_resize_strbox(kh_strbox_t*, khint_t) nogil
- inline khint_t kh_put_strbox(kh_strbox_t*, kh_cstr_t, int*) nogil
- inline void kh_del_strbox(kh_strbox_t*, khint_t) nogil
+ kh_strbox_t* kh_init_strbox() nogil
+ void kh_destroy_strbox(kh_strbox_t*) nogil
+ void kh_clear_strbox(kh_strbox_t*) nogil
+ khint_t kh_get_strbox(kh_strbox_t*, kh_cstr_t) nogil
+ void kh_resize_strbox(kh_strbox_t*, khint_t) nogil
+ khint_t kh_put_strbox(kh_strbox_t*, kh_cstr_t, int*) nogil
+ void kh_del_strbox(kh_strbox_t*, khint_t) nogil
bint kh_exist_strbox(kh_strbox_t*, khiter_t) nogil
diff --git a/pandas/_libs/src/skiplist.pxd b/pandas/_libs/src/skiplist.pxd
index 69e9df5b542aa..214aa1c7aeaf0 100644
--- a/pandas/_libs/src/skiplist.pxd
+++ b/pandas/_libs/src/skiplist.pxd
@@ -14,9 +14,9 @@ cdef extern from "skiplist.h":
int size
int maxlevels
- inline skiplist_t* skiplist_init(int) nogil
- inline void skiplist_destroy(skiplist_t*) nogil
- inline double skiplist_get(skiplist_t*, int, int*) nogil
- inline int skiplist_insert(skiplist_t*, double) nogil
- inline int skiplist_remove(skiplist_t*, double) nogil
+ skiplist_t* skiplist_init(int) nogil
+ void skiplist_destroy(skiplist_t*) nogil
+ double skiplist_get(skiplist_t*, int, int*) nogil
+ int skiplist_insert(skiplist_t*, double) nogil
+ int skiplist_remove(skiplist_t*, double) nogil
diff --git a/pandas/_libs/src/util.pxd b/pandas/_libs/src/util.pxd
index 076bc1cd56003..f7a68c4ade71b 100644
--- a/pandas/_libs/src/util.pxd
+++ b/pandas/_libs/src/util.pxd
@@ -3,26 +3,26 @@ cimport numpy as cnp
cimport cpython
cdef extern from "numpy_helper.h":
- inline void set_array_owndata(ndarray ao)
- inline void set_array_not_contiguous(ndarray ao)
-
- inline int is_integer_object(object)
- inline int is_float_object(object)
- inline int is_complex_object(object)
- inline int is_bool_object(object)
- inline int is_string_object(object)
- inline int is_datetime64_object(object)
- inline int is_timedelta64_object(object)
- inline int assign_value_1d(ndarray, Py_ssize_t, object) except -1
- inline cnp.int64_t get_nat()
- inline object get_value_1d(ndarray, Py_ssize_t)
- inline int floatify(object, double*) except -1
- inline char *get_c_string(object) except NULL
- inline object char_to_string(char*)
- inline void transfer_object_column(char *dst, char *src, size_t stride,
+ void set_array_owndata(ndarray ao)
+ void set_array_not_contiguous(ndarray ao)
+
+ int is_integer_object(object)
+ int is_float_object(object)
+ int is_complex_object(object)
+ int is_bool_object(object)
+ int is_string_object(object)
+ int is_datetime64_object(object)
+ int is_timedelta64_object(object)
+ int assign_value_1d(ndarray, Py_ssize_t, object) except -1
+ cnp.int64_t get_nat()
+ object get_value_1d(ndarray, Py_ssize_t)
+ int floatify(object, double*) except -1
+ char *get_c_string(object) except NULL
+ object char_to_string(char*)
+ void transfer_object_column(char *dst, char *src, size_t stride,
size_t length)
object sarr_from_data(cnp.dtype, int length, void* data)
- inline object unbox_if_zerodim(object arr)
+ object unbox_if_zerodim(object arr)
ctypedef fused numeric:
cnp.int8_t
| At the moment (I think since .26, not sure) they don't do anything but cause lots of warnings during cythonizing. Eventually they will raise errors.
https://github.com/cython/cython/issues/1706#issuecomment-302347613
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17277 | 2017-08-18T05:46:38Z | 2017-08-19T16:49:44Z | null | 2017-10-30T16:24:04Z |
Refactor timezones functions out of tslib | diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 42ba0c1cadaec..7e48c7d94ccf0 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -24,6 +24,8 @@ from datetime import datetime, timedelta
from datetime cimport (get_datetime64_value, _pydatetime_to_dts,
pandas_datetimestruct)
+from tslibs.timezones cimport _get_utcoffset, _is_utc
+
from cpython cimport PyTuple_Check, PyList_Check
cdef extern from "datetime.h":
@@ -554,14 +556,11 @@ cdef inline _to_i8(object val):
# Save the original date value so we can get the utcoffset from it.
ival = _pydatetime_to_dts(val, &dts)
if tzinfo is not None and not _is_utc(tzinfo):
- offset = tslib._get_utcoffset(tzinfo, val)
+ offset = _get_utcoffset(tzinfo, val)
ival -= tslib._delta_to_nanoseconds(offset)
return ival
return val
-cdef inline bint _is_utc(object tz):
- return tz is UTC or isinstance(tz, _du_utc)
-
cdef class MultiIndexObjectEngine(ObjectEngine):
"""
diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx
index 816b7ebfff86d..114d43cbab5e4 100644
--- a/pandas/_libs/period.pyx
+++ b/pandas/_libs/period.pyx
@@ -31,14 +31,15 @@ cimport util, lib
from lib cimport is_null_datetimelike, is_period
from pandas._libs import tslib, lib
-from pandas._libs.tslib import (Timedelta, Timestamp, iNaT,
- NaT, _get_utcoffset)
-from tslib cimport (
- maybe_get_tz,
- _is_utc,
- _is_tzlocal,
+from pandas._libs.tslib import Timedelta, Timestamp, iNaT, NaT
+from tslib cimport _nat_scalar_rules
+
+from tslibs.timezones cimport (
+ _get_utcoffset,
_get_dst_info,
- _nat_scalar_rules)
+ _is_tzlocal,
+ _is_utc,
+ maybe_get_tz)
from pandas.tseries import offsets
from pandas.core.tools.datetimes import parse_time_string
@@ -116,6 +117,7 @@ cdef extern from "period_helper.h":
initialize_daytime_conversion_factor_matrix()
+
# Period logic
#----------------------------------------------------------------------
diff --git a/pandas/_libs/tslib.pxd b/pandas/_libs/tslib.pxd
index aa8cbcb2cedc7..ee8adfe67bb5e 100644
--- a/pandas/_libs/tslib.pxd
+++ b/pandas/_libs/tslib.pxd
@@ -2,9 +2,5 @@ from numpy cimport ndarray, int64_t
cdef convert_to_tsobject(object, object, object, bint, bint)
cpdef convert_to_timedelta64(object, object)
-cpdef object maybe_get_tz(object)
-cdef bint _is_utc(object)
-cdef bint _is_tzlocal(object)
-cdef object _get_dst_info(object)
cdef bint _nat_scalar_rules[6]
cdef bint _check_all_nulls(obj)
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index b5aca2e3ec309..1f72ccbee6568 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
# cython: profile=False
import warnings
@@ -33,6 +34,13 @@ from util cimport (is_integer_object, is_float_object, is_datetime64_object,
is_timedelta64_object, INT64_MAX)
cimport util
+cdef extern from "datetime.h":
+ bint PyDateTime_Check(object o)
+ bint PyDate_Check(object o)
+ void PyDateTime_IMPORT()
+
+from datetime cimport datetime, timedelta
+
# this is our datetime.pxd
from datetime cimport (
pandas_datetimestruct,
@@ -50,12 +58,34 @@ from datetime cimport (
npy_datetime,
is_leapyear,
dayofweek,
- PANDAS_FR_ns,
- PyDateTime_Check, PyDate_Check,
- PyDateTime_IMPORT,
- timedelta, datetime
+ PANDAS_FR_ns
)
+from tslibs.timezones import (
+ tzoffset,
+ _dateutil_gettz,
+ _dateutil_tzlocal,
+ _dateutil_tzfile,
+ _dateutil_tzutc,
+ maybe_get_tz,
+ _get_utcoffset,
+ _unbox_utcoffsets,
+ get_timezone,
+ _p_tz_cache_key)
+from tslibs.timezones cimport (
+ _is_utc,
+ maybe_get_tz,
+ _is_tzlocal,
+ _get_dst_info,
+ _get_utcoffset,
+ _unbox_utcoffsets,
+ _is_fixed_offset,
+ _get_zone,
+ _get_utc_trans_times_from_dateutil_tz,
+ _tz_cache_key,
+ _treat_tz_as_pytz,
+ _treat_tz_as_dateutil)
+
# stdlib datetime imports
from datetime import timedelta, datetime
from datetime import time as datetime_time
@@ -71,26 +101,16 @@ cimport cython
import re
# dateutil compat
-from dateutil.tz import (tzoffset, tzlocal as _dateutil_tzlocal,
- tzfile as _dateutil_tzfile,
- tzutc as _dateutil_tzutc,
- tzstr as _dateutil_tzstr)
-
-from pandas.compat import is_platform_windows
-if is_platform_windows():
- from dateutil.zoneinfo import gettz as _dateutil_gettz
-else:
- from dateutil.tz import gettz as _dateutil_gettz
+from dateutil.tz import tzstr as _dateutil_tzstr
+
from dateutil.relativedelta import relativedelta
from dateutil.parser import DEFAULTPARSER
-from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo
from pandas.compat import (parse_date, string_types, iteritems,
StringIO, callable)
import operator
import collections
-import warnings
# initialize numpy
import_array()
@@ -232,24 +252,6 @@ def ints_to_pytimedelta(ndarray[int64_t] arr, box=False):
return result
-cdef inline bint _is_tzlocal(object tz):
- return isinstance(tz, _dateutil_tzlocal)
-
-
-cdef inline bint _is_fixed_offset(object tz):
- if _treat_tz_as_dateutil(tz):
- if len(tz._trans_idx) == 0 and len(tz._trans_list) == 0:
- return 1
- else:
- return 0
- elif _treat_tz_as_pytz(tz):
- if (len(tz._transition_info) == 0
- and len(tz._utc_transition_times) == 0):
- return 1
- else:
- return 0
- return 1
-
_zero_time = datetime_time(0, 0)
_no_input = object()
@@ -1429,11 +1431,6 @@ cdef class _TSObject:
def __get__(self):
return self.value
-cpdef _get_utcoffset(tzinfo, obj):
- try:
- return tzinfo._utcoffset
- except AttributeError:
- return tzinfo.utcoffset(obj)
# helper to extract datetime and int64 from several different possibilities
cdef convert_to_tsobject(object ts, object tz, object unit,
@@ -1698,71 +1695,6 @@ def _localize_pydatetime(object dt, object tz):
return dt.replace(tzinfo=tz)
-def get_timezone(tz):
- return _get_zone(tz)
-
-cdef inline bint _is_utc(object tz):
- return tz is UTC or isinstance(tz, _dateutil_tzutc)
-
-cdef inline object _get_zone(object tz):
- """
- We need to do several things here:
- 1) Distinguish between pytz and dateutil timezones
- 2) Not be over-specific (e.g. US/Eastern with/without DST is same *zone*
- but a different tz object)
- 3) Provide something to serialize when we're storing a datetime object
- in pytables.
-
- We return a string prefaced with dateutil if it's a dateutil tz, else just
- the tz name. It needs to be a string so that we can serialize it with
- UJSON/pytables. maybe_get_tz (below) is the inverse of this process.
- """
- if _is_utc(tz):
- return 'UTC'
- else:
- if _treat_tz_as_dateutil(tz):
- if '.tar.gz' in tz._filename:
- raise ValueError(
- 'Bad tz filename. Dateutil on python 3 on windows has a '
- 'bug which causes tzfile._filename to be the same for all '
- 'timezone files. Please construct dateutil timezones '
- 'implicitly by passing a string like "dateutil/Europe'
- '/London" when you construct your pandas objects instead '
- 'of passing a timezone object. See '
- 'https://github.com/pandas-dev/pandas/pull/7362')
- return 'dateutil/' + tz._filename
- else:
- # tz is a pytz timezone or unknown.
- try:
- zone = tz.zone
- if zone is None:
- return tz
- return zone
- except AttributeError:
- return tz
-
-
-cpdef inline object maybe_get_tz(object tz):
- """
- (Maybe) Construct a timezone object from a string. If tz is a string, use
- it to construct a timezone object. Otherwise, just return tz.
- """
- if isinstance(tz, string_types):
- if tz == 'tzlocal()':
- tz = _dateutil_tzlocal()
- elif tz.startswith('dateutil/'):
- zone = tz[9:]
- tz = _dateutil_gettz(zone)
- # On Python 3 on Windows, the filename is not always set correctly.
- if isinstance(tz, _dateutil_tzfile) and '.tar.gz' in tz._filename:
- tz._filename = zone
- else:
- tz = pytz.timezone(tz)
- elif is_integer_object(tz):
- tz = pytz.FixedOffset(tz / 60)
- return tz
-
-
class OutOfBoundsDatetime(ValueError):
pass
@@ -4081,6 +4013,7 @@ def pydt_to_i8(object pydt):
return ts.value
+# TODO: Never used?
def i8_to_pydt(int64_t i8, object tzinfo = None):
"""
Inverse of pydt_to_i8
@@ -4271,148 +4204,6 @@ def tz_convert_single(int64_t val, object tz1, object tz2):
offset = deltas[pos]
return utc_date + offset
-# Timezone data caches, key is the pytz string or dateutil file name.
-dst_cache = {}
-
-cdef inline bint _treat_tz_as_pytz(object tz):
- return hasattr(tz, '_utc_transition_times') and hasattr(
- tz, '_transition_info')
-
-cdef inline bint _treat_tz_as_dateutil(object tz):
- return hasattr(tz, '_trans_list') and hasattr(tz, '_trans_idx')
-
-
-def _p_tz_cache_key(tz):
- """ Python interface for cache function to facilitate testing."""
- return _tz_cache_key(tz)
-
-
-cdef inline object _tz_cache_key(object tz):
- """
- Return the key in the cache for the timezone info object or None
- if unknown.
-
- The key is currently the tz string for pytz timezones, the filename for
- dateutil timezones.
-
- Notes
- =====
- This cannot just be the hash of a timezone object. Unfortunately, the
- hashes of two dateutil tz objects which represent the same timezone are
- not equal (even though the tz objects will compare equal and represent
- the same tz file). Also, pytz objects are not always hashable so we use
- str(tz) instead.
- """
- if isinstance(tz, _pytz_BaseTzInfo):
- return tz.zone
- elif isinstance(tz, _dateutil_tzfile):
- if '.tar.gz' in tz._filename:
- raise ValueError('Bad tz filename. Dateutil on python 3 on '
- 'windows has a bug which causes tzfile._filename '
- 'to be the same for all timezone files. Please '
- 'construct dateutil timezones implicitly by '
- 'passing a string like "dateutil/Europe/London" '
- 'when you construct your pandas objects instead '
- 'of passing a timezone object. See '
- 'https://github.com/pandas-dev/pandas/pull/7362')
- return 'dateutil' + tz._filename
- else:
- return None
-
-
-cdef object _get_dst_info(object tz):
- """
- return a tuple of :
- (UTC times of DST transitions,
- UTC offsets in microseconds corresponding to DST transitions,
- string of type of transitions)
-
- """
- cache_key = _tz_cache_key(tz)
- if cache_key is None:
- num = int(_get_utcoffset(tz, None).total_seconds()) * 1000000000
- return (np.array([NPY_NAT + 1], dtype=np.int64),
- np.array([num], dtype=np.int64),
- None)
-
- if cache_key not in dst_cache:
- if _treat_tz_as_pytz(tz):
- trans = np.array(tz._utc_transition_times, dtype='M8[ns]')
- trans = trans.view('i8')
- try:
- if tz._utc_transition_times[0].year == 1:
- trans[0] = NPY_NAT + 1
- except Exception:
- pass
- deltas = _unbox_utcoffsets(tz._transition_info)
- typ = 'pytz'
-
- elif _treat_tz_as_dateutil(tz):
- if len(tz._trans_list):
- # get utc trans times
- trans_list = _get_utc_trans_times_from_dateutil_tz(tz)
- trans = np.hstack([
- np.array([0], dtype='M8[s]'), # place holder for first item
- np.array(trans_list, dtype='M8[s]')]).astype(
- 'M8[ns]') # all trans listed
- trans = trans.view('i8')
- trans[0] = NPY_NAT + 1
-
- # deltas
- deltas = np.array([v.offset for v in (
- tz._ttinfo_before,) + tz._trans_idx], dtype='i8')
- deltas *= 1000000000
- typ = 'dateutil'
-
- elif _is_fixed_offset(tz):
- trans = np.array([NPY_NAT + 1], dtype=np.int64)
- deltas = np.array([tz._ttinfo_std.offset],
- dtype='i8') * 1000000000
- typ = 'fixed'
- else:
- trans = np.array([], dtype='M8[ns]')
- deltas = np.array([], dtype='i8')
- typ = None
-
- else:
- # static tzinfo
- trans = np.array([NPY_NAT + 1], dtype=np.int64)
- num = int(_get_utcoffset(tz, None).total_seconds()) * 1000000000
- deltas = np.array([num], dtype=np.int64)
- typ = 'static'
-
- dst_cache[cache_key] = (trans, deltas, typ)
-
- return dst_cache[cache_key]
-
-cdef object _get_utc_trans_times_from_dateutil_tz(object tz):
- """
- Transition times in dateutil timezones are stored in local non-dst
- time. This code converts them to UTC. It's the reverse of the code
- in dateutil.tz.tzfile.__init__.
- """
- new_trans = list(tz._trans_list)
- last_std_offset = 0
- for i, (trans, tti) in enumerate(zip(tz._trans_list, tz._trans_idx)):
- if not tti.isdst:
- last_std_offset = tti.offset
- new_trans[i] = trans - last_std_offset
- return new_trans
-
-
-cpdef ndarray _unbox_utcoffsets(object transinfo):
- cdef:
- Py_ssize_t i, sz
- ndarray[int64_t] arr
-
- sz = len(transinfo)
- arr = np.empty(sz, dtype='i8')
-
- for i in range(sz):
- arr[i] = int(transinfo[i][0].total_seconds()) * 1000000000
-
- return arr
-
@cython.boundscheck(False)
@cython.wraparound(False)
diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py
new file mode 100644
index 0000000000000..6ffc2a84242e8
--- /dev/null
+++ b/pandas/_libs/tslibs/__init__.py
@@ -0,0 +1,3 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# cython: profile=False
diff --git a/pandas/_libs/tslibs/timezones.pxd b/pandas/_libs/tslibs/timezones.pxd
new file mode 100644
index 0000000000000..59efb24600966
--- /dev/null
+++ b/pandas/_libs/tslibs/timezones.pxd
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# cython: profile=False
+
+from numpy cimport ndarray, float64_t
+
+cdef object _get_zone(object tz)
+cdef object _tz_cache_key(object tz)
+cdef bint _is_utc(object tz)
+cdef bint _is_tzlocal(object tz)
+cdef bint _treat_tz_as_pytz(object tz)
+cdef bint _treat_tz_as_dateutil(object tz)
+cpdef object maybe_get_tz(object tz)
+
+cpdef _get_utcoffset(tzinfo, obj)
+cpdef ndarray _unbox_utcoffsets(object transinfo)
+cdef bint _is_fixed_offset(object tz)
+cdef object _get_utc_trans_times_from_dateutil_tz(object tz)
+
+cpdef object _get_dst_info(object tz)
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx
new file mode 100644
index 0000000000000..4d78e83e31101
--- /dev/null
+++ b/pandas/_libs/tslibs/timezones.pyx
@@ -0,0 +1,301 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# cython: profile=False
+
+from pandas.compat import string_types, is_platform_windows
+
+
+cdef extern from "Python.h":
+ Py_ssize_t PY_SSIZE_T_MAX
+
+cdef extern from "datetime.h":
+ void PyDateTime_IMPORT()
+
+# import datetime C API
+PyDateTime_IMPORT
+
+
+import numpy as np
+cimport numpy as np
+from numpy cimport ndarray, int64_t, float64_t
+np.import_array()
+
+
+# dateutil compat
+from dateutil.tz import (tzoffset,
+ tzlocal as _dateutil_tzlocal,
+ tzfile as _dateutil_tzfile,
+ tzutc as _dateutil_tzutc,
+ tzstr as _dateutil_tzstr)
+
+if is_platform_windows():
+ from dateutil.zoneinfo import gettz as _dateutil_gettz
+else:
+ from dateutil.tz import gettz as _dateutil_gettz
+
+
+from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo
+import pytz
+UTC = pytz.utc
+
+
+from cpython cimport PyBool_Check
+cdef extern from "numpy/ndarrayobject.h":
+ bint PyArray_IsIntegerScalar(object)
+
+cdef inline bint is_integer_object(object obj):
+ # Ported from util (which gets it from numpy_helper) to avoid
+ # direct dependency
+ return (not PyBool_Check(obj)) and PyArray_IsIntegerScalar(obj)
+
+
+cdef int64_t NPY_NAT = np.datetime64('nat').astype(np.int64)
+
+
+#----------------------------------------------------------------------
+# time zone conversion helpers
+
+def get_timezone(tz):
+ return _get_zone(tz)
+
+
+cdef bint _is_utc(object tz):
+ return tz is UTC or isinstance(tz, _dateutil_tzutc)
+
+
+cdef bint _is_tzlocal(object tz):
+ return isinstance(tz, _dateutil_tzlocal)
+
+
+cdef bint _treat_tz_as_pytz(object tz):
+ return (hasattr(tz, '_utc_transition_times') and
+ hasattr(tz, '_transition_info'))
+
+
+cdef bint _treat_tz_as_dateutil(object tz):
+ return hasattr(tz, '_trans_list') and hasattr(tz, '_trans_idx')
+
+
+cdef object _get_zone(object tz):
+ """
+ We need to do several things here:
+ 1) Distinguish between pytz and dateutil timezones
+ 2) Not be over-specific (e.g. US/Eastern with/without DST is same *zone*
+ but a different tz object)
+ 3) Provide something to serialize when we're storing a datetime object
+ in pytables.
+
+ We return a string prefaced with dateutil if it's a dateutil tz, else just
+ the tz name. It needs to be a string so that we can serialize it with
+ UJSON/pytables. maybe_get_tz (below) is the inverse of this process.
+ """
+ if _is_utc(tz):
+ return 'UTC'
+ else:
+ if _treat_tz_as_dateutil(tz):
+ if '.tar.gz' in tz._filename:
+ raise ValueError(
+ 'Bad tz filename. Dateutil on python 3 on windows has a '
+ 'bug which causes tzfile._filename to be the same for all '
+ 'timezone files. Please construct dateutil timezones '
+ 'implicitly by passing a string like "dateutil/Europe'
+ '/London" when you construct your pandas objects instead '
+ 'of passing a timezone object. See '
+ 'https://github.com/pandas-dev/pandas/pull/7362')
+ return 'dateutil/' + tz._filename
+ # TODO: use os.path.join?
+ else:
+ # tz is a pytz timezone or unknown.
+ try:
+ zone = tz.zone
+ if zone is None:
+ return tz
+ return zone
+ except AttributeError:
+ return tz
+
+
+cpdef object maybe_get_tz(object tz):
+ """
+ (Maybe) Construct a timezone object from a string. If tz is a string, use
+ it to construct a timezone object. Otherwise, just return tz.
+ """
+ if isinstance(tz, string_types):
+ if tz == 'tzlocal()':
+ tz = _dateutil_tzlocal()
+ elif tz.startswith('dateutil/'):
+ zone = tz[9:]
+ tz = _dateutil_gettz(zone)
+ # On Python 3 on Windows, the filename is not always set correctly.
+ if isinstance(tz, _dateutil_tzfile) and '.tar.gz' in tz._filename:
+ tz._filename = zone
+ else:
+ tz = pytz.timezone(tz)
+ elif is_integer_object(tz):
+ tz = pytz.FixedOffset(tz / 60)
+ return tz
+
+
+def _p_tz_cache_key(tz):
+ """ Python interface for cache function to facilitate testing."""
+ return _tz_cache_key(tz)
+
+
+cdef object _tz_cache_key(object tz):
+ """
+ Return the key in the cache for the timezone info object or None
+ if unknown.
+
+ The key is currently the tz string for pytz timezones, the filename for
+ dateutil timezones.
+
+ Notes
+ =====
+ This cannot just be the hash of a timezone object. Unfortunately, the
+ hashes of two dateutil tz objects which represent the same timezone are
+ not equal (even though the tz objects will compare equal and represent
+ the same tz file). Also, pytz objects are not always hashable so we use
+ str(tz) instead.
+ """
+ if isinstance(tz, _pytz_BaseTzInfo):
+ return tz.zone
+ elif isinstance(tz, _dateutil_tzfile):
+ if '.tar.gz' in tz._filename:
+ raise ValueError('Bad tz filename. Dateutil on python 3 on '
+ 'windows has a bug which causes tzfile._filename '
+ 'to be the same for all timezone files. Please '
+ 'construct dateutil timezones implicitly by '
+ 'passing a string like "dateutil/Europe/London" '
+ 'when you construct your pandas objects instead '
+ 'of passing a timezone object. See '
+ 'https://github.com/pandas-dev/pandas/pull/7362')
+ return 'dateutil' + tz._filename
+ else:
+ return None
+
+
+#----------------------------------------------------------------------
+# UTC Offsets
+
+cpdef _get_utcoffset(tzinfo, obj):
+ try:
+ return tzinfo._utcoffset
+ except AttributeError:
+ return tzinfo.utcoffset(obj)
+
+
+cpdef ndarray _unbox_utcoffsets(object transinfo):
+ cdef:
+ Py_ssize_t i, sz
+ ndarray[int64_t] arr
+
+ sz = len(transinfo)
+ arr = np.empty(sz, dtype='i8')
+
+ for i in range(sz):
+ arr[i] = int(transinfo[i][0].total_seconds()) * 1000000000
+
+ return arr
+
+
+cdef bint _is_fixed_offset(object tz):
+ if _treat_tz_as_dateutil(tz):
+ if len(tz._trans_idx) == 0 and len(tz._trans_list) == 0:
+ return 1
+ else:
+ return 0
+ elif _treat_tz_as_pytz(tz):
+ if (len(tz._transition_info) == 0
+ and len(tz._utc_transition_times) == 0):
+ return 1
+ else:
+ return 0
+ return 1
+
+
+cdef object _get_utc_trans_times_from_dateutil_tz(object tz):
+ """
+ Transition times in dateutil timezones are stored in local non-dst
+ time. This code converts them to UTC. It's the reverse of the code
+ in dateutil.tz.tzfile.__init__.
+ """
+ new_trans = list(tz._trans_list)
+ last_std_offset = 0
+ for i, (trans, tti) in enumerate(zip(tz._trans_list, tz._trans_idx)):
+ if not tti.isdst:
+ last_std_offset = tti.offset
+ new_trans[i] = trans - last_std_offset
+ return new_trans
+
+
+#----------------------------------------------------------------------
+# Daylight Savings
+
+# Timezone data caches, key is the pytz string or dateutil file name.
+dst_cache = {}
+
+# TODO: go back to just cdef
+cpdef object _get_dst_info(object tz):
+ """
+ return a tuple of :
+ (UTC times of DST transitions,
+ UTC offsets in microseconds corresponding to DST transitions,
+ string of type of transitions)
+
+ """
+ cache_key = _tz_cache_key(tz)
+ if cache_key is None:
+ num = int(_get_utcoffset(tz, None).total_seconds()) * 1000000000
+ return (np.array([NPY_NAT + 1], dtype=np.int64),
+ np.array([num], dtype=np.int64),
+ None)
+
+ if cache_key not in dst_cache:
+ if _treat_tz_as_pytz(tz):
+ trans = np.array(tz._utc_transition_times, dtype='M8[ns]')
+ trans = trans.view('i8')
+ try:
+ if tz._utc_transition_times[0].year == 1:
+ trans[0] = NPY_NAT + 1
+ except Exception:
+ pass
+ deltas = _unbox_utcoffsets(tz._transition_info)
+ typ = 'pytz'
+
+ elif _treat_tz_as_dateutil(tz):
+ if len(tz._trans_list):
+ # get utc trans times
+ trans_list = _get_utc_trans_times_from_dateutil_tz(tz)
+ trans = np.hstack([
+ np.array([0], dtype='M8[s]'), # place holder for first item
+ np.array(trans_list, dtype='M8[s]')]).astype(
+ 'M8[ns]') # all trans listed
+ trans = trans.view('i8')
+ trans[0] = NPY_NAT + 1
+
+ # deltas
+ deltas = np.array([v.offset for v in (
+ tz._ttinfo_before,) + tz._trans_idx], dtype='i8')
+ deltas *= 1000000000
+ typ = 'dateutil'
+
+ elif _is_fixed_offset(tz):
+ trans = np.array([NPY_NAT + 1], dtype=np.int64)
+ deltas = np.array([tz._ttinfo_std.offset],
+ dtype='i8') * 1000000000
+ typ = 'fixed'
+ else:
+ trans = np.array([], dtype='M8[ns]')
+ deltas = np.array([], dtype='i8')
+ typ = None
+
+ else:
+ # static tzinfo
+ trans = np.array([NPY_NAT + 1], dtype=np.int64)
+ num = int(_get_utcoffset(tz, None).total_seconds()) * 1000000000
+ deltas = np.array([num], dtype=np.int64)
+ typ = 'static'
+
+ dst_cache[cache_key] = (trans, deltas, typ)
+
+ return dst_cache[cache_key]
diff --git a/setup.py b/setup.py
index 444db5bc4d275..d21df49fe065f 100755
--- a/setup.py
+++ b/setup.py
@@ -341,6 +341,7 @@ class CheckSDist(sdist_class):
'pandas/_libs/window.pyx',
'pandas/_libs/sparse.pyx',
'pandas/_libs/parsers.pyx',
+ 'pandas/_libs/tslibs/timezones.pyx',
'pandas/io/sas/sas.pyx']
def initialize_options(self):
@@ -483,12 +484,14 @@ def pxd(name):
+ _pxi_dep['hashtable'])},
'_libs.tslib': {'pyxfile': '_libs/tslib',
'pxdfiles': ['_libs/src/util', '_libs/lib'],
- 'depends': tseries_depends,
+ 'depends': tseries_depends + \
+ ['pandas/_libs/tslibs/timezones.pyx'],
'sources': ['pandas/_libs/src/datetime/np_datetime.c',
'pandas/_libs/src/datetime/np_datetime_strings.c',
'pandas/_libs/src/period_helper.c']},
'_libs.period': {'pyxfile': '_libs/period',
- 'depends': tseries_depends,
+ 'depends': tseries_depends + \
+ ['pandas/_libs/tslibs/timezones.pyx'],
'sources': ['pandas/_libs/src/datetime/np_datetime.c',
'pandas/_libs/src/datetime/np_datetime_strings.c',
'pandas/_libs/src/period_helper.c']},
@@ -496,7 +499,8 @@ def pxd(name):
'sources': ['pandas/_libs/src/datetime/np_datetime.c',
'pandas/_libs/src/datetime/np_datetime_strings.c'],
'pxdfiles': ['_libs/src/util', '_libs/hashtable'],
- 'depends': _pxi_dep['index']},
+ 'depends': _pxi_dep['index'] + \
+ ['pandas/_libs/tslibs/timezones.pyx']},
'_libs.algos': {'pyxfile': '_libs/algos',
'pxdfiles': ['_libs/src/util', '_libs/algos', '_libs/hashtable'],
'depends': _pxi_dep['algos']},
@@ -528,6 +532,7 @@ def pxd(name):
'depends': ['pandas/_libs/testing.pyx']},
'_libs.hashing': {'pyxfile': '_libs/hashing',
'depends': ['pandas/_libs/hashing.pyx']},
+ '_libs.tslibs.timezones': {'pyxfile': '_libs/tslibs/timezones'},
'io.sas._sas': {'pyxfile': 'io/sas/sas'},
}
| `_libs.tslib` is over 5k lines and is imported by a bunch of other modules including `_libs.lib`. It looks like it was pasted together from a bunch of older files. There are a handful of areas where significant chunks can be refactored out in complexity-reducing (and testability-increasing) ways. This is the first one: timezones. (Next up: parsing (and gathering parsing code dispersed across pandas))
timezones.pyx has no other dependencies within pandas, helping to de-tangle some of the `_libs` modules
Code in timezones is used in both `_libs.tslib` and `_libs.period`, and a bit in `_libs.index`.
This is one of several steps in making _libs.period not need to import from non-cython code and
ideally not need to import `tslib` (though `NaT` makes that tough). See existing comments
in `_libs.__init__` on why this is desireable.
This is the first of several independent pieces to be split off of `tslib`.
There are also several notes on functions that appear to be unused and may be ready for removal.
Removes `datetime_helper` dependency from most of `_libs`, as it is somehow slower than a plain cython version. In cases where C can be replaced by idiomatic cython without hurting performance, I'm calling that a win.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17274 | 2017-08-17T19:58:24Z | 2017-09-11T11:26:36Z | null | 2017-10-30T16:24:00Z |
BUG: Index._searchsorted_monotonic(..., side='right') returns the left side position for monotonic decreasing indexes | diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index 636bb2dc3e60e..0f67b58c678a7 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -418,6 +418,7 @@ Indexing
- Bug in ``.isin()`` in which checking membership in empty ``Series`` objects raised an error (:issue:`16991`)
- Bug in ``CategoricalIndex`` reindexing in which specified indices containing duplicates were not being respected (:issue:`17323`)
- Bug in intersection of ``RangeIndex`` with negative step (:issue:`17296`)
+- Bug in ``IntervalIndex`` where performing a scalar lookup fails for included right endpoints of non-overlapping monotonic decreasing indexes (:issue:`16417`, :issue:`17271`)
I/O
^^^
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index a9098126a38e3..ef5f68936044a 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3465,7 +3465,7 @@ def _searchsorted_monotonic(self, label, side='left'):
# everything for it to work (element ordering, search side and
# resulting value).
pos = self[::-1].searchsorted(label, side='right' if side == 'left'
- else 'right')
+ else 'left')
return len(self) - pos
raise ValueError('index must be monotonic increasing or decreasing')
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 1fdc08d68eb26..90618cd6e235f 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -11,6 +11,7 @@
RangeIndex, MultiIndex, CategoricalIndex, DatetimeIndex,
TimedeltaIndex, PeriodIndex, IntervalIndex,
notna, isna)
+from pandas.core.indexes.base import InvalidIndexError
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas.core.dtypes.common import needs_i8_conversion
from pandas._libs.tslib import iNaT
@@ -138,9 +139,14 @@ def test_get_indexer_consistency(self):
if isinstance(index, IntervalIndex):
continue
- indexer = index.get_indexer(index[0:2])
- assert isinstance(indexer, np.ndarray)
- assert indexer.dtype == np.intp
+ if index.is_unique or isinstance(index, CategoricalIndex):
+ indexer = index.get_indexer(index[0:2])
+ assert isinstance(indexer, np.ndarray)
+ assert indexer.dtype == np.intp
+ else:
+ e = "Reindexing only valid with uniquely valued Index objects"
+ with tm.assert_raises_regex(InvalidIndexError, e):
+ indexer = index.get_indexer(index[0:2])
indexer, _ = index.get_indexer_non_unique(index[0:2])
assert isinstance(indexer, np.ndarray)
@@ -632,7 +638,8 @@ def test_difference_base(self):
pass
elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
assert result.__class__ == answer.__class__
- tm.assert_numpy_array_equal(result.asi8, answer.asi8)
+ tm.assert_numpy_array_equal(result.sort_values().asi8,
+ answer.sort_values().asi8)
else:
result = first.difference(case)
assert tm.equalContents(result, answer)
@@ -954,3 +961,47 @@ def test_join_self_unique(self, how):
if index.is_unique:
joined = index.join(index, how=how)
assert (index == joined).all()
+
+ def test_searchsorted_monotonic(self):
+ # GH17271
+ for index in self.indices.values():
+ # not implemented for tuple searches in MultiIndex
+ # or Intervals searches in IntervalIndex
+ if isinstance(index, (MultiIndex, IntervalIndex)):
+ continue
+
+ # nothing to test if the index is empty
+ if index.empty:
+ continue
+ value = index[0]
+
+ # determine the expected results (handle dupes for 'right')
+ expected_left, expected_right = 0, (index == value).argmin()
+ if expected_right == 0:
+ # all values are the same, expected_right should be length
+ expected_right = len(index)
+
+ # test _searchsorted_monotonic in all cases
+ # test searchsorted only for increasing
+ if index.is_monotonic_increasing:
+ ssm_left = index._searchsorted_monotonic(value, side='left')
+ assert expected_left == ssm_left
+
+ ssm_right = index._searchsorted_monotonic(value, side='right')
+ assert expected_right == ssm_right
+
+ ss_left = index.searchsorted(value, side='left')
+ assert expected_left == ss_left
+
+ ss_right = index.searchsorted(value, side='right')
+ assert expected_right == ss_right
+ elif index.is_monotonic_decreasing:
+ ssm_left = index._searchsorted_monotonic(value, side='left')
+ assert expected_left == ssm_left
+
+ ssm_right = index._searchsorted_monotonic(value, side='right')
+ assert expected_right == ssm_right
+ else:
+ # non-monotonic should raise.
+ with pytest.raises(ValueError):
+ index._searchsorted_monotonic(value, side='left')
diff --git a/pandas/tests/indexes/datetimes/test_datetimelike.py b/pandas/tests/indexes/datetimes/test_datetimelike.py
index 3b970ee382521..538e10e6011ec 100644
--- a/pandas/tests/indexes/datetimes/test_datetimelike.py
+++ b/pandas/tests/indexes/datetimes/test_datetimelike.py
@@ -12,7 +12,9 @@ class TestDatetimeIndex(DatetimeLike):
_holder = DatetimeIndex
def setup_method(self, method):
- self.indices = dict(index=tm.makeDateIndex(10))
+ self.indices = dict(index=tm.makeDateIndex(10),
+ index_dec=date_range('20130110', periods=10,
+ freq='-1D'))
self.setup_indices()
def create_index(self):
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index e24e2ad936e2c..51f7d13cb0638 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -18,7 +18,9 @@ class TestPeriodIndex(DatetimeLike):
_multiprocess_can_split_ = True
def setup_method(self, method):
- self.indices = dict(index=tm.makePeriodIndex(10))
+ self.indices = dict(index=tm.makePeriodIndex(10),
+ index_dec=period_range('20130101', periods=10,
+ freq='D')[::-1])
self.setup_indices()
def create_index(self):
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index f96dbdcfb8acf..d69fbbcdf4bf6 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -46,7 +46,8 @@ def setup_method(self, method):
catIndex=tm.makeCategoricalIndex(100),
empty=Index([]),
tuples=MultiIndex.from_tuples(lzip(
- ['foo', 'bar', 'baz'], [1, 2, 3])))
+ ['foo', 'bar', 'baz'], [1, 2, 3])),
+ repeats=Index([0, 0, 1, 1, 2, 2]))
self.setup_indices()
def create_index(self):
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index 1a0a38c173284..7e7e10e4aeabe 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -181,7 +181,9 @@ class TestFloat64Index(Numeric):
def setup_method(self, method):
self.indices = dict(mixed=Float64Index([1.5, 2, 3, 4, 5]),
- float=Float64Index(np.arange(5) * 2.5))
+ float=Float64Index(np.arange(5) * 2.5),
+ mixed_dec=Float64Index([5, 4, 3, 2, 1.5]),
+ float_dec=Float64Index(np.arange(4, -1, -1) * 2.5))
self.setup_indices()
def create_index(self):
@@ -654,7 +656,8 @@ class TestInt64Index(NumericInt):
_holder = Int64Index
def setup_method(self, method):
- self.indices = dict(index=Int64Index(np.arange(0, 20, 2)))
+ self.indices = dict(index=Int64Index(np.arange(0, 20, 2)),
+ index_dec=Int64Index(np.arange(19, -1, -1)))
self.setup_indices()
def create_index(self):
@@ -949,8 +952,9 @@ class TestUInt64Index(NumericInt):
_holder = UInt64Index
def setup_method(self, method):
- self.indices = dict(index=UInt64Index([2**63, 2**63 + 10, 2**63 + 15,
- 2**63 + 20, 2**63 + 25]))
+ vals = [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25]
+ self.indices = dict(index=UInt64Index(vals),
+ index_dec=UInt64Index(reversed(vals)))
self.setup_indices()
def create_index(self):
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index 06c8f0ee392c7..d206c36ee51c9 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -25,7 +25,8 @@ class TestRangeIndex(Numeric):
_compat_props = ['shape', 'ndim', 'size', 'itemsize']
def setup_method(self, method):
- self.indices = dict(index=RangeIndex(0, 20, 2, name='foo'))
+ self.indices = dict(index=RangeIndex(0, 20, 2, name='foo'),
+ index_dec=RangeIndex(18, -1, -2, name='bar'))
self.setup_indices()
def create_index(self):
diff --git a/pandas/tests/indexing/test_interval.py b/pandas/tests/indexing/test_interval.py
index be6e5e1cffb2e..31a94abcd99a5 100644
--- a/pandas/tests/indexing/test_interval.py
+++ b/pandas/tests/indexing/test_interval.py
@@ -3,6 +3,7 @@
import pandas as pd
from pandas import Series, DataFrame, IntervalIndex, Interval
+from pandas.compat import product
import pandas.util.testing as tm
@@ -14,16 +15,6 @@ def setup_method(self, method):
def test_loc_with_scalar(self):
s = self.s
- expected = 0
-
- result = s.loc[0.5]
- assert result == expected
-
- result = s.loc[1]
- assert result == expected
-
- with pytest.raises(KeyError):
- s.loc[0]
expected = s.iloc[:3]
tm.assert_series_equal(expected, s.loc[:3])
@@ -42,16 +33,6 @@ def test_loc_with_scalar(self):
def test_getitem_with_scalar(self):
s = self.s
- expected = 0
-
- result = s[0.5]
- assert result == expected
-
- result = s[1]
- assert result == expected
-
- with pytest.raises(KeyError):
- s[0]
expected = s.iloc[:3]
tm.assert_series_equal(expected, s[:3])
@@ -67,6 +48,41 @@ def test_getitem_with_scalar(self):
expected = s.iloc[2:5]
tm.assert_series_equal(expected, s[s >= 2])
+ @pytest.mark.parametrize('direction, closed',
+ product(('increasing', 'decreasing'),
+ ('left', 'right', 'neither', 'both')))
+ def test_nonoverlapping_monotonic(self, direction, closed):
+ tpls = [(0, 1), (2, 3), (4, 5)]
+ if direction == 'decreasing':
+ tpls = reversed(tpls)
+
+ idx = IntervalIndex.from_tuples(tpls, closed=closed)
+ s = Series(list('abc'), idx)
+
+ for key, expected in zip(idx.left, s):
+ if idx.closed_left:
+ assert s[key] == expected
+ assert s.loc[key] == expected
+ else:
+ with pytest.raises(KeyError):
+ s[key]
+ with pytest.raises(KeyError):
+ s.loc[key]
+
+ for key, expected in zip(idx.right, s):
+ if idx.closed_right:
+ assert s[key] == expected
+ assert s.loc[key] == expected
+ else:
+ with pytest.raises(KeyError):
+ s[key]
+ with pytest.raises(KeyError):
+ s.loc[key]
+
+ for key, expected in zip(idx.mid, s):
+ assert s[key] == expected
+ assert s.loc[key] == expected
+
def test_with_interval(self):
s = self.s
| - [X] closes #17271
- [X] closes #16417
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
I didn't add a whatsnew entry for the `Index._searchsorted_monotonic` fix, since it looks like past convention has been to not add whatsnew entries for private methods. Happy to add a whatsnew entry if I'm mistaken though. I did add a whatsnew entry for the downstream effect on IntervalIndex. | https://api.github.com/repos/pandas-dev/pandas/pulls/17272 | 2017-08-17T18:55:50Z | 2017-09-07T11:49:28Z | 2017-09-07T11:49:28Z | 2020-12-06T03:59:04Z |
CLN: replace %s syntax with .format in core.dtypes and core.sparse | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 723e4f70da4e9..c2cf6afc1a7b5 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -516,7 +516,8 @@ def maybe_cast_item(obj, item, dtype):
if dtype in (np.object_, np.bool_):
obj[item] = chunk.astype(np.object_)
elif not issubclass(dtype, (np.integer, np.bool_)): # pragma: no cover
- raise ValueError("Unexpected dtype encountered: %s" % dtype)
+ raise ValueError("Unexpected dtype encountered: {dtype}"
+ .format(dtype=dtype))
def invalidate_string_dtypes(dtype_set):
@@ -620,8 +621,9 @@ def astype_nansafe(arr, dtype, copy=True):
elif dtype == np.int64:
return arr.view(dtype)
elif dtype != _NS_DTYPE:
- raise TypeError("cannot astype a datetimelike from [%s] to [%s]" %
- (arr.dtype, dtype))
+ raise TypeError("cannot astype a datetimelike from [{from_dtype}] "
+ "to [{to_dtype}]".format(from_dtype=arr.dtype,
+ to_dtype=dtype))
return arr.astype(_NS_DTYPE)
elif is_timedelta64_dtype(arr):
if dtype == np.int64:
@@ -640,8 +642,9 @@ def astype_nansafe(arr, dtype, copy=True):
result[mask] = np.nan
return result
- raise TypeError("cannot astype a timedelta from [%s] to [%s]" %
- (arr.dtype, dtype))
+ raise TypeError("cannot astype a timedelta from [{from_dtype}] "
+ "to [{to_dtype}]".format(from_dtype=arr.dtype,
+ to_dtype=dtype))
return arr.astype(_TD_DTYPE)
elif (np.issubdtype(arr.dtype, np.floating) and
@@ -926,7 +929,7 @@ def maybe_cast_to_datetime(value, dtype, errors='raise'):
dtype = _NS_DTYPE
else:
raise TypeError("cannot convert datetimelike to "
- "dtype [%s]" % dtype)
+ "dtype [{dtype}]".format(dtype=dtype))
elif is_datetime64tz:
# our NaT doesn't support tz's
@@ -943,7 +946,7 @@ def maybe_cast_to_datetime(value, dtype, errors='raise'):
dtype = _TD_DTYPE
else:
raise TypeError("cannot convert timedeltalike to "
- "dtype [%s]" % dtype)
+ "dtype [{dtype}]".format(dtype=dtype))
if is_scalar(value):
if value == iNaT or isna(value):
@@ -982,7 +985,8 @@ def maybe_cast_to_datetime(value, dtype, errors='raise'):
return tslib.ints_to_pydatetime(ints)
# we have a non-castable dtype that was passed
- raise TypeError('Cannot cast datetime64 to %s' % dtype)
+ raise TypeError('Cannot cast datetime64 to {dtype}'
+ .format(dtype=dtype))
else:
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 37f99bd344e6c..c47e61dc446be 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1854,10 +1854,10 @@ def _validate_date_like_dtype(dtype):
try:
typ = np.datetime_data(dtype)[0]
except ValueError as e:
- raise TypeError('%s' % e)
+ raise TypeError('{error}'.format(error=e))
if typ != 'generic' and typ != 'ns':
- raise ValueError('%r is too specific of a frequency, try passing %r' %
- (dtype.name, dtype.type.__name__))
+ msg = '{name!r} is too specific of a frequency, try passing {type!r}'
+ raise ValueError(msg.format(name=dtype.name, type=dtype.type.__name__))
_string_dtypes = frozenset(map(_get_dtype_from_object, (binary_type,
@@ -1924,6 +1924,6 @@ def pandas_dtype(dtype):
if dtype in [object, np.object_, 'object', 'O']:
return npdtype
elif npdtype.kind == 'O':
- raise TypeError('dtype {0} not understood'.format(dtype))
+ raise TypeError('dtype {dtype} not understood'.format(dtype=dtype))
return npdtype
diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py
index 4a12dd1af28c9..2f830a98db649 100644
--- a/pandas/core/sparse/array.py
+++ b/pandas/core/sparse/array.py
@@ -52,8 +52,8 @@ def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
def wrapper(self, other):
if isinstance(other, np.ndarray):
if len(self) != len(other):
- raise AssertionError("length mismatch: %d vs. %d" %
- (len(self), len(other)))
+ raise AssertionError("length mismatch: {self} vs. {other}"
+ .format(self=len(self), other=len(other)))
if not isinstance(other, ABCSparseArray):
dtype = getattr(other, 'dtype', None)
other = SparseArray(other, fill_value=self.fill_value,
@@ -66,7 +66,8 @@ def wrapper(self, other):
return _wrap_result(name, result, self.sp_index, fill)
else: # pragma: no cover
- raise TypeError('operation with %s not supported' % type(other))
+ raise TypeError('operation with {other} not supported'
+ .format(other=type(other)))
if name.startswith("__"):
name = name[2:-2]
@@ -218,9 +219,9 @@ def __new__(cls, data, sparse_index=None, index=None, kind='integer',
else:
values = _sanitize_values(data)
if len(values) != sparse_index.npoints:
- raise AssertionError("Non array-like type {0} must have"
- " the same length as the"
- " index".format(type(values)))
+ raise AssertionError("Non array-like type {type} must "
+ "have the same length as the index"
+ .format(type=type(values)))
# Create array, do *not* copy data by default
if copy:
subarr = np.array(values, dtype=dtype, copy=True)
@@ -330,9 +331,10 @@ def __len__(self):
return 0
def __unicode__(self):
- return '%s\nFill: %s\n%s' % (printing.pprint_thing(self),
- printing.pprint_thing(self.fill_value),
- printing.pprint_thing(self.sp_index))
+ return '{self}\nFill: {fill}\n{index}'.format(
+ self=printing.pprint_thing(self),
+ fill=printing.pprint_thing(self.fill_value),
+ index=printing.pprint_thing(self.sp_index))
def disable(self, other):
raise NotImplementedError('inplace binary ops not supported')
@@ -377,8 +379,8 @@ def fill_value(self, value):
if is_dtype_equal(self.dtype, new_dtype):
self._fill_value = fill_value
else:
- msg = 'unable to set fill_value {0} to {1} dtype'
- raise ValueError(msg.format(value, self.dtype))
+ msg = 'unable to set fill_value {fill} to {dtype} dtype'
+ raise ValueError(msg.format(fill=value, dtype=self.dtype))
def get_values(self, fill=None):
""" return a dense representation """
@@ -466,7 +468,8 @@ def take(self, indices, axis=0, allow_fill=True,
nv.validate_take(tuple(), kwargs)
if axis:
- raise ValueError("axis must be 0, input was {0}".format(axis))
+ raise ValueError("axis must be 0, input was {axis}"
+ .format(axis=axis))
if is_integer(indices):
# return scalar
@@ -482,12 +485,12 @@ def take(self, indices, axis=0, allow_fill=True,
'all indices must be >= -1')
raise ValueError(msg)
elif (n <= indices).any():
- msg = 'index is out of bounds for size {0}'
- raise IndexError(msg.format(n))
+ msg = 'index is out of bounds for size {size}'.format(size=n)
+ raise IndexError(msg)
else:
if ((indices < -n) | (n <= indices)).any():
- msg = 'index is out of bounds for size {0}'
- raise IndexError(msg.format(n))
+ msg = 'index is out of bounds for size {size}'.format(size=n)
+ raise IndexError(msg)
indices = indices.astype(np.int32)
if not (allow_fill and fill_value is not None):
@@ -543,8 +546,8 @@ def astype(self, dtype=None, copy=True):
else:
fill_value = dtype.type(self.fill_value)
except ValueError:
- msg = 'unable to coerce current fill_value {0} to {1} dtype'
- raise ValueError(msg.format(self.fill_value, dtype))
+ msg = 'unable to coerce current fill_value {fill} to {dtype} dtype'
+ raise ValueError(msg.format(fill=self.fill_value, dtype=dtype))
return self._simple_new(sp_values, self.sp_index,
fill_value=fill_value)
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
index f30bd5c36a61b..1e98e919baa33 100644
--- a/pandas/core/sparse/frame.py
+++ b/pandas/core/sparse/frame.py
@@ -214,11 +214,11 @@ def _prep_index(self, data, index, columns):
columns = _default_index(K)
if len(columns) != K:
- raise ValueError('Column length mismatch: %d vs. %d' %
- (len(columns), K))
+ raise ValueError('Column length mismatch: {columns} vs. {K}'
+ .format(columns=len(columns), K=K))
if len(index) != N:
- raise ValueError('Index length mismatch: %d vs. %d' %
- (len(index), N))
+ raise ValueError('Index length mismatch: {index} vs. {N}'
+ .format(index=len(index), N=N))
return index, columns
def to_coo(self):
@@ -725,17 +725,17 @@ def _maybe_rename_join(self, other, lsuffix, rsuffix):
to_rename = self.columns.intersection(other.columns)
if len(to_rename) > 0:
if not lsuffix and not rsuffix:
- raise ValueError('columns overlap but no suffix specified: %s'
- % to_rename)
+ raise ValueError('columns overlap but no suffix specified: '
+ '{to_rename}'.format(to_rename=to_rename))
def lrenamer(x):
if x in to_rename:
- return '%s%s' % (x, lsuffix)
+ return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix)
return x
def rrenamer(x):
if x in to_rename:
- return '%s%s' % (x, rsuffix)
+ return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix)
return x
this = self.rename(columns=lrenamer)
diff --git a/pandas/core/sparse/list.py b/pandas/core/sparse/list.py
index e2a8c6a29cc23..f3e64b7efc764 100644
--- a/pandas/core/sparse/list.py
+++ b/pandas/core/sparse/list.py
@@ -35,7 +35,8 @@ def __init__(self, data=None, fill_value=np.nan):
def __unicode__(self):
contents = '\n'.join(repr(c) for c in self._chunks)
- return '%s\n%s' % (object.__repr__(self), pprint_thing(contents))
+ return '{self}\n{contents}'.format(self=object.__repr__(self),
+ contents=pprint_thing(contents))
def __len__(self):
return sum(len(c) for c in self._chunks)
@@ -43,7 +44,7 @@ def __len__(self):
def __getitem__(self, i):
if i < 0:
if i + len(self) < 0: # pragma: no cover
- raise ValueError('%d out of range' % i)
+ raise ValueError('{index} out of range'.format(index=i))
i += len(self)
passed = 0
diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py
index 62d20e73dbfcb..99aec2dd11569 100644
--- a/pandas/core/sparse/series.py
+++ b/pandas/core/sparse/series.py
@@ -65,7 +65,8 @@ def wrapper(self, other):
index=self.index,
name=self.name)
else: # pragma: no cover
- raise TypeError('operation with %s not supported' % type(other))
+ raise TypeError('operation with {other} not supported'
+ .format(other=type(other)))
wrapper.__name__ = name
if name.startswith("__"):
@@ -295,7 +296,8 @@ def shape(self):
def __unicode__(self):
# currently, unicode is same as repr...fixes infinite loop
series_rep = Series.__unicode__(self)
- rep = '%s\n%s' % (series_rep, repr(self.sp_index))
+ rep = '{series}\n{index!r}'.format(series=series_rep,
+ index=self.sp_index)
return rep
def __array_wrap__(self, result, context=None):
| Progress towards #16130
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Replaced `%s` syntax with `.format` in pandas.core.dtypes and pandas.core.sparse. Additionally, made some of the existing positional `.format` code more explicit.
| https://api.github.com/repos/pandas-dev/pandas/pulls/17270 | 2017-08-17T02:05:51Z | 2017-08-17T10:10:53Z | 2017-08-17T10:10:53Z | 2017-08-24T16:29:49Z |
CLN: Replace imports of * with explicit imports | diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx
index 101e2c031f26e..2462b7af7b0fe 100644
--- a/pandas/_libs/hashtable.pyx
+++ b/pandas/_libs/hashtable.pyx
@@ -2,7 +2,27 @@
from cpython cimport PyObject, Py_INCREF, PyList_Check, PyTuple_Check
-from khash cimport *
+from khash cimport (
+ khiter_t,
+
+ kh_str_t, kh_init_str, kh_put_str, kh_exist_str,
+ kh_get_str, kh_destroy_str, kh_resize_str,
+
+ kh_put_strbox, kh_get_strbox, kh_init_strbox,
+
+ kh_int64_t, kh_init_int64, kh_resize_int64, kh_destroy_int64,
+ kh_get_int64, kh_exist_int64, kh_put_int64,
+
+ kh_float64_t, kh_exist_float64, kh_put_float64, kh_init_float64,
+ kh_get_float64, kh_destroy_float64, kh_resize_float64,
+
+ kh_resize_uint64, kh_exist_uint64, kh_destroy_uint64, kh_put_uint64,
+ kh_get_uint64, kh_init_uint64,
+
+ kh_destroy_pymap, kh_exist_pymap, kh_init_pymap, kh_get_pymap,
+ kh_put_pymap, kh_resize_pymap)
+
+
from numpy cimport *
from libc.stdlib cimport malloc, free
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 273dc06886088..8cf3f79a29704 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -1,8 +1,6 @@
# cython: profile=False
-from numpy cimport ndarray
-
-from numpy cimport (float64_t, int32_t, int64_t, uint8_t,
+from numpy cimport (ndarray, float64_t, int32_t, int64_t, uint8_t, uint64_t,
NPY_DATETIME, NPY_TIMEDELTA)
cimport cython
@@ -16,7 +14,9 @@ cimport util
import numpy as np
cimport tslib
-from hashtable cimport *
+
+from hashtable cimport HashTable
+
from pandas._libs import tslib, algos, hashtable as _hash
from pandas._libs.tslib import Timestamp, Timedelta
from datetime import datetime, timedelta
diff --git a/pandas/_libs/join_func_helper.pxi.in b/pandas/_libs/join_func_helper.pxi.in
index 9cca9bba2a197..73d231b8588dc 100644
--- a/pandas/_libs/join_func_helper.pxi.in
+++ b/pandas/_libs/join_func_helper.pxi.in
@@ -9,6 +9,8 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
# asof_join_by
#----------------------------------------------------------------------
+from hashtable cimport PyObjectHashTable, UInt64HashTable, Int64HashTable
+
{{py:
# table_type, by_dtype
@@ -23,7 +25,6 @@ on_dtypes = ['uint8_t', 'uint16_t', 'uint32_t', 'uint64_t',
}}
-from hashtable cimport *
{{for table_type, by_dtype in by_dtypes}}
{{for on_dtype in on_dtypes}}
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 0458d4ae9f3de..53ca41e4b2489 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -10,21 +10,14 @@ from numpy cimport *
np.import_array()
-cdef extern from "numpy/arrayobject.h":
- cdef enum NPY_TYPES:
- NPY_intp "NPY_INTP"
-
from libc.stdlib cimport malloc, free
-from cpython cimport (PyDict_New, PyDict_GetItem, PyDict_SetItem,
- PyDict_Contains, PyDict_Keys,
- Py_INCREF, PyTuple_SET_ITEM,
+from cpython cimport (Py_INCREF, PyTuple_SET_ITEM,
PyList_Check, PyFloat_Check,
PyString_Check,
PyBytes_Check,
- PyTuple_SetItem,
+ PyUnicode_Check,
PyTuple_New,
- PyObject_SetAttrString,
PyObject_RichCompareBool,
PyBytes_GET_SIZE,
PyUnicode_GET_SIZE,
@@ -55,7 +48,18 @@ cdef double NAN = nan
from datetime import datetime as pydatetime
# this is our tseries.pxd
-from datetime cimport *
+from datetime cimport (
+ get_timedelta64_value, get_datetime64_value,
+ npy_timedelta, npy_datetime,
+ PyDateTime_Check, PyDate_Check, PyTime_Check, PyDelta_Check,
+ PyDateTime_GET_YEAR,
+ PyDateTime_GET_MONTH,
+ PyDateTime_GET_DAY,
+ PyDateTime_DATE_GET_HOUR,
+ PyDateTime_DATE_GET_MINUTE,
+ PyDateTime_DATE_GET_SECOND,
+ PyDateTime_IMPORT)
+
from tslib cimport (convert_to_tsobject, convert_to_timedelta64,
_check_all_nulls)
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index ae420da2102b2..3e8b5c4bd3feb 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -32,7 +32,7 @@ cdef extern from "stdlib.h":
cimport cython
cimport numpy as cnp
-from numpy cimport ndarray, uint8_t, uint64_t
+from numpy cimport ndarray, uint8_t, uint64_t, int64_t
import numpy as np
cimport util
@@ -57,7 +57,14 @@ import os
cnp.import_array()
-from khash cimport *
+from khash cimport (
+ khiter_t,
+ kh_str_t, kh_init_str, kh_put_str, kh_exist_str,
+ kh_get_str, kh_destroy_str,
+ kh_float64_t, kh_get_float64, kh_destroy_float64,
+ kh_put_float64, kh_init_float64,
+ kh_strbox_t, kh_put_strbox, kh_get_strbox, kh_init_strbox,
+ kh_destroy_strbox)
import sys
diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx
index 1db31387de5a7..53df68ea9677d 100644
--- a/pandas/_libs/period.pyx
+++ b/pandas/_libs/period.pyx
@@ -2,6 +2,7 @@ from datetime import datetime, date, timedelta
import operator
from cpython cimport (
+ PyUnicode_Check,
PyObject_RichCompareBool,
Py_EQ, Py_NE,
)
@@ -19,7 +20,16 @@ from pandas import compat
from pandas.compat import PY2
cimport cython
-from datetime cimport *
+
+from datetime cimport (
+ is_leapyear,
+ PyDateTime_IMPORT,
+ pandas_datetimestruct,
+ pandas_datetimestruct_to_datetime,
+ pandas_datetime_to_datetimestruct,
+ PANDAS_FR_ns,
+ INT32_MIN)
+
cimport util, lib
from lib cimport is_null_datetimelike, is_period
from pandas._libs import tslib, lib
@@ -30,8 +40,7 @@ from tslib cimport (
_is_utc,
_is_tzlocal,
_get_dst_info,
- _nat_scalar_rules,
-)
+ _nat_scalar_rules)
from pandas.tseries import offsets
from pandas.core.tools.datetimes import parse_time_string
diff --git a/pandas/_libs/src/properties.pyx b/pandas/_libs/src/properties.pyx
index e619a3b6edd9a..4a3fd4b771a17 100644
--- a/pandas/_libs/src/properties.pyx
+++ b/pandas/_libs/src/properties.pyx
@@ -1,4 +1,5 @@
-from cpython cimport PyDict_Contains, PyDict_GetItem, PyDict_GetItem
+from cpython cimport (
+ PyDict_Contains, PyDict_GetItem, PyDict_GetItem, PyDict_SetItem)
cdef class cache_readonly(object):
diff --git a/pandas/_libs/src/skiplist.pyx b/pandas/_libs/src/skiplist.pyx
index 3017931e25115..559b529822a69 100644
--- a/pandas/_libs/src/skiplist.pyx
+++ b/pandas/_libs/src/skiplist.pyx
@@ -6,10 +6,6 @@
# Cython version: Wes McKinney
-cdef extern from "numpy/arrayobject.h":
-
- void import_array()
-
cdef extern from "math.h":
double log(double x)
@@ -25,7 +21,7 @@ import numpy as np
from random import random
# initialize numpy
-import_array()
+np.import_array()
# TODO: optimize this, make less messy
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 44be9ba56b84a..3bd40a455ef6c 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -4,8 +4,8 @@ import warnings
cimport numpy as np
from numpy cimport (int8_t, int32_t, int64_t, import_array, ndarray,
+ float64_t,
NPY_INT64, NPY_DATETIME, NPY_TIMEDELTA)
-from datetime cimport get_datetime64_value, get_timedelta64_value
import numpy as np
import sys
@@ -30,20 +30,47 @@ cdef extern from "datetime_helper.h":
double total_seconds(object)
# this is our datetime.pxd
-from datetime cimport cmp_pandas_datetimestruct
from libc.stdlib cimport free
from util cimport (is_integer_object, is_float_object, is_datetime64_object,
is_timedelta64_object, INT64_MAX)
cimport util
-from datetime cimport *
-from khash cimport *
-cimport cython
-
+# this is our datetime.pxd
+from datetime cimport (
+ pandas_datetimestruct,
+ pandas_datetime_to_datetimestruct,
+ pandas_datetimestruct_to_datetime,
+ cmp_pandas_datetimestruct,
+ days_per_month_table,
+ get_datetime64_value,
+ get_timedelta64_value,
+ get_datetime64_unit,
+ PANDAS_DATETIMEUNIT,
+ _string_to_dts,
+ _pydatetime_to_dts,
+ _date_to_datetime64,
+ npy_datetime,
+ is_leapyear,
+ dayofweek,
+ PANDAS_FR_ns,
+ PyDateTime_Check, PyDate_Check,
+ PyDateTime_IMPORT,
+ timedelta, datetime
+ )
+
+# stdlib datetime imports
from datetime import timedelta, datetime
from datetime import time as datetime_time
+from khash cimport (
+ khiter_t,
+ kh_destroy_int64, kh_put_int64,
+ kh_init_int64, kh_int64_t,
+ kh_resize_int64, kh_get_int64)
+
+cimport cython
+
import re
# dateutil compat
@@ -81,15 +108,6 @@ PyDateTime_IMPORT
cdef int64_t NPY_NAT = util.get_nat()
iNaT = NPY_NAT
-# < numpy 1.7 compat for NaT
-compat_NaT = np.array([NPY_NAT]).astype('m8[ns]').item()
-
-
-try:
- basestring
-except NameError: # py3
- basestring = str
-
cdef inline object create_timestamp_from_ts(
int64_t value, pandas_datetimestruct dts,
@@ -314,7 +332,7 @@ class Timestamp(_Timestamp):
tz : string / timezone object, default None
Timezone to localize to
"""
- if isinstance(tz, basestring):
+ if isinstance(tz, string_types):
tz = maybe_get_tz(tz)
return cls(datetime.now(tz))
@@ -615,7 +633,7 @@ class Timestamp(_Timestamp):
if self.tzinfo is None:
# tz naive, localize
tz = maybe_get_tz(tz)
- if not isinstance(ambiguous, basestring):
+ if not isinstance(ambiguous, string_types):
ambiguous = [ambiguous]
value = tz_localize_to_utc(np.array([self.value], dtype='i8'), tz,
ambiguous=ambiguous, errors=errors)[0]
diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx
index bdd371871b6e1..9fb3d0662eb4f 100644
--- a/pandas/_libs/window.pyx
+++ b/pandas/_libs/window.pyx
@@ -56,7 +56,13 @@ cdef inline int int_min(int a, int b): return a if a <= b else b
from util cimport numeric
-from skiplist cimport *
+from skiplist cimport (
+ skiplist_t,
+ skiplist_init,
+ skiplist_destroy,
+ skiplist_get,
+ skiplist_insert,
+ skiplist_remove)
cdef extern from "../src/headers/math.h":
double sqrt(double x) nogil
| Remove compat_NaT which is never used after being defined
Remove other unused imports
Remove unused cdef enum NPY_TYPES
flake8 cleanup in period.pyx
Ref #17234
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/17269 | 2017-08-16T21:04:41Z | 2017-08-17T10:13:37Z | 2017-08-17T10:13:37Z | 2017-10-30T16:24:03Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.