title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
rework Series.sort is_view check on underlying ndarray before inplace sort (GH5856) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 425f6dfe36990..c8e5d700ac4a0 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -56,6 +56,9 @@ New features
API Changes
~~~~~~~~~~~
+ - ``Series.sort`` will raise a ``ValueError`` (rather than a ``TypeError``) on sorting an
+ object that is a view of another (:issue:`5856`, :issue:`5853`)
+
.. _release.bug_fixes-0.13.1:
Experimental Features
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5544cd0b34e3c..0f49c976d00a3 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1604,10 +1604,15 @@ def _ixs(self, i, axis=0, copy=False):
values = self._data.iget(i)
if not len(values):
values = np.array([np.nan] * len(self.index), dtype=object)
- return self._constructor_sliced.from_array(
+ result = self._constructor_sliced.from_array(
values, index=self.index,
name=label, fastpath=True)
+ # this is a cached value, mark it so
+ result._set_as_cached(i, self)
+
+ return result
+
def iget_value(self, i, j):
return self.iat[i, j]
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 61235862534f0..92539e7deb5d7 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -983,9 +983,14 @@ def _get_item_cache(self, item):
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
- res._cacher = (item, weakref.ref(self))
+ res._set_as_cached(item, self)
return res
+ def _set_as_cached(self, item, cacher):
+ """ set the _cacher attribute on the calling object with
+ a weakref to cacher """
+ self._cacher = (item, weakref.ref(cacher))
+
def _box_item_values(self, key, values):
raise NotImplementedError
@@ -994,6 +999,12 @@ def _maybe_cache_changed(self, item, value):
maybe it has changed """
self._data.set(item, value)
+ @property
+ def _is_cached(self):
+ """ boolean : return if I am cached """
+ cacher = getattr(self, '_cacher', None)
+ return cacher is not None
+
def _maybe_update_cacher(self, clear=False):
""" see if we need to update our parent cacher
if clear, then clear our cache """
diff --git a/pandas/core/series.py b/pandas/core/series.py
index f147eb87d7480..c310358ab58f9 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1664,20 +1664,16 @@ def sort(self, axis=0, kind='quicksort', order=None, ascending=True):
--------
pandas.Series.order
"""
- sortedSeries = self.order(na_last=True, kind=kind,
- ascending=ascending)
- true_base = self.values
- while true_base.base is not None:
- true_base = true_base.base
+ # GH 5856/5863
+ if self._is_cached:
+ raise ValueError("This Series is a view of some other array, to "
+ "sort in-place you must create a copy")
- if (true_base is not None and
- (true_base.ndim != 1 or true_base.shape != self.shape)):
- raise TypeError('This Series is a view of some other array, to '
- 'sort in-place you must create a copy')
+ result = self.order(na_last=True, kind=kind,
+ ascending=ascending)
- self._data = sortedSeries._data.copy()
- self.index = sortedSeries.index
+ self._update_inplace(result)
def sort_index(self, ascending=True):
"""
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index ef6990337bbbb..bded2fad36763 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -9547,7 +9547,7 @@ def test_sort_datetimes(self):
def test_frame_column_inplace_sort_exception(self):
s = self.frame['A']
- with assertRaisesRegexp(TypeError, "This Series is a view"):
+ with assertRaisesRegexp(ValueError, "This Series is a view"):
s.sort()
cp = s.copy()
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index c48b4b84698b6..a5270fbbecf00 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -2019,6 +2019,20 @@ def f():
zed['eyes']['right'].fillna(value=555, inplace=True)
self.assertRaises(com.SettingWithCopyError, f)
+ # GH 5856/5863
+ # Series.sort operating on a view
+ df = DataFrame(np.random.randn(10,4))
+ s = df.iloc[:,0]
+ def f():
+ s.sort()
+ self.assertRaises(ValueError, f)
+
+ df = DataFrame(np.random.randn(10,4))
+ s = df.iloc[:,0]
+ s = s.order()
+ assert_series_equal(s,df.iloc[:,0].order())
+ assert_series_equal(s,df[0].order())
+
pd.set_option('chained_assignment','warn')
def test_float64index_slicing_bug(self):
| closes #5856
related #5853
DataFrame._ixs will properly record a cache change (similar to _get_item_cache)
| https://api.github.com/repos/pandas-dev/pandas/pulls/5859 | 2014-01-05T15:26:43Z | 2014-01-06T12:25:15Z | 2014-01-06T12:25:15Z | 2014-06-25T17:54:20Z |
COMPAT/BUG: Ignore numexpr < 2.0 | diff --git a/pandas/computation/__init__.py b/pandas/computation/__init__.py
index e69de29bb2d1d..fcfa515283f97 100644
--- a/pandas/computation/__init__.py
+++ b/pandas/computation/__init__.py
@@ -0,0 +1,17 @@
+try:
+ import numexpr as ne
+ _NUMEXPR_INSTALLED = True
+ import distutils.version
+ if distutils.version.LooseVersion(ne.__version__) < '2':
+ _NUMEXPR_INSTALLED = False
+ import warnings
+ warnings.warn("pandas requires at least numexpr version 2.0"
+ " to accelerate with numexpr. Found numexpr version"
+ " %s" % ne.__version__)
+ del ne
+ del distutils.version
+except ImportError: # pragma: no cover
+ _NUMEXPR_INSTALLED = False
+
+#: tracks globally whether numexpr should be used.
+_USE_NUMEXPR = _NUMEXPR_INSTALLED
diff --git a/pandas/computation/engines.py b/pandas/computation/engines.py
index 9738cac58fb2d..9bbd4ecf944d3 100644
--- a/pandas/computation/engines.py
+++ b/pandas/computation/engines.py
@@ -5,6 +5,7 @@
from pandas import compat
from pandas.core import common as com
+import pandas.computation as computation
from pandas.computation.align import _align, _reconstruct_object
from pandas.computation.ops import UndefinedVariableError
@@ -79,6 +80,9 @@ class NumExprEngine(AbstractEngine):
has_neg_frac = True
def __init__(self, expr):
+ if not computation._USE_NUMEXPR:
+ raise ImportError("Can't use numexpr engine without compatbile"
+ " version of numexpr (or with numexpr disabled)")
super(NumExprEngine, self).__init__(expr)
def convert(self):
diff --git a/pandas/computation/eval.py b/pandas/computation/eval.py
index 163477b258e15..d3e5dfe8ca62e 100644
--- a/pandas/computation/eval.py
+++ b/pandas/computation/eval.py
@@ -10,6 +10,7 @@
from pandas.compat import string_types
from pandas.computation.expr import Expr, _parsers, _ensure_scope
from pandas.computation.engines import _engines
+import pandas.computation as computation
def _check_engine(engine):
@@ -33,12 +34,10 @@ def _check_engine(engine):
# TODO: validate this in a more general way (thinking of future engines
# that won't necessarily be import-able)
# Could potentially be done on engine instantiation
- if engine == 'numexpr':
- try:
- import numexpr
- except ImportError:
- raise ImportError("'numexpr' not found. Cannot use "
- "engine='numexpr' if 'numexpr' is not installed")
+ if engine == 'numexpr' and not computation._USE_NUMEXPR:
+ raise ImportError("'numexpr' not found or disabled. Cannot use "
+ "engine='numexpr' if 'numexpr' < v2.0 is not "
+ "installed")
def _check_parser(parser):
diff --git a/pandas/computation/expr.py b/pandas/computation/expr.py
index c16205ff34b1f..2da8ffb8f6790 100644
--- a/pandas/computation/expr.py
+++ b/pandas/computation/expr.py
@@ -16,6 +16,7 @@
from pandas.compat import StringIO, zip, reduce, string_types
from pandas.core.base import StringMixin
from pandas.core import common as com
+import pandas.computation as computation
from pandas.computation.common import NameResolutionError
from pandas.computation.ops import (_cmp_ops_syms, _bool_ops_syms,
_arith_ops_syms, _unary_ops_syms, is_term)
diff --git a/pandas/computation/expressions.py b/pandas/computation/expressions.py
index 035878e20c645..310a846c77925 100644
--- a/pandas/computation/expressions.py
+++ b/pandas/computation/expressions.py
@@ -8,16 +8,15 @@
import numpy as np
from pandas.core.common import _values_from_object
-
+import pandas.computation as computation
try:
import numexpr as ne
- _NUMEXPR_INSTALLED = True
-except ImportError: # pragma: no cover
- _NUMEXPR_INSTALLED = False
+except ImportError:
+ pass
+
_TEST_MODE = None
_TEST_RESULT = None
-_USE_NUMEXPR = _NUMEXPR_INSTALLED
_evaluate = None
_where = None
@@ -33,13 +32,12 @@
def set_use_numexpr(v=True):
# set/unset to use numexpr
- global _USE_NUMEXPR
- if _NUMEXPR_INSTALLED:
- _USE_NUMEXPR = v
+ if computation._NUMEXPR_INSTALLED:
+ computation._USE_NUMEXPR = v
# choose what we are going to do
global _evaluate, _where
- if not _USE_NUMEXPR:
+ if not computation._USE_NUMEXPR:
_evaluate = _evaluate_standard
_where = _where_standard
else:
@@ -50,7 +48,7 @@ def set_use_numexpr(v=True):
def set_numexpr_threads(n=None):
# if we are using numexpr, set the threads to n
# otherwise reset
- if _NUMEXPR_INSTALLED and _USE_NUMEXPR:
+ if computation._NUMEXPR_INSTALLED and computation._USE_NUMEXPR:
if n is None:
n = ne.detect_number_of_cores()
ne.set_num_threads(n)
diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py
index e6d2484a41019..c2a2cd6b79aca 100644
--- a/pandas/computation/tests/test_eval.py
+++ b/pandas/computation/tests/test_eval.py
@@ -16,8 +16,7 @@
from pandas import DataFrame, Series, Panel, date_range
from pandas.util.testing import makeCustomDataframe as mkdf
-from pandas.computation import pytables
-from pandas.computation.expressions import _USE_NUMEXPR
+from pandas.computation import pytables, _USE_NUMEXPR
from pandas.computation.engines import _engines
from pandas.computation.expr import PythonExprVisitor, PandasExprVisitor
from pandas.computation.ops import (_binary_ops_dict, _unary_ops_dict,
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index 7d392586c159b..49284f0a00a52 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -10,7 +10,8 @@
from numpy.testing import assert_array_equal
from pandas.core.api import DataFrame, Panel
-from pandas.computation import expressions as expr
+import pandas.computation as computation
+import pandas.computation.expressions as expr
from pandas import compat
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
@@ -20,15 +21,6 @@
from numpy.testing.decorators import slow
-if not expr._USE_NUMEXPR:
- try:
- import numexpr
- except ImportError:
- msg = "don't have"
- else:
- msg = "not using"
- raise nose.SkipTest("{0} numexpr".format(msg))
-
_frame = DataFrame(randn(10000, 4), columns=list('ABCD'), dtype='float64')
_frame2 = DataFrame(randn(100, 4), columns = list('ABCD'), dtype='float64')
_mixed = DataFrame({ 'A' : _frame['A'].copy(), 'B' : _frame['B'].astype('float32'), 'C' : _frame['C'].astype('int64'), 'D' : _frame['D'].astype('int32') })
@@ -47,6 +39,26 @@
_mixed2_panel = Panel(dict(ItemA=_mixed2, ItemB=(_mixed2 + 3)))
+def test_arithmetic_works_with_zero_min_elements():
+ df = DataFrame(range(10))
+ original_min = expr._MIN_ELEMENTS
+ expr._MIN_ELEMENTS = 0
+ result = df + 1
+ expected = DataFrame(range(1, 11))
+ tm.assert_frame_equal(result, expected)
+ expr._MIN_ELEMENTS = original_min
+
+
+if not computation._USE_NUMEXPR:
+ try:
+ import numexpr
+ except ImportError:
+ msg = "don't have"
+ else:
+ msg = "not using"
+ raise nose.SkipTest("{0} numexpr".format(msg))
+
+
class TestExpressions(tm.TestCase):
_multiprocess_can_split_ = False
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 3b6e4ba445ce0..7e6563e08f41f 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -49,6 +49,7 @@
import pandas.util.testing as tm
import pandas.lib as lib
+import pandas.computation as computation
from numpy.testing.decorators import slow
@@ -11927,12 +11928,9 @@ def test_concat_empty_dataframe_dtypes(self):
def skip_if_no_ne(engine='numexpr'):
- if engine == 'numexpr':
- try:
- import numexpr as ne
- except ImportError:
- raise nose.SkipTest("cannot query engine numexpr when numexpr not "
- "installed")
+ if engine == 'numexpr' and not computation._NUMEXPR_INSTALLED:
+ raise nose.SkipTest("cannot query engine numexpr when numexpr 2.0 not "
+ "installed")
def skip_if_no_pandas_parser(parser):
| in both computation.expressions and computationr.engine. Issues with API
compatibility.
Fixes #5857, #5855, #5845.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5858 | 2014-01-05T07:14:39Z | 2014-01-26T18:38:17Z | null | 2014-07-16T17:24:57Z |
ENH: Refactor code to add is_view method for Series. | diff --git a/doc/source/v0.13.1.txt b/doc/source/v0.13.1.txt
index 250adffdadbca..0ea6b161107e7 100644
--- a/doc/source/v0.13.1.txt
+++ b/doc/source/v0.13.1.txt
@@ -29,6 +29,8 @@ Deprecations
Enhancements
~~~~~~~~~~~~
+Added an ``is_view`` method to Series.
+
Experimental
~~~~~~~~~~~~
diff --git a/pandas/core/series.py b/pandas/core/series.py
index f147eb87d7480..ec4fd3aa18b94 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1645,6 +1645,20 @@ def update(self, other):
#----------------------------------------------------------------------
# Reindexing, sorting
+ def is_view(self):
+ """
+ Return True if series is a view of some other array, False otherwise.
+ """
+ true_base = self.values
+ while true_base.base is not None:
+ true_base = true_base.base
+
+ if (true_base is not None and
+ (true_base.ndim != 1 or true_base.shape != self.shape)):
+ return True
+ return False
+
+
def sort(self, axis=0, kind='quicksort', order=None, ascending=True):
"""
Sort values and index labels by value, in place. For compatibility with
@@ -1667,12 +1681,7 @@ def sort(self, axis=0, kind='quicksort', order=None, ascending=True):
sortedSeries = self.order(na_last=True, kind=kind,
ascending=ascending)
- true_base = self.values
- while true_base.base is not None:
- true_base = true_base.base
-
- if (true_base is not None and
- (true_base.ndim != 1 or true_base.shape != self.shape)):
+ if self.is_view():
raise TypeError('This Series is a view of some other array, to '
'sort in-place you must create a copy')
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index e8b421608fc0a..281e88b7bdba4 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -5548,6 +5548,13 @@ def test_unique_data_ownership(self):
# it works! #1807
Series(Series(["a", "c", "b"]).unique()).sort()
+def test_is_view():
+ df = tm.makeDataFrame()
+ view = df['A'].is_view()
+ tm.assert_equal(view, True)
+ ser = tm.makeStringSeries()
+ view = ser.is_view()
+ tm.assert_equal(view, False)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
| So you can check if a series is a view instead of waiting to get an error on sort or copying by default.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5853 | 2014-01-05T03:37:50Z | 2014-01-05T04:30:22Z | 2014-01-05T04:30:22Z | 2014-06-18T19:59:46Z |
ENH: enhancements to Panel.apply to enable arbitrary functions and multi-dim slicing (GH1148) | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 4ecde7e05256a..79f5af74c3985 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -785,6 +785,7 @@ Attributes and underlying data
Panel.axes
Panel.ndim
Panel.shape
+ Panel.dtypes
Conversion
~~~~~~~~~~
@@ -1122,7 +1123,7 @@ Indexing, iteration
~~~~~~~~~~~~~~~~~~~
.. autosummary::
:toctree: generated/
-
+
GroupBy.__iter__
GroupBy.groups
GroupBy.indices
@@ -1141,7 +1142,7 @@ Computations / Descriptive Stats
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autosummary::
:toctree: generated/
-
+
GroupBy.mean
GroupBy.median
GroupBy.std
@@ -1155,7 +1156,7 @@ Computations / Descriptive Stats
.. toctree::
:hidden:
-
+
generated/pandas.core.common.isnull
generated/pandas.core.common.notnull
generated/pandas.core.reshape.get_dummies
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index eef271be74a02..bd2980c2f1c9f 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -637,6 +637,81 @@ to :ref:`merging/joining functionality <merging>`:
s
s.map(t)
+
+.. _basics.apply_panel:
+
+Applying with a Panel
+~~~~~~~~~~~~~~~~~~~~~
+
+Applying with a ``Panel`` will pass a ``Series`` to the applied function. If the applied
+function returns a ``Series``, the result of the application will be a ``Panel``. If the applied function
+reduces to a scalar, the result of the application will be a ``DataFrame``.
+
+.. note::
+
+ Prior to 0.13.1 ``apply`` on a ``Panel`` would only work on ``ufuncs`` (e.g. ``np.sum/np.max``).
+
+.. ipython:: python
+
+ import pandas.util.testing as tm
+ panel = tm.makePanel(5)
+ panel
+ panel['ItemA']
+
+A transformational apply.
+
+.. ipython:: python
+
+ result = panel.apply(lambda x: x*2, axis='items')
+ result
+ result['ItemA']
+
+A reduction operation.
+
+.. ipython:: python
+
+ panel.apply(lambda x: x.dtype, axis='items')
+
+A similar reduction type operation
+
+.. ipython:: python
+
+ panel.apply(lambda x: x.sum(), axis='major_axis')
+
+This last reduction is equivalent to
+
+.. ipython:: python
+
+ panel.sum('major_axis')
+
+A transformation operation that returns a ``Panel``, but is computing
+the z-score across the ``major_axis``.
+
+.. ipython:: python
+
+ result = panel.apply(lambda x: (x-x.mean())/x.std(), axis='major_axis')
+ result
+ result['ItemA']
+
+Apply can also accept multiple axes in the ``axis`` argument. This will pass a
+``DataFrame`` of the cross-section to the applied function.
+
+.. ipython:: python
+
+ f = lambda x: (x-x.mean(1)/x.std(1))
+
+ result = panel.apply(f, axis = ['items','major_axis'])
+ result
+ result.loc[:,:,'ItemA']
+
+This is equivalent to the following
+
+.. ipython:: python
+
+ result = Panel(dict([ (ax,f(panel.loc[:,:,ax])) for ax in panel.minor_axis ]))
+ result
+ result.loc[:,:,'ItemA']
+
.. _basics.reindexing:
Reindexing and altering labels
@@ -1066,7 +1141,7 @@ or match a pattern:
Series(['1', '2', '3a', '3b', '03c']).str.match(pattern, as_indexer=True)
-The distinction between ``match`` and ``contains`` is strictness: ``match``
+The distinction between ``match`` and ``contains`` is strictness: ``match``
relies on strict ``re.match``, while ``contains`` relies on ``re.search``.
.. warning::
@@ -1078,7 +1153,7 @@ relies on strict ``re.match``, while ``contains`` relies on ``re.search``.
This old, deprecated behavior of ``match`` is still the default. As
demonstrated above, use the new behavior by setting ``as_indexer=True``.
In this mode, ``match`` is analagous to ``contains``, returning a boolean
- Series. The new behavior will become the default behavior in a future
+ Series. The new behavior will become the default behavior in a future
release.
Methods like ``match``, ``contains``, ``startswith``, and ``endswith`` take
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 9f0b42dd5b741..fc9f18279087b 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -73,6 +73,9 @@ Improvements to existing features
- df.info() view now display dtype info per column (:issue: `5682`)
- perf improvements in DataFrame ``count/dropna`` for ``axis=1``
- Series.str.contains now has a `regex=False` keyword which can be faster for plain (non-regex) string patterns. (:issue: `5879`)
+ - support ``dtypes`` on ``Panel``
+ - extend ``Panel.apply`` to allow arbitrary functions (rather than only ufuncs) (:issue:`1148`)
+ allow multiple axes to be used to operate on slabs of a ``Panel``
.. _release.bug_fixes-0.13.1:
diff --git a/doc/source/v0.13.1.txt b/doc/source/v0.13.1.txt
index 250adffdadbca..76b915c519440 100644
--- a/doc/source/v0.13.1.txt
+++ b/doc/source/v0.13.1.txt
@@ -29,6 +29,60 @@ Deprecations
Enhancements
~~~~~~~~~~~~
+- ``Panel.apply`` will work on non-ufuncs. See :ref:`the docs<basics.apply_panel>`.
+
+ .. ipython:: python
+
+ import pandas.util.testing as tm
+ panel = tm.makePanel(5)
+ panel
+ panel['ItemA']
+
+ Specifying an ``apply`` that operates on a Series (to return a single element)
+
+ .. ipython:: python
+
+ panel.apply(lambda x: x.dtype, axis='items')
+
+ A similar reduction type operation
+
+ .. ipython:: python
+
+ panel.apply(lambda x: x.sum(), axis='major_axis')
+
+ This is equivalent to
+
+ .. ipython:: python
+
+ panel.sum('major_axis')
+
+ A transformation operation that returns a Panel, but is computing
+ the z-score across the major_axis
+
+ .. ipython:: python
+
+ result = panel.apply(lambda x: (x-x.mean())/x.std(), axis='major_axis')
+ result
+ result['ItemA']
+
+- ``Panel.apply`` operating on cross-sectional slabs. (:issue:`1148`)
+
+ .. ipython:: python
+
+ f = lambda x: (x-x.mean(1)/x.std(1))
+
+ result = panel.apply(f, axis = ['items','major_axis'])
+ result
+ result.loc[:,:,'ItemA']
+
+ This is equivalent to the following
+
+ .. ipython:: python
+
+ result = Panel(dict([ (ax,f(panel.loc[:,:,ax])) for ax in panel.minor_axis ]))
+ result
+ result.loc[:,:,'ItemA']
+
Experimental
~~~~~~~~~~~~
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index b6cd643f47c5a..8c50396c503a0 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -17,8 +17,10 @@
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
+from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
+from pandas.tools.util import cartesian_product
from pandas import compat
from pandas.util.decorators import deprecate, Appender, Substitution
import pandas.core.common as com
@@ -333,26 +335,34 @@ def axis_pretty(a):
[class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])
return output
- def _get_plane_axes(self, axis):
+ def _get_plane_axes_index(self, axis):
"""
- Get my plane axes: these are already
+ Get my plane axes indexes: these are already
(as compared with higher level planes),
- as we are returning a DataFrame axes
+ as we are returning a DataFrame axes indexes
"""
- axis = self._get_axis_name(axis)
+ axis_name = self._get_axis_name(axis)
- if axis == 'major_axis':
- index = self.minor_axis
- columns = self.items
- if axis == 'minor_axis':
- index = self.major_axis
- columns = self.items
- elif axis == 'items':
- index = self.major_axis
- columns = self.minor_axis
+ if axis_name == 'major_axis':
+ index = 'minor_axis'
+ columns = 'items'
+ if axis_name == 'minor_axis':
+ index = 'major_axis'
+ columns = 'items'
+ elif axis_name == 'items':
+ index = 'major_axis'
+ columns = 'minor_axis'
return index, columns
+ def _get_plane_axes(self, axis):
+ """
+ Get my plane axes indexes: these are already
+ (as compared with higher level planes),
+ as we are returning a DataFrame axes
+ """
+ return [ self._get_axis(axi) for axi in self._get_plane_axes_index(axis) ]
+
fromDict = from_dict
def to_sparse(self, fill_value=None, kind='block'):
@@ -431,6 +441,10 @@ def as_matrix(self):
self._consolidate_inplace()
return self._data.as_matrix()
+ @property
+ def dtypes(self):
+ return self.apply(lambda x: x.dtype, axis='items')
+
#----------------------------------------------------------------------
# Getting and setting elements
@@ -827,25 +841,138 @@ def to_frame(self, filter_observations=True):
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
- def apply(self, func, axis='major'):
+ def apply(self, func, axis='major', **kwargs):
"""
- Apply
+ Applies function along input axis of the Panel
Parameters
----------
- func : numpy function
- Signature should match numpy.{sum, mean, var, std} etc.
+ func : function
+ Function to apply to each combination of 'other' axes
+ e.g. if axis = 'items', then the combination of major_axis/minor_axis
+ will be passed a Series
axis : {'major', 'minor', 'items'}
- fill_value : boolean, default True
- Replace NaN values with specified first
+ Additional keyword arguments will be passed as keywords to the function
+
+ Examples
+ --------
+ >>> p.apply(numpy.sqrt) # returns a Panel
+ >>> p.apply(lambda x: x.sum(), axis=0) # equiv to p.sum(0)
+ >>> p.apply(lambda x: x.sum(), axis=1) # equiv to p.sum(1)
+ >>> p.apply(lambda x: x.sum(), axis=2) # equiv to p.sum(2)
Returns
-------
- result : DataFrame or Panel
+ result : Pandas Object
"""
- i = self._get_axis_number(axis)
- result = np.apply_along_axis(func, i, self.values)
- return self._wrap_result(result, axis=axis)
+
+ if kwargs and not isinstance(func, np.ufunc):
+ f = lambda x: func(x, **kwargs)
+ else:
+ f = func
+
+ # 2d-slabs
+ if isinstance(axis, (tuple,list)) and len(axis) == 2:
+ return self._apply_2d(f, axis=axis)
+
+ axis = self._get_axis_number(axis)
+
+ # try ufunc like
+ if isinstance(f, np.ufunc):
+ try:
+ result = np.apply_along_axis(func, axis, self.values)
+ return self._wrap_result(result, axis=axis)
+ except (AttributeError):
+ pass
+
+ # 1d
+ return self._apply_1d(f, axis=axis)
+
+ def _apply_1d(self, func, axis):
+
+ axis_name = self._get_axis_name(axis)
+ ax = self._get_axis(axis)
+ ndim = self.ndim
+ values = self.values
+
+ # iter thru the axes
+ slice_axis = self._get_axis(axis)
+ slice_indexer = [0]*(ndim-1)
+ indexer = np.zeros(ndim, 'O')
+ indlist = list(range(ndim))
+ indlist.remove(axis)
+ indexer[axis] = slice(None, None)
+ indexer.put(indlist, slice_indexer)
+ planes = [ self._get_axis(axi) for axi in indlist ]
+ shape = np.array(self.shape).take(indlist)
+
+ # all the iteration points
+ points = cartesian_product(planes)
+
+ results = []
+ for i in range(np.prod(shape)):
+
+ # construct the object
+ pts = tuple([ p[i] for p in points ])
+ indexer.put(indlist, slice_indexer)
+
+ obj = Series(values[tuple(indexer)],index=slice_axis,name=pts)
+ result = func(obj)
+
+ results.append(result)
+
+ # increment the indexer
+ slice_indexer[-1] += 1
+ n = -1
+ while (slice_indexer[n] >= shape[n]) and (n > (1-ndim)):
+ slice_indexer[n-1] += 1
+ slice_indexer[n] = 0
+ n -= 1
+
+ # empty object
+ if not len(results):
+ return self._constructor(**self._construct_axes_dict())
+
+ # same ndim as current
+ if isinstance(results[0],Series):
+ arr = np.vstack([ r.values for r in results ])
+ arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))
+ tranp = np.array([axis]+indlist).argsort()
+ arr = arr.transpose(tuple(list(tranp)))
+ return self._constructor(arr,**self._construct_axes_dict())
+
+ # ndim-1 shape
+ results = np.array(results).reshape(shape)
+ if results.ndim == 2 and axis_name != self._info_axis_name:
+ results = results.T
+ planes = planes[::-1]
+ return self._construct_return_type(results,planes)
+
+ def _apply_2d(self, func, axis):
+ """ handle 2-d slices, equiv to iterating over the other axis """
+
+ ndim = self.ndim
+ axis = [ self._get_axis_number(a) for a in axis ]
+
+ # construct slabs, in 2-d this is a DataFrame result
+ indexer_axis = list(range(ndim))
+ for a in axis:
+ indexer_axis.remove(a)
+ indexer_axis = indexer_axis[0]
+
+ slicer = [ slice(None,None) ] * ndim
+ ax = self._get_axis(indexer_axis)
+
+ results = []
+ for i, e in enumerate(ax):
+
+ slicer[indexer_axis] = i
+ sliced = self.iloc[tuple(slicer)]
+
+ obj = func(sliced)
+ results.append((e,obj))
+
+ return self._construct_return_type(dict(results))
def _reduce(self, op, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
@@ -863,13 +990,33 @@ def _reduce(self, op, axis=0, skipna=True, numeric_only=None,
def _construct_return_type(self, result, axes=None, **kwargs):
""" return the type for the ndim of the result """
- ndim = result.ndim
- if self.ndim == ndim:
+ ndim = getattr(result,'ndim',None)
+
+ # need to assume they are the same
+ if ndim is None:
+ if isinstance(result,dict):
+ ndim = getattr(list(compat.itervalues(result))[0],'ndim',None)
+
+ # a saclar result
+ if ndim is None:
+ ndim = 0
+
+ # have a dict, so top-level is +1 dim
+ else:
+ ndim += 1
+
+ # scalar
+ if ndim == 0:
+ return Series(result)
+
+ # same as self
+ elif self.ndim == ndim:
""" return the construction dictionary for these axes """
if axes is None:
return self._constructor(result)
return self._constructor(result, **self._construct_axes_dict())
+ # sliced
elif self.ndim == ndim + 1:
if axes is None:
return self._constructor_sliced(result)
@@ -877,7 +1024,7 @@ def _construct_return_type(self, result, axes=None, **kwargs):
result, **self._extract_axes_for_slice(self, axes))
raise PandasError('invalid _construct_return_type [self->%s] '
- '[result->%s]' % (self.ndim, result.ndim))
+ '[result->%s]' % (self, result))
def _wrap_result(self, result, axis):
axis = self._get_axis_name(axis)
diff --git a/pandas/core/panelnd.py b/pandas/core/panelnd.py
index a7cfe49484d24..3eebd51190e3d 100644
--- a/pandas/core/panelnd.py
+++ b/pandas/core/panelnd.py
@@ -56,9 +56,10 @@ def __init__(self, *args, **kwargs):
self._init_data(*args, **kwargs)
klass.__init__ = __init__
- def _get_plane_axes(self, axis):
+ def _get_plane_axes_index(self, axis):
+ """ return the sliced index for this object """
- axis = self._get_axis_name(axis)
+ axis_name = self._get_axis_name(axis)
index = self._AXIS_ORDERS.index(axis)
planes = []
@@ -67,8 +68,8 @@ def _get_plane_axes(self, axis):
if index != self._AXIS_LEN:
planes.extend(self._AXIS_ORDERS[index + 1:])
- return [getattr(self, p) for p in planes]
- klass._get_plane_axes = _get_plane_axes
+ return planes
+ klass._get_plane_axes_index = _get_plane_axes_index
def _combine(self, other, func, axis=0):
if isinstance(other, klass):
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 30500ac57a7f6..08d3afe63ec86 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1061,6 +1061,105 @@ def test_convert_objects(self):
result = p.convert_objects(convert_numeric='force')
assert_panel_equal(result, expected)
+ def test_dtypes(self):
+
+ result = self.panel.dtypes
+ expected = DataFrame(np.dtype('float64'),index=self.panel.major_axis,columns=self.panel.minor_axis)
+ assert_frame_equal(result, expected)
+
+ def test_apply(self):
+ # GH1148
+
+ from pandas import Series,DataFrame
+
+ # ufunc
+ applied = self.panel.apply(np.sqrt)
+ self.assert_(assert_almost_equal(applied.values,
+ np.sqrt(self.panel.values)))
+
+ # ufunc same shape
+ result = self.panel.apply(lambda x: x*2, axis='items')
+ expected = self.panel*2
+ assert_panel_equal(result, expected)
+ result = self.panel.apply(lambda x: x*2, axis='major_axis')
+ expected = self.panel*2
+ assert_panel_equal(result, expected)
+ result = self.panel.apply(lambda x: x*2, axis='minor_axis')
+ expected = self.panel*2
+ assert_panel_equal(result, expected)
+
+ # reduction to DataFrame
+ result = self.panel.apply(lambda x: x.dtype, axis='items')
+ expected = DataFrame(np.dtype('float64'),index=self.panel.major_axis,columns=self.panel.minor_axis)
+ assert_frame_equal(result,expected)
+ result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
+ expected = DataFrame(np.dtype('float64'),index=self.panel.minor_axis,columns=self.panel.items)
+ assert_frame_equal(result,expected)
+ result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
+ expected = DataFrame(np.dtype('float64'),index=self.panel.major_axis,columns=self.panel.items)
+ assert_frame_equal(result,expected)
+
+ # reductions via other dims
+ expected = self.panel.sum(0)
+ result = self.panel.apply(lambda x: x.sum(), axis='items')
+ assert_frame_equal(result,expected)
+ expected = self.panel.sum(1)
+ result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
+ assert_frame_equal(result,expected)
+ expected = self.panel.sum(2)
+ result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
+ assert_frame_equal(result,expected)
+
+ # pass kwargs
+ result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
+ expected = self.panel.sum(0) + 5
+ assert_frame_equal(result,expected)
+
+ def test_apply_slabs(self):
+
+ # same shape as original
+ result = self.panel.apply(lambda x: x*2, axis = ['items','major_axis'])
+ expected = (self.panel*2).transpose('minor_axis','major_axis','items')
+ assert_panel_equal(result,expected)
+ result = self.panel.apply(lambda x: x*2, axis = ['major_axis','items'])
+ assert_panel_equal(result,expected)
+
+ result = self.panel.apply(lambda x: x*2, axis = ['items','minor_axis'])
+ expected = (self.panel*2).transpose('major_axis','minor_axis','items')
+ assert_panel_equal(result,expected)
+ result = self.panel.apply(lambda x: x*2, axis = ['minor_axis','items'])
+ assert_panel_equal(result,expected)
+
+ result = self.panel.apply(lambda x: x*2, axis = ['major_axis','minor_axis'])
+ expected = self.panel*2
+ assert_panel_equal(result,expected)
+ result = self.panel.apply(lambda x: x*2, axis = ['minor_axis','major_axis'])
+ assert_panel_equal(result,expected)
+
+ # reductions
+ result = self.panel.apply(lambda x: x.sum(0), axis = ['items','major_axis'])
+ expected = self.panel.sum(1).T
+ assert_frame_equal(result,expected)
+
+ result = self.panel.apply(lambda x: x.sum(1), axis = ['items','major_axis'])
+ expected = self.panel.sum(0)
+ assert_frame_equal(result,expected)
+
+ # transforms
+ f = lambda x: (x-x.mean(1)/x.std(1))
+
+ result = self.panel.apply(f, axis = ['items','major_axis'])
+ expected = Panel(dict([ (ax,f(self.panel.loc[:,:,ax])) for ax in self.panel.minor_axis ]))
+ assert_panel_equal(result,expected)
+
+ result = self.panel.apply(f, axis = ['major_axis','minor_axis'])
+ expected = Panel(dict([ (ax,f(self.panel.loc[ax])) for ax in self.panel.items ]))
+ assert_panel_equal(result,expected)
+
+ result = self.panel.apply(f, axis = ['minor_axis','items'])
+ expected = Panel(dict([ (ax,f(self.panel.loc[:,ax])) for ax in self.panel.major_axis ]))
+ assert_panel_equal(result,expected)
+
def test_reindex(self):
ref = self.panel['ItemB']
@@ -1989,12 +2088,6 @@ def test_get_dummies(self):
dummies = get_dummies(self.panel['Label'])
self.assert_(np.array_equal(dummies.values, minor_dummies.values))
- def test_apply(self):
- # ufunc
- applied = self.panel.apply(np.sqrt)
- self.assert_(assert_almost_equal(applied.values,
- np.sqrt(self.panel.values)))
-
def test_mean(self):
means = self.panel.mean(level='minor')
| closes #1148
A reproduction of the new docs section
Applying with a Panel will pass a Series to the applied function. If the applied function returns a Series, the result of the application will be a Panel. If the applied function reduces to a scalar, the result of the application will be a DataFrame.
Note Prior to 0.13.1 apply on a Panel would only work on ufuncs (e.g. np.sum/np.max).
```
In [120]: import pandas.util.testing as tm
In [121]: panel = tm.makePanel(5)
In [122]: panel
<class 'pandas.core.panel.Panel'>
Dimensions: 3 (items) x 5 (major_axis) x 4 (minor_axis)
Items axis: ItemA to ItemC
Major_axis axis: 2000-01-03 00:00:00 to 2000-01-07 00:00:00
Minor_axis axis: A to D
In [123]: panel['ItemA']
A B C D
2000-01-03 0.166882 -0.597361 -1.200639 0.174260
2000-01-04 -1.759496 -1.514940 -1.872993 -0.581163
2000-01-05 0.901336 -1.640398 0.825210 0.087916
2000-01-06 -0.317478 -1.130643 -0.392715 0.416971
2000-01-07 -0.681335 -0.245890 -1.994150 0.666084
[5 rows x 4 columns]
```
A transformational apply.
```
In [124]: result = panel.apply(lambda x: x*2, axis='items')
In [125]: result
<class 'pandas.core.panel.Panel'>
Dimensions: 3 (items) x 5 (major_axis) x 4 (minor_axis)
Items axis: ItemA to ItemC
Major_axis axis: 2000-01-03 00:00:00 to 2000-01-07 00:00:00
Minor_axis axis: A to D
In [126]: result['ItemA']
A B C D
2000-01-03 0.333764 -1.194722 -2.401278 0.348520
2000-01-04 -3.518991 -3.029880 -3.745986 -1.162326
2000-01-05 1.802673 -3.280796 1.650421 0.175832
2000-01-06 -0.634955 -2.261286 -0.785430 0.833943
2000-01-07 -1.362670 -0.491779 -3.988300 1.332168
[5 rows x 4 columns]
```
A reduction operation.
```
In [127]: panel.apply(lambda x: x.dtype, axis='items')
A B C D
2000-01-03 float64 float64 float64 float64
2000-01-04 float64 float64 float64 float64
2000-01-05 float64 float64 float64 float64
2000-01-06 float64 float64 float64 float64
2000-01-07 float64 float64 float64 float64
[5 rows x 4 columns]
```
A similar reduction type operation
```
In [128]: panel.apply(lambda x: x.sum(), axis='major_axis')
ItemA ItemB ItemC
A -1.690090 1.840259 0.010754
B -5.129232 0.860182 0.178018
C -4.635286 0.545328 2.456520
D 0.764068 -3.623586 1.761541
[4 rows x 3 columns]
```
This last reduction is equivalent to
```
In [129]: panel.sum('major_axis')
ItemA ItemB ItemC
A -1.690090 1.840259 0.010754
B -5.129232 0.860182 0.178018
C -4.635286 0.545328 2.456520
D 0.764068 -3.623586 1.761541
[4 rows x 3 columns]
```
A transformation operation that returns a Panel, but is computing the z-score across the major_axis.
```
In [130]: result = panel.apply(lambda x: (x-x.mean())/x.std(), axis='major_axis')
In [131]: result
<class 'pandas.core.panel.Panel'>
Dimensions: 3 (items) x 5 (major_axis) x 4 (minor_axis)
Items axis: ItemA to ItemC
Major_axis axis: 2000-01-03 00:00:00 to 2000-01-07 00:00:00
Minor_axis axis: A to D
In [132]: result['ItemA']
A B C D
2000-01-03 0.509389 0.719204 -0.234072 0.045812
2000-01-04 -1.434116 -0.820934 -0.809328 -1.567858
2000-01-05 1.250373 -1.031513 1.499214 -0.138629
2000-01-06 0.020723 -0.175899 0.457175 0.564271
2000-01-07 -0.346370 1.309142 -0.912988 1.096405
[5 rows x 4 columns]
```
Apply can also accept multiple axes in the axis argument. This will pass a DataFrame of the cross-section to the applied function.
```
In [133]: f = lambda x: (x-x.mean(1)/x.std(1))
In [134]: result = panel.apply(f, axis = ['items','major_axis'])
In [135]: result
<class 'pandas.core.panel.Panel'>
Dimensions: 4 (items) x 5 (major_axis) x 3 (minor_axis)
Items axis: A to D
Major_axis axis: 2000-01-03 00:00:00 to 2000-01-07 00:00:00
Minor_axis axis: ItemA to ItemC
In [136]: result.loc[:,:,'ItemA']
A B C D
2000-01-03 0.748886 -0.323319 -1.172352 0.370451
2000-01-04 -1.594544 -1.659365 -1.444732 -0.162764
2000-01-05 0.908832 -1.220236 0.237668 0.754405
2000-01-06 -1.024669 -0.081850 -0.792957 0.641960
2000-01-07 -0.884333 -0.472889 -1.474646 -0.671871
[5 rows x 4 columns]
```
This is equivalent to the following
```
In [137]: result = Panel(dict([ (ax,f(panel.loc[:,:,ax])) for ax in panel.minor_axis ]))
In [138]: result
<class 'pandas.core.panel.Panel'>
Dimensions: 4 (items) x 5 (major_axis) x 3 (minor_axis)
Items axis: A to D
Major_axis axis: 2000-01-03 00:00:00 to 2000-01-07 00:00:00
Minor_axis axis: ItemA to ItemC
In [139]: result.loc[:,:,'ItemA']
A B C D
2000-01-03 0.748886 -0.323319 -1.172352 0.370451
2000-01-04 -1.594544 -1.659365 -1.444732 -0.162764
2000-01-05 0.908832 -1.220236 0.237668 0.754405
2000-01-06 -1.024669 -0.081850 -0.792957 0.641960
2000-01-07 -0.884333 -0.472889 -1.474646 -0.671871
[5 rows x 4 columns]
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5850 | 2014-01-04T18:03:12Z | 2014-01-15T02:53:46Z | 2014-01-15T02:53:46Z | 2014-06-25T15:23:43Z |
BUG: Bug in selection with missing values via .ix from a duplicate indexed DataFrame failing | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 3d4b5e8facfa4..0150b233110a7 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -73,6 +73,7 @@ Bug Fixes
~~~~~~~~~
- Bug in Series replace with timestamp dict (:issue:`5797`)
- read_csv/read_table now respects the `prefix` kwarg (:issue:`5732`).
+ - Bug in selection with missing values via ``.ix`` from a duplicate indexed DataFrame failing (:issue:`5835`)
pandas 0.13.0
-------------
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index d636edeec0815..7f49f7c1993bd 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -3119,6 +3119,9 @@ def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=None,
if not allow_dups and not self.axes[axis].is_unique:
raise ValueError("cannot reindex from a duplicate axis")
+ if not self.is_consolidated():
+ self = self.consolidate()
+
if axis == 0:
return self._reindex_indexer_items(new_axis, indexer, fill_value)
@@ -3140,38 +3143,62 @@ def _reindex_indexer_items(self, new_items, indexer, fill_value):
new_blocks = []
is_unique = new_items.is_unique
+ # we have duplicates in the items and what we are reindexing
+ if not is_unique and not self.items.is_unique:
+
+ rl = self._set_ref_locs(do_refs='force')
+ for i, idx in enumerate(indexer):
+ item = new_items.take([i])
+ if idx >= 0:
+ blk, lidx = rl[idx]
+ blk = make_block(_block_shape(blk.iget(lidx)), item,
+ new_items, ndim=self.ndim, fastpath=True,
+ placement=[i])
+
+ # a missing value
+ else:
+ blk = self._make_na_block(item,
+ new_items,
+ placement=[i],
+ fill_value=fill_value)
+ new_blocks.append(blk)
+ new_blocks = _consolidate(new_blocks, new_items)
+
+
# keep track of what items aren't found anywhere
- l = np.arange(len(item_order))
- mask = np.zeros(len(item_order), dtype=bool)
- for blk in self.blocks:
- blk_indexer = blk.items.get_indexer(item_order)
- selector = blk_indexer != -1
+ else:
+ l = np.arange(len(item_order))
+ mask = np.zeros(len(item_order), dtype=bool)
- # update with observed items
- mask |= selector
+ for blk in self.blocks:
+ blk_indexer = blk.items.get_indexer(item_order)
+ selector = blk_indexer != -1
+
+ # update with observed items
+ mask |= selector
- if not selector.any():
- continue
+ if not selector.any():
+ continue
- new_block_items = new_items.take(selector.nonzero()[0])
- new_values = com.take_nd(blk.values, blk_indexer[selector], axis=0,
- allow_fill=False)
- placement = l[selector] if not is_unique else None
- new_blocks.append(make_block(new_values,
- new_block_items,
+ new_block_items = new_items.take(selector.nonzero()[0])
+ new_values = com.take_nd(blk.values, blk_indexer[selector], axis=0,
+ allow_fill=False)
+ placement = l[selector] if not is_unique else None
+ new_blocks.append(make_block(new_values,
+ new_block_items,
new_items,
- placement=placement,
- fastpath=True))
-
- if not mask.all():
- na_items = new_items[-mask]
- placement = l[-mask] if not is_unique else None
- na_block = self._make_na_block(na_items,
- new_items,
- placement=placement,
- fill_value=fill_value)
- new_blocks.append(na_block)
- new_blocks = _consolidate(new_blocks, new_items)
+ placement=placement,
+ fastpath=True))
+
+ if not mask.all():
+ na_items = new_items[-mask]
+ placement = l[-mask] if not is_unique else None
+ na_block = self._make_na_block(na_items,
+ new_items,
+ placement=placement,
+ fill_value=fill_value)
+ new_blocks.append(na_block)
+ new_blocks = _consolidate(new_blocks, new_items)
return self.__class__(new_blocks, new_axes)
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index fe3aac0e9eeaa..c48b4b84698b6 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -981,6 +981,14 @@ def test_dups_fancy_indexing(self):
result = df.ix[['A','A','E']]
assert_frame_equal(result, expected)
+ # GH 5835
+ # dups on index and missing values
+ df = DataFrame(np.random.randn(5,5),columns=['A','B','B','B','A'])
+
+ expected = pd.concat([df.ix[:,['A','B']],DataFrame(np.nan,columns=['C'],index=df.index)],axis=1)
+ result = df.ix[:,['A','B','C']]
+ assert_frame_equal(result, expected)
+
def test_indexing_mixed_frame_bug(self):
# GH3492
| closes #5835
| https://api.github.com/repos/pandas-dev/pandas/pulls/5849 | 2014-01-04T17:20:44Z | 2014-01-04T18:53:09Z | 2014-01-04T18:53:09Z | 2014-07-09T13:06:17Z |
BUG: tail raises on empty DataFrame | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 9ab175c07f169..c51651d321cd6 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -90,6 +90,8 @@ Bug Fixes
- Bug in ``BusinessDay`` when adding n days to a date not on offset when n>5 and n%5==0 (:issue:`5890`)
- Bug in assigning to chained series with a series via ix (:issue:`5928`)
- Bug in creating an empty DataFrame, copying, then assigning (:issue:`5932`)
+ - Bug in DataFrame.tail with empty frame (:issue:`5846`)
+ - DataFrame.head(0) returns self instead of empty frame (:issue:`5846`)
pandas 0.13.0
-------------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e5c5f362d7f58..a5a7021d9f4b7 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1570,8 +1570,12 @@ def head(self, n=5):
Returns first n rows
"""
l = len(self)
- if abs(n) > l:
- n = l if n > 0 else -l
+ if l == 0 or n==0:
+ return self
+ if n > l:
+ n = l
+ elif n < -l:
+ n = -l
return self.iloc[:n]
def tail(self, n=5):
@@ -1579,8 +1583,12 @@ def tail(self, n=5):
Returns last n rows
"""
l = len(self)
- if abs(n) > l:
- n = l if n > 0 else -l
+ if l == 0 or n == 0:
+ return self
+ if n > l:
+ n = l
+ elif n < -l:
+ n = -l
return self.iloc[-n:]
#----------------------------------------------------------------------
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index bded2fad36763..bfa57590e1760 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4259,12 +4259,25 @@ def test_repr_column_name_unicode_truncation_bug(self):
def test_head_tail(self):
assert_frame_equal(self.frame.head(), self.frame[:5])
assert_frame_equal(self.frame.tail(), self.frame[-5:])
-
+ assert_frame_equal(self.frame.head(0), self.frame)
+ assert_frame_equal(self.frame.tail(0), self.frame)
+ assert_frame_equal(self.frame.head(-1), self.frame[:-1])
+ assert_frame_equal(self.frame.tail(-1), self.frame[1:])
+ assert_frame_equal(self.frame.head(1), self.frame[:1])
+ assert_frame_equal(self.frame.tail(1), self.frame[-1:])
# with a float index
df = self.frame.copy()
df.index = np.arange(len(self.frame)) + 0.1
assert_frame_equal(df.head(), df.iloc[:5])
assert_frame_equal(df.tail(), df.iloc[-5:])
+ assert_frame_equal(df.head(0), df)
+ assert_frame_equal(df.tail(0), df)
+ assert_frame_equal(df.head(-1), df.iloc[:-1])
+ assert_frame_equal(df.tail(-1), df.iloc[1:])
+ #test empty dataframe
+ empty_df = DataFrame()
+ assert_frame_equal(empty_df.tail(), empty_df)
+ assert_frame_equal(empty_df.head(), empty_df)
def test_insert(self):
df = DataFrame(np.random.randn(5, 3), index=np.arange(5),
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 97e25f105db70..14082486f80a0 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -338,7 +338,7 @@ def test_head_tail(self):
self._compare(o.tail(), o.iloc[-5:])
# 0-len
- self._compare(o.head(0), o.iloc[:0])
+ self._compare(o.head(0), o.iloc[:])
self._compare(o.tail(0), o.iloc[0:])
# bounded
| closes #5846
calling tail on an empty frame threw an exception. In addition, df.tail(0) returns self without indexing.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5848 | 2014-01-04T16:18:26Z | 2014-01-15T00:34:38Z | 2014-01-15T00:34:38Z | 2014-06-29T04:32:50Z |
ENH: Improve error message for PeriodIndex to infer_freq. Closes #5841. | diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index cfe874484231b..3892897e43bb0 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -358,7 +358,7 @@ def get_offset(name):
else:
if name in _rule_aliases:
name = _rule_aliases[name]
-
+
if name not in _offset_map:
try:
# generate and cache offset
@@ -625,7 +625,7 @@ def _period_str_to_code(freqstr):
alias = _period_alias_dict[freqstr]
except KeyError:
raise ValueError("Unknown freqstr: %s" % freqstr)
-
+
return _period_code_map[alias]
@@ -647,6 +647,10 @@ def infer_freq(index, warn=True):
from pandas.tseries.index import DatetimeIndex
if not isinstance(index, DatetimeIndex):
+ from pandas.tseries.period import PeriodIndex
+ if isinstance(index, PeriodIndex):
+ raise ValueError("PeriodIndex given. Check the `freq` attribute "
+ "instead of using infer_freq.")
index = DatetimeIndex(index)
inferer = _FrequencyInferer(index, warn=warn)
@@ -850,7 +854,7 @@ def _get_wom_rule(self):
weekdays = unique(self.index.weekday)
if len(weekdays) > 1:
return None
-
+
week_of_months = unique((self.index.day - 1) // 7)
if len(week_of_months) > 1:
return None
diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py
index ad9c93592a26c..8d95e22e4c6f2 100644
--- a/pandas/tseries/tests/test_frequencies.py
+++ b/pandas/tseries/tests/test_frequencies.py
@@ -13,6 +13,7 @@
from pandas.tseries.tools import to_datetime
import pandas.tseries.frequencies as fmod
import pandas.tseries.offsets as offsets
+from pandas.tseries.period import PeriodIndex
import pandas.lib as lib
@@ -88,6 +89,10 @@ def test_anchored_shortcuts():
class TestFrequencyInference(tm.TestCase):
+ def test_raise_if_period_index(self):
+ index = PeriodIndex(start="1/1/1990", periods=20, freq="M")
+ self.assertRaises(ValueError, infer_freq, index)
+
def test_raise_if_too_few(self):
index = _dti(['12/31/1998', '1/3/1999'])
self.assertRaises(ValueError, infer_freq, index)
| closes #5841. I'm still not sure we shouldn't just return `freq` if a PeriodIndex is given. Seems easy enough.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5847 | 2014-01-04T15:34:02Z | 2014-01-04T18:48:48Z | 2014-01-04T18:48:48Z | 2014-07-16T08:45:16Z |
BLD: fix cythonized msgpack extension in setup.py GH5831 | diff --git a/setup.py b/setup.py
index 497c6a5644def..ce26946b76124 100755
--- a/setup.py
+++ b/setup.py
@@ -486,7 +486,8 @@ def pxd(name):
msgpack_ext = Extension('pandas.msgpack',
sources = [srcpath('msgpack',
- suffix=suffix, subdir='')],
+ suffix=suffix if suffix == '.pyx' else '.cpp',
+ subdir='')],
language='c++',
include_dirs=common_include,
define_macros=macros)
@@ -499,7 +500,7 @@ def pxd(name):
if suffix == '.pyx' and 'setuptools' in sys.modules:
# undo dumb setuptools bug clobbering .pyx sources back to .c
for ext in extensions:
- if ext.sources[0].endswith('.c'):
+ if ext.sources[0].endswith(('.c','.cpp')):
root, _ = os.path.splitext(ext.sources[0])
ext.sources[0] = root + suffix
| closes #5831
@yarikoptic, can you confirm this fixes the problem?
| https://api.github.com/repos/pandas-dev/pandas/pulls/5844 | 2014-01-04T03:28:50Z | 2014-01-10T12:26:16Z | 2014-01-10T12:26:16Z | 2014-06-24T04:26:01Z |
Add pandas cookbook to tutorials (for #5837) | diff --git a/doc/source/tutorials.rst b/doc/source/tutorials.rst
index fe12b9e9d855d..4ad9082b0cb9b 100644
--- a/doc/source/tutorials.rst
+++ b/doc/source/tutorials.rst
@@ -16,3 +16,43 @@ More complex recipes are in the :ref:`Cookbook<cookbook>`
Tutorials
---------
+
+Pandas Cookbook
+---------------
+
+The goal of this cookbook (by `Julia Evans <http://jvns.ca>`_) is to
+give you some concrete examples for getting started with pandas. These
+are examples with real-world data, and all the bugs and weirdness that
+that entails.
+
+Here are links to the v0.1 release. For an up-to-date table of contents, see the `pandas-cookbook GitHub
+repository <http://github.com/jvns/pandas-cookbook>`_.
+
+* | `A quick tour of the IPython
+ Notebook <http://nbviewer.ipython.org/github/jvns/pandas-c|%2055ookbook/blob/v0.1/cookbook/A%20quick%20tour%20of%20IPython%20Notebook.ipynb>`_
+ | Shows off IPython's awesome tab completion and magic functions.
+* | `Chapter 1: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%201%20-%20Reading%20from%20a%20CSV.ipynb>`_
+ Reading your data into pandas is pretty much the easiest thing. Even
+ when the encoding is wrong!
+* | `Chapter 2: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%202%20-%20Selecting%20data%20&%20finding%20the%20most%20common%20complaint%20type.ipynb>`_
+ It's not totally obvious how to select data from a pandas dataframe.
+ Here we explain the basics (how to take slices and get columns)
+* | `Chapter 3: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%203%20-%20Which%20borough%20has%20the%20most%20noise%20complaints%3F%20%28or%2C%20more%20selecting%20data%29.ipynb>`_
+ Here we get into serious slicing and dicing and learn how to filter
+ dataframes in complicated ways, really fast.
+* | `Chapter 4: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%204%20-%20Find%20out%20on%20which%20weekday%20people%20bike%20the%20most%20with%20groupby%20and%20aggregate.ipynb>`_
+ Groupby/aggregate is seriously my favorite thing about pandas
+ and I use it all the time. You should probably read this.
+* | `Chapter 5: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%205%20-%20Combining%20dataframes%20and%20scraping%20Canadian%20weather%20data.ipynb>`_
+ Here you get to find out if it's cold in Montreal in the winter
+ (spoiler: yes). Web scraping with pandas is fun! Here we combine dataframes.
+* | `Chapter 6: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%206%20-%20String%20operations%21%20Which%20month%20was%20the%20snowiest%3F.ipynb>`_
+ Strings with pandas are great. It has all these vectorized string
+ operations and they're the best. We will turn a bunch of strings
+ containing "Snow" into vectors of numbers in a trice.
+* | `Chapter 7: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%207%20-%20Cleaning%20up%20messy%20data.ipynb>`_
+ Cleaning up messy data is never a joy, but with pandas it's easier.
+* | `Chapter 8: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%208%20-%20How%20to%20deal%20with%20timestamps.ipynb>`_
+ Parsing Unix timestamps is confusing at first but it turns out
+ to be really easy.
+
| @jreback asked me to add something like this in https://github.com/jvns/pandas-cookbook/issues/1
I haven't been able to test that this displays correctly as I can't figure out how to build the Sphinx docs.
If someone can tell me how to build the docs, I can test it.
```
bork@kiwi ~/c/p/pandas> pwd
/home/bork/clones/pandas/pandas
bork@kiwi ~/c/p/pandas> python ../doc/make.py html
Error: Cannot find source directory.
Building HTML failed
```
```
bork@kiwi ~/c/p/doc> pwd
/home/bork/clones/pandas/doc
bork@kiwi ~/c/p/doc> python make.py html
Running Sphinx v1.1.3
cannot import name hashtable
Exception occurred while building, starting debugger:
Traceback (most recent call last):
File "/opt/anaconda/lib/python2.7/site-packages/sphinx/cmdline.py", line 188, in main
warningiserror, tags)
File "/opt/anaconda/lib/python2.7/site-packages/sphinx/application.py", line 102, in __init__
confoverrides or {}, self.tags)
File "/opt/anaconda/lib/python2.7/site-packages/sphinx/config.py", line 216, in __init__
exec code in config
File "/home/bork/clones/pandas/doc/source/conf.py", line 74, in <module>
import pandas
File "/home/bork/clones/pandas/pandas/__init__.py", line 6, in <module>
from . import hashtable, tslib, lib
ImportError: cannot import name hashtable
> /home/bork/clones/pandas/pandas/__init__.py(6)<module>()
-> from . import hashtable, tslib, lib
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5842 | 2014-01-03T22:42:00Z | 2014-01-07T11:04:01Z | 2014-01-07T11:04:01Z | 2014-06-26T12:23:17Z |
DOC: minor fix in extract docstring | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 1b2c80f90f97b..528440f454e57 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1057,14 +1057,14 @@ You can check whether elements contain a pattern:
.. ipython:: python
pattern = r'[a-z][0-9]'
- Series(['1', '2', '3a', '3b', '03c']).contains(pattern)
+ Series(['1', '2', '3a', '3b', '03c']).str.contains(pattern)
or match a pattern:
.. ipython:: python
- Series(['1', '2', '3a', '3b', '03c']).match(pattern, as_indexer=True)
+ Series(['1', '2', '3a', '3b', '03c']).str.match(pattern, as_indexer=True)
The distinction between ``match`` and ``contains`` is strictness: ``match``
relies on strict ``re.match``, while ``contains`` relies on ``re.search``.
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 02f422bb0b635..1d9139fa9a1c7 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -333,15 +333,11 @@ def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=False):
Returns
-------
- boolean Series
+ Series of boolean values
if as_indexer=True
Series of tuples
if as_indexer=False, default but deprecated
- Returns
- -------
- Series of boolean values
-
See Also
--------
contains : analagous, but less strict, relying on re.search instead of
@@ -414,14 +410,27 @@ def str_extract(arr, pat, flags=0):
A pattern with more than one group will return a DataFrame.
>>> Series(['a1', 'b2', 'c3']).str.extract('([ab])(\d)')
+ 0 1
+ 0 a 1
+ 1 b 2
+ 2 NaN NaN
A pattern may contain optional groups.
>>> Series(['a1', 'b2', 'c3']).str.extract('([ab])?(\d)')
+ 0 1
+ 0 a 1
+ 1 b 2
+ 2 NaN 3
Named groups will become column names in the result.
>>> Series(['a1', 'b2', 'c3']).str.extract('(?P<letter>[ab])(?P<digit>\d)')
+ letter digit
+ 0 a 1
+ 1 b 2
+ 2 NaN NaN
+
"""
regex = re.compile(pat, flags=flags)
| https://api.github.com/repos/pandas-dev/pandas/pulls/5838 | 2014-01-03T18:39:14Z | 2014-01-04T09:55:17Z | 2014-01-04T09:55:17Z | 2014-06-20T01:21:42Z | |
DOC: change doc refs to 0.13.1 | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 38ba0b064c192..0c4de4e5173de 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1457,9 +1457,9 @@ It's also possible to reset multiple options at once (using a regex):
reset_option("^display")
-.. versionadded:: 0.14.0
+.. versionadded:: 0.13.1
- Beginning with v0.14.0 the `option_context` context manager has been exposed through
+ Beginning with v0.13.1 the `option_context` context manager has been exposed through
the top-level API, allowing you to execute code with given option values. Option values
are restored automatically when you exit the `with` block:
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 0b25d0f6aa61a..3d4b5e8facfa4 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -45,7 +45,7 @@ analysis / manipulation tool available in any language.
* Binary installers on PyPI: http://pypi.python.org/pypi/pandas
* Documentation: http://pandas.pydata.org
-pandas 0.14.0
+pandas 0.13.1
-------------
**Release date:** not-yet-released
@@ -56,7 +56,7 @@ New features
API Changes
~~~~~~~~~~~
-.. _release.bug_fixes-0.14.0:
+.. _release.bug_fixes-0.13.1:
Experimental Features
~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt
index cbe8a776e64ef..8d46c8c54c5c6 100644
--- a/doc/source/v0.13.0.txt
+++ b/doc/source/v0.13.0.txt
@@ -1,6 +1,6 @@
.. _whatsnew_0130:
-v0.13.0 (January 1, 2014)
+v0.13.0 (January 3, 2014)
---------------------------
This is a major release from 0.12.0 and includes a number of API changes, several new features and
diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.13.1.txt
similarity index 74%
rename from doc/source/v0.14.0.txt
rename to doc/source/v0.13.1.txt
index b949a536043b4..dcb8564c89457 100644
--- a/doc/source/v0.14.0.txt
+++ b/doc/source/v0.13.1.txt
@@ -1,6 +1,6 @@
-.. _whatsnew_0140:
+.. _whatsnew_0131:
-v0.14.0 (???)
+v0.13.1 (???)
-------------
This is a major release from 0.13.0 and includes a number of API changes, several new features and
@@ -18,7 +18,7 @@ API changes
Prior Version Deprecations/Changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-These were announced changes in 0.13 or prior that are taking effect as of 0.14.0
+These were announced changes in 0.13 or prior that are taking effect as of 0.13.1
Deprecations
~~~~~~~~~~~~
@@ -32,9 +32,7 @@ Experimental
Bug Fixes
~~~~~~~~~
- - read_csv/read_table now respects the `prefix` kwarg (:issue:`5732`).
-
-See :ref:`V0.14.0 Bug Fixes<release.bug_fixes-0.14.0>` for an extensive list of bugs that have been fixed in 0.14.0.
+See :ref:`V0.13.1 Bug Fixes<release.bug_fixes-0.13.1>` for an extensive list of bugs that have been fixed in 0.13.1.
See the :ref:`full release notes
<release>` or issue tracker
| https://api.github.com/repos/pandas-dev/pandas/pulls/5833 | 2014-01-03T14:26:34Z | 2014-01-03T18:18:44Z | 2014-01-03T18:18:44Z | 2014-07-16T08:45:06Z | |
DOC: add 'pandas ecosystem' section to docs | diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
new file mode 100644
index 0000000000000..4c9e4ca4ef7ee
--- /dev/null
+++ b/doc/source/ecosystem.rst
@@ -0,0 +1,60 @@
+****************
+Pandas Ecosystem
+****************
+
+Increasingly, packages are being built on top of pandas to address specific needs
+in data preparation, analysis and visualization.
+This is encouraging because it means pandas is not only helping users to handle
+their data tasks but also that provides a better starting point for developers to
+build powerful and more focused data tools.
+The creation of libraries that complement pandas' functionality also allows pandas
+development to remain focused around it's original requirements.
+
+This is an in-exhaustive list of projects that build on pandas in order to provide
+tools in the PyData space.
+
+We'd like to make it easier for users to find these project, if you know of other
+substantial projects that you feel should be on this list, please let us know.
+
+`Statsmodels <http://statsmodels.sourceforge.net>`__
+-----------
+
+Statsmodels is the prominent python "statistics and econometrics library" and it has
+a long-standing special relationship with pandas. Statsmodels provides powerful statistics,
+econometrics, analysis and modeling functionality that is out of pandas' scope.
+Statsmodels leverages pandas objects as the underlying data container for computation.
+
+`Vincent <https://github.com/wrobstory/vincent>`__
+-------
+
+The `Vincent <https://github.com/wrobstory/vincent>`__ project leverages `Vega <https://github.com/trifacta/vega>`__ to create
+plots (that in turn, leverages `d3 <http://d3js.org/>`__). It has great support for pandas data objects.
+
+`yhat/ggplot <https://github.com/yhat/ggplot>`__
+-----------
+
+Hadley Wickham's `ggplot2 <http://ggplot2.org/>`__is a foundational exploratory visualization package for the R language.
+Based on `"The Grammer of Graphics" <http://www.cs.uic.edu/~wilkinson/TheGrammarOfGraphics/GOG.html>`__ it
+provides a powerful, declarative and extremely general way to generate plots of arbitrary data.
+It's really quite incredible. Various implementations to other languages are available,
+but a faithful implementation for python users has long been missing. Although still young
+(as of Jan-2014), the `yhat/ggplot <https://github.com/yhat/ggplot>` project has been
+progressing quickly in that direction.
+
+
+`Seaborn <https://github.com/mwaskom/seaborn>`__
+-------
+
+Although pandas has quite a bit of "just plot it" functionality built-in, visualization and
+in particular statistical graphics is a vast field with a long tradition and lots of ground
+to cover. `The Seaborn project <https://github.com/mwaskom/seaborn>`__ builds on top of pandas
+and `matplotlib <http://matplotlib.org>`__ to provide easy plotting of data which extends to
+more advanced types of plots then those offered by pandas.
+
+
+`Geopandas <https://github.com/kjordahl/geopandas>`__
+---------
+
+Geopandas extends pandas data objects to include geographic information which support
+geometric operations. If your work entails maps and geographical coordinates, and
+you love pandas, you should take a close look at Geopandas.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index c406c4f2cfa27..a416e4af4e486 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -130,7 +130,7 @@ See the package overview for more detail about what's in the library.
sparse
gotchas
r_interface
- related
+ ecosystem
comparison_with_r
comparison_with_sql
api
diff --git a/doc/source/related.rst b/doc/source/related.rst
deleted file mode 100644
index 33dad8115e5b1..0000000000000
--- a/doc/source/related.rst
+++ /dev/null
@@ -1,57 +0,0 @@
-************************
-Related Python libraries
-************************
-
-la (larry)
-----------
-
-Keith Goodman's excellent `labeled array package
-<http://pypi.python.org/pypi/la>`__ is very similar to pandas in many regards,
-though with some key differences. The main philosophical design difference is
-to be a wrapper around a single NumPy ``ndarray`` object while adding axis
-labeling and label-based operations and indexing. Because of this, creating a
-size-mutable object with heterogeneous columns (e.g. DataFrame) is not possible
-with the ``la`` package.
-
- - Provide a single n-dimensional object with labeled axes with functionally
- analogous data alignment semantics to pandas objects
- - Advanced / label-based indexing similar to that provided in pandas but
- setting is not supported
- - Stays much closer to NumPy arrays than pandas-- ``larry`` objects must be
- homogeneously typed
- - GroupBy support is relatively limited, but a few functions are available:
- ``group_mean``, ``group_median``, and ``group_ranking``
- - It has a collection of analytical functions suited to quantitative
- portfolio construction for financial applications
- - It has a collection of moving window statistics implemented in
- `Bottleneck <http://pypi.python.org/pypi/Bottleneck>`__
-
-statsmodels
------------
-
-The main `statistics and econometrics library
-<http://statsmodels.sourceforge.net>`__ for Python. pandas has become a
-dependency of this library.
-
-scikits.timeseries
-------------------
-
-`scikits.timeseries <http://pytseries.sourceforge.net/>`__ provides a data
-structure for fixed frequency time series data based on the numpy.MaskedArray
-class. For time series data, it provides some of the same functionality to the
-pandas Series class. It has many more functions for time series-specific
-manipulation. Also, it has support for many more frequencies, though less
-customizable by the user (so 5-minutely data is easier to do with pandas for
-example).
-
-We are aiming to merge these libraries together in the near future.
-
-Progress:
-
- - It has a collection of moving window statistics implemented in
- `Bottleneck <http://pandas.pydata.org/developers.html#development-roadmap>`__
- - `Outstanding issues <https://github.com/pydata/pandas/issues?labels=timeseries&milestone=&page=1&state=open>`__
-
-Summarising, Pandas offers superior functionality due to its combination with the :py:class:`pandas.DataFrame`.
-
-An introduction for former users of :mod:`scikits.timeseries` is provided in the :ref:`migration guide <ref-scikits-migration>`.
\ No newline at end of file
| cc @jseabold, @glamp, @mwaskom, @kjordahl, @wrobstory
Feel free to suggest a better blurb if you'd like.
Replaces the `related libraries` section, which I feel is outdated.
closes #5804
| https://api.github.com/repos/pandas-dev/pandas/pulls/5829 | 2014-01-03T02:57:56Z | 2014-01-03T02:58:01Z | 2014-01-03T02:58:01Z | 2014-07-14T12:38:07Z |
DOC: add pandas-xlsxwriter-charts ipnb to cookbook | diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 4c365455d1b03..b0b15410fb215 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -317,6 +317,10 @@ The :ref:`Plotting <visualization>` docs.
`Annotate a time-series plot #2
<http://stackoverflow.com/questions/17891493/annotating-points-from-a-pandas-dataframe-in-matplotlib-plot>`__
+`Generate Embedded plots in excel files using Pandas, Vincent and xlsxwriter
+<http://pandas-xlsxwriter-charts.readthedocs.org/en/latest/introduction.html>`__
+
+
Data In/Out
-----------
| cc @jmcnamara.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5828 | 2014-01-03T02:07:56Z | 2014-01-03T02:58:27Z | 2014-01-03T02:58:27Z | 2014-07-16T08:45:01Z |
DOC: Add example to extract docstring, and re-explain change to match. | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 38ba0b064c192..1b2c80f90f97b 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1029,7 +1029,7 @@ with more than one group returns a DataFrame with one column per group.
Series(['a1', 'b2', 'c3']).str.extract('([ab])(\d)')
-Elements that do not match return a row of ``NaN``s.
+Elements that do not match return a row filled with ``NaN``.
Thus, a Series of messy strings can be "converted" into a
like-indexed Series or DataFrame of cleaned-up or more useful strings,
without necessitating ``get()`` to access tuples or ``re.match`` objects.
@@ -1051,18 +1051,35 @@ can also be used.
Testing for Strings that Match or Contain a Pattern
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-In previous versions, *extracting* match groups was accomplished by ``match``,
-which returned a not-so-convenient Series of tuples. Starting in version 0.14,
-the default behavior of match will change. It will return a boolean
-indexer, analagous to the method ``contains``.
-The distinction between
-``match`` and ``contains`` is strictness: ``match`` relies on
-strict ``re.match`` while ``contains`` relies on ``re.search``.
+You can check whether elements contain a pattern:
-In version 0.13, ``match`` performs its old, deprecated behavior by default,
-but the new behavior is availabe through the keyword argument
-``as_indexer=True``.
+.. ipython:: python
+
+ pattern = r'[a-z][0-9]'
+ Series(['1', '2', '3a', '3b', '03c']).contains(pattern)
+
+or match a pattern:
+
+
+.. ipython:: python
+
+ Series(['1', '2', '3a', '3b', '03c']).match(pattern, as_indexer=True)
+
+The distinction between ``match`` and ``contains`` is strictness: ``match``
+relies on strict ``re.match``, while ``contains`` relies on ``re.search``.
+
+.. warning::
+
+ In previous versions, ``match`` was for *extracting* groups,
+ returning a not-so-convenient Series of tuples. The new method ``extract``
+ (described in the previous section) is now preferred.
+
+ This old, deprecated behavior of ``match`` is still the default. As
+ demonstrated above, use the new behavior by setting ``as_indexer=True``.
+ In this mode, ``match`` is analagous to ``contains``, returning a boolean
+ Series. The new behavior will become the default behavior in a future
+ release.
Methods like ``match``, ``contains``, ``startswith``, and ``endswith`` take
an extra ``na`` arguement so missing values can be considered True or False:
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 3b1b220d3fac7..02f422bb0b635 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -164,6 +164,11 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan):
Returns
-------
+ Series of boolean values
+
+ See Also
+ --------
+ match : analagous, but stricter, relying on re.match instead of re.search
"""
if not case:
@@ -326,11 +331,22 @@ def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=False):
as_indexer : False, by default, gives deprecated behavior better achieved
using str_extract. True return boolean indexer.
+ Returns
+ -------
+ boolean Series
+ if as_indexer=True
+ Series of tuples
+ if as_indexer=False, default but deprecated
Returns
-------
- matches : boolean array (if as_indexer=True)
- matches : array of tuples (if as_indexer=False, default but deprecated)
+ Series of boolean values
+
+ See Also
+ --------
+ contains : analagous, but less strict, relying on re.search instead of
+ re.match
+ extract : now preferred to the deprecated usage of match (as_indexer=False)
Notes
-----
@@ -385,10 +401,27 @@ def str_extract(arr, pat, flags=0):
-------
extracted groups : Series (one group) or DataFrame (multiple groups)
+ Examples
+ --------
+ A pattern with one group will return a Series. Non-matches will be NaN.
- Notes
- -----
- Compare to the string method match, which returns re.match objects.
+ >>> Series(['a1', 'b2', 'c3']).str.extract('[ab](\d)')
+ 0 1
+ 1 2
+ 2 NaN
+ dtype: object
+
+ A pattern with more than one group will return a DataFrame.
+
+ >>> Series(['a1', 'b2', 'c3']).str.extract('([ab])(\d)')
+
+ A pattern may contain optional groups.
+
+ >>> Series(['a1', 'b2', 'c3']).str.extract('([ab])?(\d)')
+
+ Named groups will become column names in the result.
+
+ >>> Series(['a1', 'b2', 'c3']).str.extract('(?P<letter>[ab])(?P<digit>\d)')
"""
regex = re.compile(pat, flags=flags)
| @jreback, I added examples per your request in #5099. Also, I rewrote the explanation of how `str.match` is changing, which I left confusing and wordy before.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5826 | 2014-01-02T21:57:20Z | 2014-01-03T15:24:28Z | 2014-01-03T15:24:28Z | 2014-07-16T08:44:59Z |
BUG: dropna dtype comp issue related (GH5815) | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 36cfbb524ab31..5544cd0b34e3c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3954,7 +3954,7 @@ def count(self, axis=0, level=None, numeric_only=False):
else:
result = notnull(frame).sum(axis=axis)
- return result
+ return result.astype('int64')
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
| fixes dtype issue on 32-bit, related #5815
| https://api.github.com/repos/pandas-dev/pandas/pulls/5820 | 2014-01-02T11:54:16Z | 2014-01-02T12:09:55Z | 2014-01-02T12:09:55Z | 2014-06-19T08:09:48Z |
BUG: Fix DatetimeIndex.insert() with strings. | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 0b25d0f6aa61a..95f7f7e71b89d 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -73,6 +73,7 @@ Bug Fixes
~~~~~~~~~
- Bug in Series replace with timestamp dict (:issue:`5797`)
- read_csv/read_table now respects the `prefix` kwarg (:issue:`5732`).
+ - Bug with insert of strings into DatetimeIndex (:issue:`5818`, :issue:`5819`)
pandas 0.13.0
-------------
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index bfddd2e78c322..9a9e3caa96c5b 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1455,7 +1455,6 @@ def _safe_append_to_index(index, key):
# raise here as this is basically an unsafe operation and we want
# it to be obvious that you are doing something wrong
-
raise ValueError("unsafe appending to index of type {0} with a key "
"{1}".format(index.__class__.__name__, key))
diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py
index bd05a7093fd7c..70f1e50f475bb 100644
--- a/pandas/sparse/tests/test_sparse.py
+++ b/pandas/sparse/tests/test_sparse.py
@@ -1084,8 +1084,10 @@ def test_icol(self):
def test_set_value(self):
- # this is invalid because it is not a valid type for this index
- self.assertRaises(ValueError, self.frame.set_value, 'foobar', 'B', 1.5)
+ # ok as the index gets conver to object
+ frame = self.frame.copy()
+ res = frame.set_value('foobar', 'B', 1.5)
+ self.assert_(res.index.dtype == 'object')
res = self.frame
res.index = res.index.astype(object)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index a1ef94d8400da..548f49e23a035 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -10897,6 +10897,19 @@ def test_reset_index_multiindex_col(self):
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
+ def test_reset_index_with_datetimeindex_cols(self):
+ # GH5818
+ #
+ df = pd.DataFrame([[1, 2], [3, 4]],
+ columns=pd.date_range('1/1/2013', '1/2/2013'),
+ index=['A', 'B'])
+
+ result = df.reset_index()
+ expected = pd.DataFrame([['A', 1, 2], ['B', 3, 4]],
+ columns=['index', datetime(2013, 1, 1),
+ datetime(2013, 1, 2)])
+ assert_frame_equal(result, expected)
+
#----------------------------------------------------------------------
# Tests to cope with refactored internals
def test_as_matrix_numeric_cols(self):
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index fe3aac0e9eeaa..ee57902bdeb5f 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1665,15 +1665,13 @@ def test_partial_set_invalid(self):
df = tm.makeTimeDataFrame()
+ # don't allow not string inserts
def f():
df.loc[100.0, :] = df.ix[0]
self.assertRaises(ValueError, f)
def f():
df.loc[100,:] = df.ix[0]
self.assertRaises(ValueError, f)
- def f():
- df.loc['a',:] = df.ix[0]
- self.assertRaises(ValueError, f)
def f():
df.ix[100.0, :] = df.ix[0]
@@ -1682,6 +1680,9 @@ def f():
df.ix[100,:] = df.ix[0]
self.assertRaises(ValueError, f)
+ # allow object conversion here
+ df.loc['a',:] = df.ix[0]
+
def test_partial_set_empty(self):
# GH5226
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 6779e1a61c081..8cf11dd921abf 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1533,6 +1533,8 @@ def insert(self, loc, item):
----------
loc : int
item : object
+ if not either a Python datetime or a numpy integer-like, returned
+ Index dtype will be object rather than datetime.
Returns
-------
@@ -1540,11 +1542,17 @@ def insert(self, loc, item):
"""
if isinstance(item, datetime):
item = _to_m8(item, tz=self.tz)
-
- new_index = np.concatenate((self[:loc].asi8,
+ try:
+ new_index = np.concatenate((self[:loc].asi8,
[item.view(np.int64)],
self[loc:].asi8))
- return DatetimeIndex(new_index, freq='infer')
+ return DatetimeIndex(new_index, freq='infer')
+ except (AttributeError, TypeError):
+
+ # fall back to object index
+ if isinstance(item,compat.string_types):
+ return self.asobject.insert(loc, item)
+ raise TypeError("cannot insert DatetimeIndex with incompatible label")
def delete(self, loc):
"""
@@ -1585,7 +1593,7 @@ def tz_convert(self, tz):
def tz_localize(self, tz, infer_dst=False):
"""
Localize tz-naive DatetimeIndex to given time zone (using pytz)
-
+
Parameters
----------
tz : string or pytz.timezone
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index f4dcdb7a44a3e..4dfe05e38458a 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -2099,6 +2099,13 @@ def test_insert(self):
'2000-01-02'])
self.assert_(result.equals(exp))
+ # insertion of non-datetime should coerce to object index
+ result = idx.insert(1, 'inserted')
+ expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
+ datetime(2000, 1, 2)])
+ self.assert_(not isinstance(result, DatetimeIndex))
+ tm.assert_index_equal(result, expected)
+
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assert_(result.freqstr == 'M')
| Falls back to object Index instead. (previously wasn't checking for them), but _only_ strings are allowed.
Fixes #5818.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5819 | 2014-01-02T07:14:00Z | 2014-01-24T22:20:52Z | 2014-01-24T22:20:51Z | 2014-07-09T11:59:16Z |
CLN: Make io/data urls easier to monkey-patch | diff --git a/pandas/io/data.py b/pandas/io/data.py
index 98ac860c391c8..b3332df3c8866 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -107,6 +107,9 @@ def _in_chunks(seq, size):
'time': 't1', 'short_ratio': 's7'}
+_YAHOO_QUOTE_URL = 'http://finance.yahoo.com/d/quotes.csv?'
+
+
def get_quote_yahoo(symbols):
"""
Get current yahoo quote
@@ -124,8 +127,7 @@ def get_quote_yahoo(symbols):
data = defaultdict(list)
- url_str = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (sym_list,
- request)
+ url_str = _YAHOO_QUOTE_URL + 's=%s&f=%s' % (sym_list, request)
with urlopen(url_str) as url:
lines = url.readlines()
@@ -175,6 +177,9 @@ def _retry_read_url(url, retry_count, pause, name):
"return a 200 for url %r" % (retry_count, name, url))
+_HISTORICAL_YAHOO_URL = 'http://ichart.finance.yahoo.com/table.csv?'
+
+
def _get_hist_yahoo(sym, start, end, retry_count, pause):
"""
Get historical data for the given name from yahoo.
@@ -183,8 +188,7 @@ def _get_hist_yahoo(sym, start, end, retry_count, pause):
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
- yahoo_url = 'http://ichart.finance.yahoo.com/table.csv?'
- url = (yahoo_url + 's=%s' % sym +
+ url = (_HISTORICAL_YAHOO_URL + 's=%s' % sym +
'&a=%s' % (start.month - 1) +
'&b=%s' % start.day +
'&c=%s' % start.year +
@@ -196,6 +200,9 @@ def _get_hist_yahoo(sym, start, end, retry_count, pause):
return _retry_read_url(url, retry_count, pause, 'Yahoo!')
+_HISTORICAL_GOOGLE_URL = 'http://www.google.com/finance/historical?'
+
+
def _get_hist_google(sym, start, end, retry_count, pause):
"""
Get historical data for the given name from google.
@@ -204,13 +211,13 @@ def _get_hist_google(sym, start, end, retry_count, pause):
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
- google_URL = 'http://www.google.com/finance/historical?'
# www.google.com/finance/historical?q=GOOG&startdate=Jun+9%2C+2011&enddate=Jun+8%2C+2013&output=csv
- url = google_URL + urlencode({"q": sym,
- "startdate": start.strftime('%b %d, ' '%Y'),
- "enddate": end.strftime('%b %d, %Y'),
- "output": "csv"})
+ url = "%s%s" % (_HISTORICAL_GOOGLE_URL,
+ urlencode({"q": sym,
+ "startdate": start.strftime('%b %d, ' '%Y'),
+ "enddate": end.strftime('%b %d, %Y'),
+ "output": "csv"}))
return _retry_read_url(url, retry_count, pause, 'Google')
@@ -251,6 +258,9 @@ def _calc_return_index(price_df):
return df
+_YAHOO_COMPONENTS_URL = 'http://download.finance.yahoo.com/d/quotes.csv?'
+
+
def get_components_yahoo(idx_sym):
"""
Returns DataFrame containing list of component information for
@@ -275,8 +285,7 @@ def get_components_yahoo(idx_sym):
stats = 'snx'
# URL of form:
# http://download.finance.yahoo.com/d/quotes.csv?s=@%5EIXIC&f=snxl1d1t1c1ohgv
- url = ('http://download.finance.yahoo.com/d/quotes.csv?s={0}&f={1}'
- '&e=.csv&h={2}')
+ url = _YAHOO_COMPONENTS_URL + 's={0}&f={1}&e=.csv&h={2}'
idx_mod = idx_sym.replace('^', '@%5E')
url_str = url.format(idx_mod, stats, 1)
@@ -430,6 +439,9 @@ def get_data_google(symbols=None, start=None, end=None, retry_count=3,
adjust_price, ret_index, chunksize, 'google', name)
+_FRED_URL = "http://research.stlouisfed.org/fred2/series/"
+
+
def get_data_fred(name, start=dt.datetime(2010, 1, 1),
end=dt.datetime.today()):
"""
@@ -443,14 +455,12 @@ def get_data_fred(name, start=dt.datetime(2010, 1, 1),
"""
start, end = _sanitize_dates(start, end)
- fred_URL = "http://research.stlouisfed.org/fred2/series/"
-
if not is_list_like(name):
names = [name]
else:
names = name
- urls = [fred_URL + '%s' % n + '/downloaddata/%s' % n + '.csv' for
+ urls = [_FRED_URL + '%s' % n + '/downloaddata/%s' % n + '.csv' for
n in names]
def fetch_data(url, name):
@@ -470,11 +480,12 @@ def fetch_data(url, name):
return df
+_FAMAFRENCH_URL = 'http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp'
+
+
def get_data_famafrench(name):
# path of zip files
- zip_file_url = ('http://mba.tuck.dartmouth.edu/pages/faculty/'
- 'ken.french/ftp')
- zip_file_path = '{0}/{1}.zip'.format(zip_file_url, name)
+ zip_file_path = '{0}/{1}.zip'.format(_FAMAFRENCH_URL, name)
with urlopen(zip_file_path) as url:
raw = url.read()
@@ -618,10 +629,12 @@ def get_options_data(self, month=None, year=None, expiry=None):
return [f(month, year, expiry) for f in (self.get_put_data,
self.get_call_data)]
+ _OPTIONS_BASE_URL = 'http://finance.yahoo.com/q/op?s={sym}'
+
def _get_option_data(self, month, year, expiry, table_loc, name):
year, month = self._try_parse_dates(year, month, expiry)
- url = 'http://finance.yahoo.com/q/op?s={sym}'.format(sym=self.symbol)
+ url = self._OPTIONS_BASE_URL.format(sym=self.symbol)
if month and year: # try to get specified month from yahoo finance
m1, m2 = _two_char_month(month), month
| This could be useful to make pandas more resilient to basic url changes
like that which happened with yahoo finance. That said, clearly
wholesale API changes won't be helped by this.
What do you all think, worth it to make this relatively trivial change?
| https://api.github.com/repos/pandas-dev/pandas/pulls/5817 | 2014-01-02T00:46:13Z | 2014-01-03T00:06:25Z | 2014-01-03T00:06:25Z | 2014-07-16T08:44:56Z |
PERF: perf issue with dropna on frame | diff --git a/doc/source/release.rst b/doc/source/release.rst
index c0e155372760f..0074d3b359cbe 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -67,6 +67,7 @@ Improvements to existing features
- perf improvements in Series datetime/timedelta binary operations (:issue:`5801`)
- `option_context` context manager now available as top-level API (:issue:`5752`)
- df.info() view now display dtype info per column (:issue: `5682`)
+ - perf improvements in DataFrame ``count/dropna`` for ``axis=1``
Bug Fixes
~~~~~~~~~
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 97c284fb75a43..36cfbb524ab31 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3952,7 +3952,7 @@ def count(self, axis=0, level=None, numeric_only=False):
counts = notnull(frame.values).sum(1)
result = Series(counts, index=frame._get_agg_axis(axis))
else:
- result = DataFrame.apply(frame, Series.count, axis=axis)
+ result = notnull(frame).sum(axis=axis)
return result
diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py
index ee4d876d20233..fd03d512125e7 100644
--- a/vb_suite/frame_methods.py
+++ b/vb_suite/frame_methods.py
@@ -289,6 +289,33 @@ def f(K=100):
frame_isnull = Benchmark('isnull(df)', setup,
start_date=datetime(2012,1,1))
+## dropna
+setup = common_setup + """
+data = np.random.randn(10000, 1000)
+df = DataFrame(data)
+df.ix[50:1000,20:50] = np.nan
+df.ix[2000:3000] = np.nan
+df.ix[:,60:70] = np.nan
+"""
+frame_dropna_axis0_any = Benchmark('df.dropna(how="any",axis=0)', setup,
+ start_date=datetime(2012,1,1))
+frame_dropna_axis0_all = Benchmark('df.dropna(how="all",axis=0)', setup,
+ start_date=datetime(2012,1,1))
+
+setup = common_setup + """
+data = np.random.randn(10000, 1000)
+df = DataFrame(data)
+df.ix[50:1000,20:50] = np.nan
+df.ix[2000:3000] = np.nan
+df.ix[:,60:70] = np.nan
+"""
+frame_dropna_axis1_any = Benchmark('df.dropna(how="any",axis=1)', setup,
+ start_date=datetime(2012,1,1))
+
+frame_dropna_axis1_all = Benchmark('df.dropna(how="all",axis=1)', setup,
+ start_date=datetime(2012,1,1))
+
+
#----------------------------------------------------------------------
# apply
@@ -298,3 +325,4 @@ def f(K=100):
"""
frame_apply_user_func = Benchmark('df.apply(lambda x: np.corrcoef(x,s)[0,1])', setup,
start_date=datetime(2012,1,1))
+
| took out the apply on `count` and just compute directly
```
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
frame_dropna_axis1_any | 147.5154 | 334.2137 | 0.4414 |
frame_dropna_axis1_all | 251.1443 | 437.9021 | 0.5735 |
frame_dropna_axis0_all | 80.6900 | 80.8613 | 0.9979 |
frame_dropna_axis0_any | 58.6040 | 54.6887 | 1.0716 |
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
Ratio < 1.0 means the target commit is faster then the baseline.
Seed used: 1234
Target [c6e300d] : PERF: perf issue with dropna on frame
Base [5e176a9] : Merge pull request #5738 from y-p/PR_json_pr_ver
BLD: ci/print_versions.py learned to output json
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5815 | 2014-01-01T18:50:03Z | 2014-01-01T19:13:26Z | 2014-01-01T19:13:26Z | 2014-07-16T08:44:54Z |
DOC: add way to document DatetimeIndex field attributes | diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 23b949c1fedfb..6779e1a61c081 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -36,7 +36,7 @@ def _utc():
# -------- some conversion wrapper functions
-def _field_accessor(name, field):
+def _field_accessor(name, field, docstring=None):
def f(self):
values = self.asi8
if self.tz is not None:
@@ -45,6 +45,7 @@ def f(self):
values = self._local_timestamps()
return tslib.get_date_field(values, field)
f.__name__ = name
+ f.__doc__ = docstring
return property(f)
@@ -1398,7 +1399,7 @@ def freqstr(self):
return self.offset.freqstr
year = _field_accessor('year', 'Y')
- month = _field_accessor('month', 'M')
+ month = _field_accessor('month', 'M', "The month as January=1, December=12")
day = _field_accessor('day', 'D')
hour = _field_accessor('hour', 'h')
minute = _field_accessor('minute', 'm')
@@ -1407,7 +1408,8 @@ def freqstr(self):
nanosecond = _field_accessor('nanosecond', 'ns')
weekofyear = _field_accessor('weekofyear', 'woy')
week = weekofyear
- dayofweek = _field_accessor('dayofweek', 'dow')
+ dayofweek = _field_accessor('dayofweek', 'dow',
+ "The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
dayofyear = _field_accessor('dayofyear', 'doy')
quarter = _field_accessor('quarter', 'q')
| Ping @rockg. The docstring is now listed in the autosummary table in api.rst.
Related to issue #5813 (but does not closes it, this only documents the DatetimeIndex field in the api.rst, not more general the Timestamp values).
I added the attribute docstring within the `_field_accessor` function. Is this a good approach?
You can also document attributes with a docstring line beneath the definition (see Sphinx docs: http://sphinx-doc.org/ext/autodoc.html#directive-autoattribute). The problem with this is that, for the moment, this works for the sphinx autodoc (so the generated pages), but not for the autosummary (there is an open PR for this: https://bitbucket.org/birkenfeld/sphinx/pull-request/142/make-autosummary-work-with-module-class/diff).
| https://api.github.com/repos/pandas-dev/pandas/pulls/5814 | 2014-01-01T17:11:07Z | 2014-01-02T20:17:17Z | 2014-01-02T20:17:17Z | 2014-06-29T12:30:55Z |
BUG: Yahoo finance changed chart base url. Updated _get_hist_yahoo | diff --git a/pandas/io/data.py b/pandas/io/data.py
index a3968446930e8..98ac860c391c8 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -183,7 +183,7 @@ def _get_hist_yahoo(sym, start, end, retry_count, pause):
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
- yahoo_url = 'http://ichart.yahoo.com/table.csv?'
+ yahoo_url = 'http://ichart.finance.yahoo.com/table.csv?'
url = (yahoo_url + 's=%s' % sym +
'&a=%s' % (start.month - 1) +
'&b=%s' % start.day +
| The start of the old url was: `http://ichart.yahoo.com/` and yahoo now uses `http://ichart.finance.yahoo.com/`
| https://api.github.com/repos/pandas-dev/pandas/pulls/5812 | 2013-12-31T21:50:44Z | 2014-01-01T18:52:09Z | 2014-01-01T18:52:09Z | 2014-06-12T19:28:36Z |
Update yahoo url in data.py | diff --git a/pandas/io/data.py b/pandas/io/data.py
index a3968446930e8..98ac860c391c8 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -183,7 +183,7 @@ def _get_hist_yahoo(sym, start, end, retry_count, pause):
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
- yahoo_url = 'http://ichart.yahoo.com/table.csv?'
+ yahoo_url = 'http://ichart.finance.yahoo.com/table.csv?'
url = (yahoo_url + 's=%s' % sym +
'&a=%s' % (start.month - 1) +
'&b=%s' % start.day +
| ichart.yahoo.com doesn't seem to be available, switched to ichart.finance.yahoo.com
| https://api.github.com/repos/pandas-dev/pandas/pulls/5811 | 2013-12-31T17:09:22Z | 2013-12-31T17:12:23Z | null | 2014-07-27T06:23:03Z |
BUG: fix issue of boolean comparison on empty DataFrames (GH5808) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 0150b233110a7..425f6dfe36990 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -74,6 +74,7 @@ Bug Fixes
- Bug in Series replace with timestamp dict (:issue:`5797`)
- read_csv/read_table now respects the `prefix` kwarg (:issue:`5732`).
- Bug in selection with missing values via ``.ix`` from a duplicate indexed DataFrame failing (:issue:`5835`)
+ - Fix issue of boolean comparison on empty DataFrames (:issue:`5808`)
pandas 0.13.0
-------------
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index b8f988e38f14b..a0e274b952817 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -736,11 +736,16 @@ def na_op(x, y):
result = np.empty(x.size, dtype=dtype)
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
- result[mask] = op(xrav[mask], yrav[mask])
+ xrav = xrav[mask]
+ yrav = yrav[mask]
+ if np.prod(xrav.shape) and np.prod(yrav.shape):
+ result[mask] = op(xrav, yrav)
else:
result = np.empty(x.size, dtype=x.dtype)
mask = notnull(xrav)
- result[mask] = op(xrav[mask], y)
+ xrav = xrav[mask]
+ if np.prod(xrav.shape):
+ result[mask] = op(xrav, y)
result, changed = com._maybe_upcast_putmask(result, -mask, np.nan)
result = result.reshape(x.shape)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index a1ef94d8400da..ef6990337bbbb 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4392,6 +4392,41 @@ def test_operators(self):
df = DataFrame({'a': ['a', None, 'b']})
assert_frame_equal(df + df, DataFrame({'a': ['aa', np.nan, 'bb']}))
+ def test_operators_boolean(self):
+
+ # GH 5808
+ # empty frames, non-mixed dtype
+
+ result = DataFrame(index=[1]) & DataFrame(index=[1])
+ assert_frame_equal(result,DataFrame(index=[1]))
+
+ result = DataFrame(index=[1]) | DataFrame(index=[1])
+ assert_frame_equal(result,DataFrame(index=[1]))
+
+ result = DataFrame(index=[1]) & DataFrame(index=[1,2])
+ assert_frame_equal(result,DataFrame(index=[1,2]))
+
+ result = DataFrame(index=[1],columns=['A']) & DataFrame(index=[1],columns=['A'])
+ assert_frame_equal(result,DataFrame(index=[1],columns=['A']))
+
+ result = DataFrame(True,index=[1],columns=['A']) & DataFrame(True,index=[1],columns=['A'])
+ assert_frame_equal(result,DataFrame(True,index=[1],columns=['A']))
+
+ result = DataFrame(True,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
+ assert_frame_equal(result,DataFrame(True,index=[1],columns=['A']))
+
+ # boolean ops
+ result = DataFrame(1,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
+ assert_frame_equal(result,DataFrame(1,index=[1],columns=['A']))
+
+ def f():
+ DataFrame(1.0,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
+ self.assertRaises(TypeError, f)
+
+ def f():
+ DataFrame('foo',index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
+ self.assertRaises(TypeError, f)
+
def test_operators_none_as_na(self):
df = DataFrame({"col1": [2, 5.0, 123, None],
"col2": [1, 2, 3, 4]}, dtype=object)
| closes #5808
| https://api.github.com/repos/pandas-dev/pandas/pulls/5810 | 2013-12-31T15:46:31Z | 2014-01-04T22:30:39Z | 2014-01-04T22:30:39Z | 2014-06-18T06:21:16Z |
Fixing ichart URL for yahoo finance historical data. | diff --git a/pandas/io/data.py b/pandas/io/data.py
index a3968446930e8..98ac860c391c8 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -183,7 +183,7 @@ def _get_hist_yahoo(sym, start, end, retry_count, pause):
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
- yahoo_url = 'http://ichart.yahoo.com/table.csv?'
+ yahoo_url = 'http://ichart.finance.yahoo.com/table.csv?'
url = (yahoo_url + 's=%s' % sym +
'&a=%s' % (start.month - 1) +
'&b=%s' % start.day +
| The URL needs to be changed, as the old way no longer works.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5809 | 2013-12-31T15:38:05Z | 2014-01-01T18:53:49Z | null | 2014-06-14T03:32:38Z |
BUG: Series replace values using timestamps in a dict GH5797 | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 0074d3b359cbe..1b32f385d3ab9 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -71,7 +71,7 @@ Improvements to existing features
Bug Fixes
~~~~~~~~~
-
+ - Bug in Series replace with timestamp dict (:issue:`5797`)
pandas 0.13.0
-------------
@@ -861,7 +861,7 @@ Bug Fixes
- Bug in fillna with Series and a passed series/dict (:issue:`5703`)
- Bug in groupby transform with a datetime-like grouper (:issue:`5712`)
- Bug in multi-index selection in PY3 when using certain keys (:issue:`5725`)
- - Row-wise concat of differeing dtypes failing in certain cases (:issue:`5754`)
+ - Row-wise concat of differing dtypes failing in certain cases (:issue:`5754`)
pandas 0.12.0
-------------
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index e76cf69eb420b..d636edeec0815 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -2322,7 +2322,7 @@ def replace_list(self, src_lst, dest_lst, inplace=False, regex=False):
def comp(s):
if isnull(s):
return isnull(values)
- return values == s
+ return values == getattr(s, 'asm8', s)
masks = [comp(s) for i, s in enumerate(src_lst)]
result_blocks = []
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 71ed0283fc3d0..e8b421608fc0a 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -5189,6 +5189,16 @@ def test_replace(self):
expected = ser.ffill()
result = ser.replace(np.nan)
assert_series_equal(result, expected)
+ #GH 5797
+ ser = Series(date_range('20130101', periods=5))
+ expected = ser.copy()
+ expected.loc[2] = Timestamp('20120101')
+ result = ser.replace({Timestamp('20130103'):
+ Timestamp('20120101')})
+ assert_series_equal(result, expected)
+ result = ser.replace(Timestamp('20130103'), Timestamp('20120101'))
+ assert_series_equal(result, expected)
+
def test_replace_with_single_list(self):
ser = Series([0, 1, 2, 3, 4])
| This fixes issue #5797.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5806 | 2013-12-31T14:29:00Z | 2014-01-02T22:58:45Z | 2014-01-02T22:58:45Z | 2014-06-16T14:08:41Z |
update to allow for replace using timestamps with a test. fixed issue 5797 | diff --git a/doc/source/release.rst b/doc/source/release.rst
index d4c9fa07e546f..5ae11ee5b9ba8 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -560,6 +560,7 @@ See :ref:`Internal Refactoring<whatsnew_0130.refactoring>`
- Added an internal ``_update_inplace`` method to facilitate updating
``NDFrame`` wrappers on inplace ops (only is for convenience of caller,
doesn't actually prevent copies). (:issue:`5247`)
+ - Bug in Series replace with timestamp dict (:issue:`5797`)
.. _release.bug_fixes-0.13.0:
@@ -860,6 +861,7 @@ Bug Fixes
- Bug in groupby transform with a datetime-like grouper (:issue:`5712`)
- Bug in multi-index selection in PY3 when using certain keys (:issue:`5725`)
- Row-wise concat of differeing dtypes failing in certain cases (:issue:`5754`)
+ - Bug in Series replace with timestamp dict (:issue:`5797`)
pandas 0.12.0
-------------
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index e76cf69eb420b..d636edeec0815 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -2322,7 +2322,7 @@ def replace_list(self, src_lst, dest_lst, inplace=False, regex=False):
def comp(s):
if isnull(s):
return isnull(values)
- return values == s
+ return values == getattr(s, 'asm8', s)
masks = [comp(s) for i, s in enumerate(src_lst)]
result_blocks = []
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 16e3368a2710d..5d9c22abbc277 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -5183,6 +5183,16 @@ def test_replace(self):
expected = ser.ffill()
result = ser.replace(np.nan)
assert_series_equal(result, expected)
+ #GH 5797
+ ser = Series(date_range('20130101', periods=5))
+ expected = ser.copy()
+ expected.loc[2] = Timestamp('20120101')
+ result = ser.replace({Timestamp('20130103'):
+ Timestamp('20120101')})
+ assert_series_equal(result, expected)
+ result = ser.replace(Timestamp('20130103'), Timestamp('20120101'))
+ assert_series_equal(result, expected)
+
def test_replace_with_single_list(self):
ser = Series([0, 1, 2, 3, 4])
| https://api.github.com/repos/pandas-dev/pandas/pulls/5805 | 2013-12-31T02:07:36Z | 2013-12-31T14:27:36Z | null | 2013-12-31T14:27:36Z | |
BLD: version strings should be updated only when tagging new release | diff --git a/setup.py b/setup.py
index 608532e919627..497c6a5644def 100755
--- a/setup.py
+++ b/setup.py
@@ -189,7 +189,7 @@ def build_extensions(self):
]
MAJOR = 0
-MINOR = 14
+MINOR = 13
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
| 0.13.0 was the first time I tagged a release and I got it wrong.
Version strings are bumped only when tagging a new release.
```
% grh v0.13.0rc1^
% sudo python ./setup.py develop
% cat pandas/version.py
version = '0.12.0-1189-gd9b3340'
short_version = '0.12.0'
```
It makes sense since it leaves the next version undetermined until it happens.
Keeping the status-quo for now, until we agree on something better.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5803 | 2013-12-31T00:37:30Z | 2013-12-31T00:37:43Z | 2013-12-31T00:37:43Z | 2014-06-15T18:45:46Z |
PERF: fix infer_dtype to properly infer a Series (GH5801) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 78c92ec11609e..7109b87f5352b 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -64,6 +64,8 @@ Experimental Features
Improvements to existing features
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ - perf improvements in Series datetime/timedelta binary operations (:issue:`5801`)
+
Bug Fixes
~~~~~~~~~
diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx
index dce46c972fb3b..84f1f3cb4904d 100644
--- a/pandas/src/inference.pyx
+++ b/pandas/src/inference.pyx
@@ -35,6 +35,8 @@ def infer_dtype(object _values):
if isinstance(_values, np.ndarray):
values = _values
+ elif hasattr(_values,'values'):
+ values = _values.values
else:
if not isinstance(_values, list):
_values = list(_values)
diff --git a/vb_suite/binary_ops.py b/vb_suite/binary_ops.py
index fc84dd8bcdb81..5ec2d9fcfc2cf 100644
--- a/vb_suite/binary_ops.py
+++ b/vb_suite/binary_ops.py
@@ -103,6 +103,9 @@
Benchmark("df[(df>0) & (df2>0)]", setup, name='frame_multi_and_no_ne',cleanup="expr.set_use_numexpr(True)",
start_date=datetime(2013, 2, 26))
+#----------------------------------------------------------------------
+# timeseries
+
setup = common_setup + """
N = 1000000
halfway = N // 2 - 1
@@ -114,3 +117,13 @@
start_date=datetime(2013, 9, 27))
series_timestamp_compare = Benchmark("s <= ts", setup,
start_date=datetime(2012, 2, 21))
+
+setup = common_setup + """
+N = 1000000
+s = Series(date_range('20010101', periods=N, freq='s'))
+"""
+
+timestamp_ops_diff1 = Benchmark("s.diff()", setup,
+ start_date=datetime(2013, 1, 1))
+timestamp_ops_diff2 = Benchmark("s-s.shift()", setup,
+ start_date=datetime(2013, 1, 1))
| closes #5801
```
------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
timestamp_ops_diff2 | 21.7124 | 2472.3583 | 0.0088 |
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
Ratio < 1.0 means the target commit is faster then the baseline.
Seed used: 1234
```
really though this was in their before....oh well
| https://api.github.com/repos/pandas-dev/pandas/pulls/5802 | 2013-12-30T22:20:36Z | 2013-12-30T22:54:10Z | 2013-12-30T22:54:10Z | 2014-06-15T19:45:24Z |
This fixes issue 5797 and allows replace using timestamps. | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 7109b87f5352b..3d23858de8fa5 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -859,6 +859,7 @@ Bug Fixes
- Bug in groupby transform with a datetime-like grouper (:issue:`5712`)
- Bug in multi-index selection in PY3 when using certain keys (:issue:`5725`)
- Row-wise concat of differeing dtypes failing in certain cases (:issue:`5754`)
+ - Bug in Series replace when using a dict of Pandas Timestamps (:issue:`5797`)
pandas 0.12.0
-------------
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index e76cf69eb420b..355e0819f499c 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -2315,14 +2315,13 @@ def replace(self, *args, **kwargs):
def replace_list(self, src_lst, dest_lst, inplace=False, regex=False):
""" do a list replace """
-
# figure out our mask a-priori to avoid repeated replacements
values = self.as_matrix()
def comp(s):
if isnull(s):
return isnull(values)
- return values == s
+ return values == getattr(s, 'asm8', s)
masks = [comp(s) for i, s in enumerate(src_lst)]
result_blocks = []
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 16e3368a2710d..054d54492be8d 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -5171,7 +5171,8 @@ def test_replace(self):
ser = Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
assert_series_equal(result, Series([4, 3, 2, 1, 0]))
-
+
+
# API change from 0.12?
# GH 5319
ser = Series([0, np.nan, 2, 3, 4])
@@ -5184,6 +5185,16 @@ def test_replace(self):
result = ser.replace(np.nan)
assert_series_equal(result, expected)
+ #GH 5797
+ ser = Series(date_range('20130101', periods=5))
+ expected = ser.copy()
+ expected.loc[2] = Timestamp('20120101')
+ result = ser.replace({Timestamp('20130103'):
+ Timestamp('20120101')})
+ assert_series_equal(result, expected)
+ result = ser.replace(Timestamp('20130103'), Timestamp('20120101'))
+ assert_series_equal(result, expected)
+
def test_replace_with_single_list(self):
ser = Series([0, 1, 2, 3, 4])
result = ser.replace([1,2,3])
| this fixes https://github.com/pydata/pandas/issues/5797
| https://api.github.com/repos/pandas-dev/pandas/pulls/5800 | 2013-12-30T21:28:59Z | 2013-12-31T01:25:48Z | null | 2014-07-25T08:41:04Z |
COMPAT: back compat for HDFStore with a Term | diff --git a/pandas/computation/pytables.py b/pandas/computation/pytables.py
index 7716bc0051159..bf477cd71df62 100644
--- a/pandas/computation/pytables.py
+++ b/pandas/computation/pytables.py
@@ -4,8 +4,8 @@
import time
import warnings
from functools import partial
-from datetime import datetime
-
+from datetime import datetime, timedelta
+import numpy as np
import pandas as pd
from pandas.compat import u, string_types, PY3
from pandas.core.base import StringMixin
@@ -540,6 +540,18 @@ def parse_back_compat(self, w, op=None, value=None):
if value is not None:
if isinstance(value, Expr):
raise TypeError("invalid value passed, must be a string")
+
+ # stringify with quotes these values
+ def convert(v):
+ if isinstance(v, (datetime,np.datetime64,timedelta,np.timedelta64)) or hasattr(v, 'timetuple'):
+ return "'{0}'".format(v)
+ return v
+
+ if isinstance(value, (list,tuple)):
+ value = [ convert(v) for v in value ]
+ else:
+ value = convert(value)
+
w = "{0}{1}".format(w, value)
warnings.warn("passing multiple values to Expr is deprecated, "
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index c9955b1ae2fb2..5fcafdc295c5c 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -2347,6 +2347,30 @@ def test_term_compat(self):
expected = wp.loc[:,wp.major_axis<=Timestamp('20000103'),:]
assert_panel_equal(result, expected)
+ with ensure_clean_store(self.path) as store:
+
+ wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
+ major_axis=date_range('1/1/2000', periods=5),
+ minor_axis=['A', 'B', 'C', 'D'])
+ store.append('wp',wp)
+
+ # stringified datetimes
+ result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2))])
+ expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
+ assert_panel_equal(result, expected)
+
+ result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2,0,0))])
+ expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
+ assert_panel_equal(result, expected)
+
+ result = store.select('wp', [Term('major_axis','=',[datetime.datetime(2000,1,2,0,0),datetime.datetime(2000,1,3,0,0)])])
+ expected = wp.loc[:,[Timestamp('20000102'),Timestamp('20000103')]]
+ assert_panel_equal(result, expected)
+
+ result = store.select('wp', [Term('minor_axis','=',['A','B'])])
+ expected = wp.loc[:,:,['A','B']]
+ assert_panel_equal(result, expected)
+
def test_same_name_scoping(self):
with ensure_clean_store(self.path) as store:
| https://api.github.com/repos/pandas-dev/pandas/pulls/5794 | 2013-12-30T14:55:50Z | 2013-12-30T14:55:55Z | 2013-12-30T14:55:55Z | 2014-06-26T12:05:55Z | |
DOC: fix minor doc build warnings | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 9aa6c68e9de8c..fa967a8e237d1 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -248,7 +248,7 @@ API Changes
- allow ``ix/loc`` for Series/DataFrame/Panel to set on any axis even when
the single-key is not currently contained in the index for that axis
(:issue:`2578`, :issue:`5226`, :issue:`5632`, :issue:`5720`,
- :issue:`5744`, :issue:`5756`)
+ :issue:`5744`, :issue:`5756`)
- Default export for ``to_clipboard`` is now csv with a sep of `\t` for
compat (:issue:`3368`)
- ``at`` now will enlarge the object inplace (and return the same)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e07655b0539a5..61235862534f0 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1852,7 +1852,7 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
limit : int, default None
Maximum size gap to forward or backward fill
downcast : dict, default is None
- a dict of item->dtype of what to downcast if possible,
+ a dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible)
| Two minor space-errors which caused a doc build warning.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5792 | 2013-12-30T09:44:34Z | 2013-12-30T11:27:19Z | 2013-12-30T11:27:19Z | 2014-07-16T08:44:39Z |
TST: aggregate_item_by_item test failure (GH5782) | diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 9d7e90e5f8f32..34c8869f72a53 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -583,8 +583,11 @@ def test_aggregate_item_by_item(self):
foo = (self.df.A == 'foo').sum()
bar = (self.df.A == 'bar').sum()
K = len(result.columns)
- assert_almost_equal(result.xs('foo'), [foo] * K)
- assert_almost_equal(result.xs('bar'), [bar] * K)
+
+ # GH5782
+ # odd comparisons can result here, so cast to make easy
+ assert_almost_equal(result.xs('foo'), np.array([foo] * K).astype('float64'))
+ assert_almost_equal(result.xs('bar'), np.array([bar] * K).astype('float64'))
def aggfun(ser):
return ser.size
| closes #5782
| https://api.github.com/repos/pandas-dev/pandas/pulls/5791 | 2013-12-29T23:34:43Z | 2013-12-29T23:35:06Z | 2013-12-29T23:35:06Z | 2014-06-19T10:35:39Z |
BUG: dont' always coerce reductions in a groupby always to datetimes | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 08061b1d14863..a9b56b6905b6b 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1527,17 +1527,22 @@ def _possibly_convert_objects(values, convert_dates=True,
values, convert_datetime=convert_dates)
# convert to numeric
- if convert_numeric and values.dtype == np.object_:
- try:
- new_values = lib.maybe_convert_numeric(
- values, set(), coerce_numeric=True)
+ if values.dtype == np.object_:
+ if convert_numeric:
+ try:
+ new_values = lib.maybe_convert_numeric(
+ values, set(), coerce_numeric=True)
- # if we are all nans then leave me alone
- if not isnull(new_values).all():
- values = new_values
+ # if we are all nans then leave me alone
+ if not isnull(new_values).all():
+ values = new_values
- except:
- pass
+ except:
+ pass
+ else:
+
+ # soft-conversion
+ values = lib.maybe_convert_objects(values)
return values
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 182f75e53ca5d..fb9b5e7831c88 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -22,6 +22,7 @@
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype)
+from pandas import _np_version_under1p7
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.algos as _algos
@@ -2243,16 +2244,19 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
try:
if self.axis == 0:
- stacked_values = np.vstack([np.asarray(x)
- for x in values])
- columns = v.index
- index = key_index
+ # normally use vstack as its faster than concat
+ # and if we have mi-columns
+ if not _np_version_under1p7 or isinstance(v.index,MultiIndex):
+ stacked_values = np.vstack([np.asarray(x) for x in values])
+ result = DataFrame(stacked_values,index=key_index,columns=v.index)
+ else:
+ # GH5788 instead of stacking; concat gets the dtypes correct
+ from pandas.tools.merge import concat
+ result = concat(values,keys=key_index,names=key_index.names,
+ axis=self.axis).unstack()
else:
- stacked_values = np.vstack([np.asarray(x)
- for x in values]).T
-
- index = v.index
- columns = key_index
+ stacked_values = np.vstack([np.asarray(x) for x in values])
+ result = DataFrame(stacked_values.T,index=v.index,columns=key_index)
except (ValueError, AttributeError):
# GH1738: values is list of arrays of unequal lengths fall
@@ -2261,15 +2265,14 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
- cd = True
- if self.obj.ndim == 2 and self.obj.dtypes.isin(_DATELIKE_DTYPES).any():
- cd = 'coerce'
- return DataFrame(stacked_values, index=index,
- columns=columns).convert_objects(convert_dates=cd, convert_numeric=True)
+ cd = 'coerce' if self.obj.ndim == 2 and self.obj.dtypes.isin(_DATELIKE_DTYPES).any() else True
+ return result.convert_objects(convert_dates=cd)
else:
- return Series(values, index=key_index).convert_objects(
- convert_dates='coerce',convert_numeric=True)
+ # only coerce dates if we find at least 1 datetime
+ cd = 'coerce' if any([ isinstance(v,Timestamp) for v in values ]) else False
+ return Series(values, index=key_index).convert_objects(convert_dates=cd)
+
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values,
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 4e657ca343c12..e76cf69eb420b 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -3556,12 +3556,14 @@ def _consolidate_inplace(self):
pass
-def construction_error(tot_items, block_shape, axes):
+def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
- raise ValueError("Shape of passed values is %s, indices imply %s" % (
- tuple(map(int, [tot_items] + list(block_shape))),
- tuple(map(int, [len(ax) for ax in axes]))))
-
+ passed = tuple(map(int, [tot_items] + list(block_shape)))
+ implied = tuple(map(int, [len(ax) for ax in axes]))
+ if passed == implied and e is not None:
+ raise e
+ raise ValueError("Shape of passed values is {0}, indices imply {1}".format(
+ passed,implied))
def create_block_manager_from_blocks(blocks, axes):
try:
@@ -3576,10 +3578,10 @@ def create_block_manager_from_blocks(blocks, axes):
mgr._consolidate_inplace()
return mgr
- except (ValueError):
+ except (ValueError) as e:
blocks = [getattr(b, 'values', b) for b in blocks]
tot_items = sum(b.shape[0] for b in blocks)
- construction_error(tot_items, blocks[0].shape[1:], axes)
+ construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
@@ -3588,8 +3590,8 @@ def create_block_manager_from_arrays(arrays, names, axes):
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
- except (ValueError):
- construction_error(len(arrays), arrays[0].shape[1:], axes)
+ except (ValueError) as e:
+ construction_error(len(arrays), arrays[0].shape[1:], axes, e)
def maybe_create_block_in_items_map(im, block):
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 7e54aa4e0813f..9d7e90e5f8f32 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -28,7 +28,7 @@
import pandas.core.nanops as nanops
import pandas.util.testing as tm
-
+import pandas as pd
def commonSetUp(self):
self.dateRange = bdate_range('1/1/2005', periods=250)
@@ -481,6 +481,36 @@ def test_apply_describe_bug(self):
grouped = self.mframe.groupby(level='first')
result = grouped.describe() # it works!
+ def test_apply_issues(self):
+ # GH 5788
+
+ s="""2011.05.16,00:00,1.40893
+2011.05.16,01:00,1.40760
+2011.05.16,02:00,1.40750
+2011.05.16,03:00,1.40649
+2011.05.17,02:00,1.40893
+2011.05.17,03:00,1.40760
+2011.05.17,04:00,1.40750
+2011.05.17,05:00,1.40649
+2011.05.18,02:00,1.40893
+2011.05.18,03:00,1.40760
+2011.05.18,04:00,1.40750
+2011.05.18,05:00,1.40649"""
+
+ df = pd.read_csv(StringIO(s), header=None, names=['date', 'time', 'value'], parse_dates=[['date', 'time']])
+ df = df.set_index('date_time')
+
+ expected = df.groupby(df.index.date).idxmax()
+ result = df.groupby(df.index.date).apply(lambda x: x.idxmax())
+ assert_frame_equal(result,expected)
+
+ # GH 5789
+ # don't auto coerce dates
+ df = pd.read_csv(StringIO(s), header=None, names=['date', 'time', 'value'])
+ expected = Series(['00:00','02:00','02:00'],index=['2011.05.16','2011.05.17','2011.05.18'])
+ result = df.groupby('date').apply(lambda x: x['time'][x['value'].idxmax()])
+ assert_series_equal(result,expected)
+
def test_len(self):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year,
| only when we have actual Timestamps in the data (GH5788,GH5789)
closes #5789
TST: tests for idxmax used in an apply
closes #5788
| https://api.github.com/repos/pandas-dev/pandas/pulls/5790 | 2013-12-29T03:59:24Z | 2013-12-29T16:23:22Z | 2013-12-29T16:23:22Z | 2020-01-19T23:27:51Z |
Add idxmax/idxmin to groupby dispatch whitelist (#5786) | diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index e8a9d6e49a066..182f75e53ca5d 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -65,6 +65,7 @@
'mad',
'any', 'all',
'irow', 'take',
+ 'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 942efdfc23740..7e54aa4e0813f 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -3264,6 +3264,7 @@ def test_groupby_whitelist(self):
'mad',
'any', 'all',
'irow', 'take',
+ 'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
@@ -3284,6 +3285,7 @@ def test_groupby_whitelist(self):
'mad',
'any', 'all',
'irow', 'take',
+ 'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
@@ -3413,7 +3415,7 @@ def test_tab_completion(self):
'resample', 'cummin', 'fillna', 'cumsum', 'cumcount',
'all', 'shift', 'skew', 'bfill', 'irow', 'ffill',
'take', 'tshift', 'pct_change', 'any', 'mad', 'corr', 'corrwith',
- 'cov', 'dtypes', 'diff',
+ 'cov', 'dtypes', 'diff', 'idxmax', 'idxmin'
])
self.assertEqual(results, expected)
| Closes #5786.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5787 | 2013-12-28T23:25:08Z | 2013-12-28T23:49:11Z | 2013-12-28T23:49:11Z | 2014-06-21T19:00:13Z |
BLD: print_versions get uname() via cross-platform API | diff --git a/pandas/util/print_versions.py b/pandas/util/print_versions.py
index 433b51b9c0e1e..c40366ec2d804 100644
--- a/pandas/util/print_versions.py
+++ b/pandas/util/print_versions.py
@@ -1,4 +1,5 @@
import os
+import platform
import sys
@@ -8,8 +9,12 @@ def show_versions():
print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
try:
- sysname, nodename, release, version, machine = os.uname()
- print("OS: %s %s %s %s" % (sysname, release, version, machine))
+ sysname, nodename, release, version, machine, processor = platform.uname()
+ print("OS: %s" % (sysname))
+ print("Release: %s" % (release))
+ #print("Version: %s" % (version))
+ #print("Machine: %s" % (machine))
+ print("Processor: %s" % (processor))
print("byteorder: %s" % sys.byteorder)
print("LC_ALL: %s" % os.environ.get('LC_ALL', "None"))
print("LANG: %s" % os.environ.get('LANG', "None"))
| @jtratner, guess it could fail after all. Windows doesn't support os.uname().
| https://api.github.com/repos/pandas-dev/pandas/pulls/5784 | 2013-12-28T04:19:45Z | 2013-12-28T04:19:58Z | 2013-12-28T04:19:58Z | 2014-06-27T23:28:24Z |
TST: ensure_clean skips test when fs doesn't support unicode (sparc) | diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 0db221d224b45..85353a4a90f7b 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -354,6 +354,10 @@ def ensure_clean(filename=None, return_filelike=False):
try:
fd, filename = tempfile.mkstemp(suffix=filename)
+ except UnicodeEncodeError:
+ raise nose.SkipTest('no unicode file names on this system')
+
+ try:
yield filename
finally:
try:
| http://nipy.bic.berkeley.edu/builders/pandas-py2.x-sid-sparc/builds/365/steps/shell_4/logs/stdio
These checks were removed from the tests during a refactor to use ensure_clean
| https://api.github.com/repos/pandas-dev/pandas/pulls/5783 | 2013-12-28T04:16:49Z | 2013-12-28T04:17:46Z | 2013-12-28T04:17:46Z | 2014-07-16T08:44:30Z |
TST: close sparc test failures | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 7b652c36ae47d..08061b1d14863 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -40,14 +40,17 @@ class AmbiguousIndexError(PandasError, KeyError):
_POSSIBLY_CAST_DTYPES = set([np.dtype(t)
- for t in ['M8[ns]', 'm8[ns]', 'O', 'int8',
+ for t in ['M8[ns]', '>M8[ns]', '<M8[ns]',
+ 'm8[ns]', '>m8[ns]', '<m8[ns]',
+ 'O', 'int8',
'uint8', 'int16', 'uint16', 'int32',
'uint32', 'int64', 'uint64']])
_NS_DTYPE = np.dtype('M8[ns]')
_TD_DTYPE = np.dtype('m8[ns]')
_INT64_DTYPE = np.dtype(np.int64)
-_DATELIKE_DTYPES = set([np.dtype(t) for t in ['M8[ns]', 'm8[ns]']])
+_DATELIKE_DTYPES = set([np.dtype(t) for t in ['M8[ns]', '<M8[ns]', '>M8[ns]',
+ 'm8[ns]', '<m8[ns]', '>m8[ns]']])
# define abstract base classes to enable isinstance type checking on our
@@ -1572,11 +1575,17 @@ def _possibly_cast_to_datetime(value, dtype, coerce=False):
# force the dtype if needed
if is_datetime64 and dtype != _NS_DTYPE:
- raise TypeError(
- "cannot convert datetimelike to dtype [%s]" % dtype)
+ if dtype.name == 'datetime64[ns]':
+ dtype = _NS_DTYPE
+ else:
+ raise TypeError(
+ "cannot convert datetimelike to dtype [%s]" % dtype)
elif is_timedelta64 and dtype != _TD_DTYPE:
- raise TypeError(
- "cannot convert timedeltalike to dtype [%s]" % dtype)
+ if dtype.name == 'timedelta64[ns]':
+ dtype = _TD_DTYPE
+ else:
+ raise TypeError(
+ "cannot convert timedeltalike to dtype [%s]" % dtype)
if np.isscalar(value):
if value == tslib.iNaT or isnull(value):
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 6ec08fe501bcd..4e657ca343c12 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1245,8 +1245,10 @@ def _try_operate(self, values):
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if isinstance(result, np.ndarray):
+ mask = isnull(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('m8[ns]')
+ result[mask] = tslib.iNaT
elif isinstance(result, np.integer):
result = np.timedelta64(result)
return result
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index f75cf7ebb18d1..5a842adb561b1 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -14,6 +14,10 @@
from pandas.util.misc import is_little_endian
from pandas import compat
+def skip_if_not_little_endian():
+ if not is_little_endian():
+ raise nose.SkipTest("known failure of test on non-little endian")
+
class TestStata(tm.TestCase):
def setUp(self):
@@ -145,9 +149,7 @@ def test_read_dta4(self):
tm.assert_frame_equal(parsed_13, expected)
def test_read_write_dta5(self):
- if not is_little_endian():
- raise nose.SkipTest("known failure of test_write_dta5 on "
- "non-little endian")
+ skip_if_not_little_endian()
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
@@ -161,9 +163,7 @@ def test_read_write_dta5(self):
original)
def test_write_dta6(self):
- if not is_little_endian():
- raise nose.SkipTest("known failure of test_write_dta6 on "
- "non-little endian")
+ skip_if_not_little_endian()
original = self.read_csv(self.csv3)
original.index.name = 'index'
@@ -193,9 +193,7 @@ def test_read_dta9(self):
tm.assert_frame_equal(parsed, expected)
def test_read_write_dta10(self):
- if not is_little_endian():
- raise nose.SkipTest("known failure of test_write_dta10 on "
- "non-little endian")
+ skip_if_not_little_endian()
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
@@ -232,6 +230,8 @@ def test_encoding(self):
self.assert_(isinstance(result, unicode))
def test_read_write_dta11(self):
+ skip_if_not_little_endian()
+
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
@@ -248,6 +248,8 @@ def test_read_write_dta11(self):
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
+ skip_if_not_little_endian()
+
original = DataFrame([(1, 2, 3, 4)],
columns=['astringwithmorethan32characters_1', 'astringwithmorethan32characters_2', '+', '-'])
formatted = DataFrame([(1, 2, 3, 4)],
| TST: closes #5778, failing tests on non-little endian for stata (spac)
TST: closes #5779, big endian compensation for datelike dtypes
| https://api.github.com/repos/pandas-dev/pandas/pulls/5780 | 2013-12-27T17:57:54Z | 2013-12-27T20:08:13Z | 2013-12-27T20:08:13Z | 2014-07-16T08:44:26Z |
BUG: setitem for iloc/loc with a slice on a Series (GH5771) | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 8444c7a9b2a00..bfddd2e78c322 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -420,7 +420,10 @@ def can_do_equal_len():
self.obj._maybe_update_cacher(clear=True)
def _align_series(self, indexer, ser):
- # indexer to assign Series can be tuple or scalar
+ # indexer to assign Series can be tuple, slice, scalar
+ if isinstance(indexer, slice):
+ indexer = tuple([indexer])
+
if isinstance(indexer, tuple):
aligners = [not _is_null_slice(idx) for idx in indexer]
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index f4e203444acfc..fe3aac0e9eeaa 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -456,6 +456,20 @@ def test_iloc_setitem(self):
result = df.iloc[:,2:3]
assert_frame_equal(result, expected)
+ # GH5771
+ s = Series(0,index=[4,5,6])
+ s.iloc[1:2] += 1
+ expected = Series([0,1,0],index=[4,5,6])
+ assert_series_equal(s, expected)
+
+ def test_loc_setitem(self):
+ # GH 5771
+ # loc with slice and series
+ s = Series(0,index=[4,5,6])
+ s.loc[4:5] += 1
+ expected = Series([1,1,0],index=[4,5,6])
+ assert_series_equal(s, expected)
+
def test_loc_getitem_int(self):
# int label
| closes #5771
| https://api.github.com/repos/pandas-dev/pandas/pulls/5772 | 2013-12-25T12:54:50Z | 2013-12-25T13:07:52Z | 2013-12-25T13:07:52Z | 2014-06-27T23:15:08Z |
BUG: regression in read_csv parser handling of usecols GH5766 | diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 93a26b70a019e..484c0c89fe72d 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -2049,6 +2049,16 @@ def test_usecols(self):
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
+
+ # 5766
+ result = self.read_csv(StringIO(data), names=['a', 'b'],
+ header=None, usecols=[0, 1])
+
+ expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
+ header=None)
+ expected = expected[['a', 'b']]
+ tm.assert_frame_equal(result, expected)
+
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
diff --git a/pandas/parser.pyx b/pandas/parser.pyx
index 36b4b91023a73..bb93097debf71 100644
--- a/pandas/parser.pyx
+++ b/pandas/parser.pyx
@@ -898,6 +898,9 @@ cdef class TextReader:
if i < self.leading_cols:
# Pass through leading columns always
name = i
+ elif self.usecols and nused == len(self.usecols):
+ # Once we've gathered all requested columns, stop. GH5766
+ break
else:
name = self._get_column_name(i, nused)
if self.has_usecols and not (i in self.usecols or
| https://api.github.com/repos/pandas-dev/pandas/pulls/5770 | 2013-12-25T01:58:46Z | 2013-12-25T01:58:59Z | 2013-12-25T01:58:59Z | 2014-06-29T15:36:16Z | |
BUG: IndexError on read_csv/read_table when using usecols/names parameters and omitting last column | diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 93a26b70a019e..7f3b9ecb2ca5b 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -2037,16 +2037,16 @@ def test_usecols(self):
4,5,6
7,8,9
10,11,12"""
- result = self.read_csv(StringIO(data), names=['b', 'c'],
- header=None, usecols=[1, 2])
+ result = self.read_csv(StringIO(data), names=['a', 'b'],
+ header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
- expected = expected[['b', 'c']]
+ expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
- header=None, usecols=['b', 'c'])
+ header=None, usecols=['a', 'b'])
tm.assert_frame_equal(result2, result)
# length conflict, passed names and usecols disagree
@@ -2072,7 +2072,6 @@ def test_catch_too_many_names(self):
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data), header=0, names=['a', 'b', 'c', 'd'])
-
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
diff --git a/pandas/parser.pyx b/pandas/parser.pyx
index 36b4b91023a73..4a93499f880c9 100644
--- a/pandas/parser.pyx
+++ b/pandas/parser.pyx
@@ -1135,8 +1135,11 @@ cdef class TextReader:
cdef _get_column_name(self, Py_ssize_t i, Py_ssize_t nused):
if self.has_usecols and self.names is not None:
- if len(self.names) == len(self.usecols):
+ if len(self.names) == len(self.usecols) and nused < len(self.names):
return self.names[nused]
+ # addresses Issue #5766
+ elif nused >= len(self.names):
+ return None
else:
return self.names[i - self.leading_cols]
else:
| This addresses an issue in read_csv/read_table where there is no header and both usecols and names are assigned but the last column is not included. This caused an IndexError after reaching the last column specified in usecols. Existing test cases for `usecols` were modified to expose and address this specific issue.
#5766
| https://api.github.com/repos/pandas-dev/pandas/pulls/5767 | 2013-12-23T23:16:54Z | 2013-12-25T01:59:56Z | null | 2014-06-19T21:11:04Z |
WIP: df rendering using templates + conditional formatting for HTML | diff --git a/pandas/io/api.py b/pandas/io/api.py
index dc9ea290eb45e..ca62a68d85ddc 100644
--- a/pandas/io/api.py
+++ b/pandas/io/api.py
@@ -12,3 +12,4 @@
from pandas.io.stata import read_stata
from pandas.io.pickle import read_pickle, to_pickle
from pandas.io.packers import read_msgpack, to_msgpack
+from pandas.io.templating import HTMLStyler
\ No newline at end of file
diff --git a/pandas/io/templating/__init__.py b/pandas/io/templating/__init__.py
new file mode 100644
index 0000000000000..da7cfd3e58d3d
--- /dev/null
+++ b/pandas/io/templating/__init__.py
@@ -0,0 +1,114 @@
+from abc import abstractmethod
+import os
+import uuid
+
+
+ROW_HEADING_CLASS = "pandas_row_heading"
+COL_HEADING_CLASS = "pandas_col_heading"
+DATA_CLASS = "pandas_data"
+DATA_CELL_TYPE = "data"
+ROW_CLASS = "pandas_row"
+COLUMN_CLASS = "pandas_col"
+HEADING_CELL_TYPE = "heading"
+BLANK_CLASS = "pandas_blank"
+BLANK_VALUE = ""
+LEVEL_CLASS = "pandas_level"
+
+TEMPLATE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
+ "templates")
+
+# TODO, cleaner handling of cell_context (extra_contaxt
+class Styler(object):
+
+ @abstractmethod
+ def render(self, f):
+ pass
+
+ def __init__(self, df, template, engine_instance=None,*args, **kwds):
+ super(Styler, self).__init__(*args, **kwds)
+ self.df = df
+ self.template = template
+ self.style = []
+ self.cell_context = {}
+
+ from pandas.io.templating.engines import Jinja2Engine
+ self.engine_instance = engine_instance or Jinja2Engine()
+ self.engine_instance.load(self.template)
+
+ def std_context(self, *args, **kwds):
+ df = self.df
+
+ n_rlvls = df.index.nlevels
+ n_clvls = df.columns.nlevels
+ rlabels = df.index.tolist()
+ clabels = df.columns.tolist()
+ if n_rlvls == 1:
+ rlabels = [[x] for x in rlabels]
+ if n_clvls == 1:
+ clabels = [[x] for x in clabels]
+ clabels = zip(*clabels)
+ head = []
+ for r in range(n_clvls):
+ row_es = [{"type": HEADING_CELL_TYPE, "value": BLANK_VALUE,
+ "class": " ".join([BLANK_CLASS])}] * n_rlvls
+ for c in range(len(clabels[0])):
+ cs = [COL_HEADING_CLASS, "%s%s" % (LEVEL_CLASS, r),
+ "%s%s" % (COLUMN_CLASS, c)]
+ cs.extend(
+ self.cell_context.get("col_headings", {}).get(r, {}).get(c,
+ []))
+ row_es.append(
+ {"type": HEADING_CELL_TYPE, "value": clabels[r][c],
+ "class": " ".join(cs)})
+ head.append(row_es)
+ body = []
+ for r in range(len(df)):
+ cs = [ROW_HEADING_CLASS, "%s%s" % (LEVEL_CLASS, c),
+ "%s%s" % (ROW_CLASS, r)]
+ cs.extend(
+ self.cell_context.get("row_headings", {}).get(r, {}).get(c, []))
+ row_es = [
+ {"type": HEADING_CELL_TYPE, "value": rlabels[r][c],
+ "class": " ".join(cs)}
+ for c in range(len(rlabels[r]))]
+ for c in range(len(df.columns)):
+ cs = [DATA_CLASS, "%s%s" % (ROW_CLASS, r),
+ "%s%s" % (COLUMN_CLASS, c)]
+ cs.extend(
+ self.cell_context.get("data", {}).get(r, {}).get(c, []))
+ row_es.append(
+ {"type": DATA_CELL_TYPE, "value": df.iloc[r][c],
+ "class": " ".join(cs)})
+ body.append(row_es)
+
+ # uuid required to isolate table styling from other tables
+ # on the page in ipnb
+ u = str(uuid.uuid1()).replace("-", "_")
+ return dict(head=head, body=body, uuid=u, style=self.style)
+
+ def render(self, f=None, **kwds):
+ encoding = kwds.pop('encoding', "utf8")
+ s = self.engine_instance.render(self.std_context())
+ if f:
+ with codecs.open(f, "wb", encoding) as f:
+ f.write(s)
+ else:
+ return s
+
+class ITemplateEngine(object):
+ """Interface for supporting multiple template engines
+
+ We'll support only a single engine, but help users help themselves.
+ """
+
+
+ @abstractmethod
+ def render(self, f=None,**kwds):
+ pass
+
+ @abstractmethod
+ def load(self, *args, **kwds):
+ pass
+
+
+from html import HTMLStyler
\ No newline at end of file
diff --git a/pandas/io/templating/colors.py b/pandas/io/templating/colors.py
new file mode 100644
index 0000000000000..f9b6d0873a6a2
--- /dev/null
+++ b/pandas/io/templating/colors.py
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+from __future__ import print_function
+
+import sys, os, re, json, codecs
+
+# integrate some colorbrewer package
+red_scale = ["#fff7ec", "#fee8c8", "#fdd49e",
+ "#fdbb84", "#fc8d59", "#ef6548",
+ "#d7301f", "#b30000", "#7f0000"]
\ No newline at end of file
diff --git a/pandas/io/templating/engines.py b/pandas/io/templating/engines.py
new file mode 100644
index 0000000000000..bb9c146bcd641
--- /dev/null
+++ b/pandas/io/templating/engines.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+from pandas.io.templating import ITemplateEngine
+
+
+class Jinja2Engine(ITemplateEngine):
+ def __init__(self):
+ from jinja2 import Template
+
+ def load(self, s):
+ from jinja2 import Template
+
+ self.t = Template(s)
+
+ def render(self, ctx):
+ if self.t:
+ return self.t.render(**ctx)
+ else:
+ raise AssertionError("Template not initialized, cannot render")
\ No newline at end of file
diff --git a/pandas/io/templating/html.py b/pandas/io/templating/html.py
new file mode 100644
index 0000000000000..2a35b81a726bc
--- /dev/null
+++ b/pandas/io/templating/html.py
@@ -0,0 +1,64 @@
+import uuid
+import os
+import functools
+from os.path import abspath, dirname
+
+from pandas.io.templating import *
+
+
+# TODO: caption
+
+class HTMLStyler(Styler):
+ def clone(self):
+ """We always return a modified copy when extra styling is added
+
+ `clone()` provides a copy to apply changes to
+ """
+ import copy
+
+ c = HTMLStyler(self.df,
+ template=self.template,
+ engine_instance=self.engine_instance)
+ c.style = copy.deepcopy(self.style)
+ c.cell_context = copy.deepcopy(self.cell_context)
+ return c
+
+ def __init__(self, *args, **kwds):
+
+ if not kwds.get('template'):
+ tmpl_filename = os.path.join(TEMPLATE_DIR, "html")
+ with open(tmpl_filename) as f:
+ kwds['template'] = f.read()
+
+ super(HTMLStyler, self).__init__(*args, **kwds)
+
+ import html_helpers
+ import types
+
+ for n in dir(html_helpers):
+ cand = getattr(html_helpers, n)
+ if callable(cand):
+ def f(cand):
+ @functools.wraps(cand)
+ def f(self, *args, **kwds):
+ self_copy = self.clone()
+ cand(self_copy, *args, **kwds)
+ # self.style.extend(sd.style)
+ # TODO, need to merge-with
+ # self.cell_context = sd.cell_context
+ return self_copy
+
+ return f
+
+ setattr(self, n, types.MethodType(f(cand), self))
+
+ def _repr_html_(self):
+ return self.render()
+
+ # def __dir__(self):
+ # import html_helpers
+ # return ["render","to_context"] + dir(html_helpers)
+
+ # def __getattr__(self, key):
+ # import html_helpers
+ # return getattr(html_helpers,key)
\ No newline at end of file
diff --git a/pandas/io/templating/html_helpers.py b/pandas/io/templating/html_helpers.py
new file mode 100644
index 0000000000000..dd158ceede6f7
--- /dev/null
+++ b/pandas/io/templating/html_helpers.py
@@ -0,0 +1,138 @@
+from __future__ import print_function
+
+import sys, os, re, json, codecs
+import uuid
+
+from pandas.io.templating import *
+
+__all__ = ["zebra", "hlrow", "hlcol"]
+
+from collections import namedtuple
+
+
+def zebra(s, color1, color2):
+ style = [dict(selector="td.%s:nth-child(2n)" % DATA_CLASS,
+ props=[("background-color", color1)]),
+ dict(selector="td.%s:nth-child(2n+1)" % DATA_CLASS,
+ props=[("background-color", color2)])]
+ s.style.extend(style)
+
+
+def hlcell(s, r, c, color="#aaa", with_headings=False):
+ selector = "td.%s%d.%s%d" % (ROW_CLASS, r, COLUMN_CLASS, c)
+ if not with_headings:
+ selector += ".%s" % DATA_CLASS
+ style = [dict(selector=selector,
+ props=[("background-color", color)])]
+ s.style.extend(style)
+
+
+def hlcol(s, n, color="#aaa", with_headings=False):
+ selector = "td.%s%d" % (COLUMN_CLASS, n)
+ if not with_headings:
+ selector += ".%s" % DATA_CLASS
+ style = [dict(selector=selector,
+ props=[("background-color", color)])]
+ s.style.extend(style)
+
+
+def hlrow(s, n, color="#ccc", with_headings=False):
+ selector = "td.%s%d" % (ROW_CLASS, n)
+ if not with_headings:
+ selector += ".%s" % DATA_CLASS
+ style = [dict(selector=selector,
+ props=[("background-color", color)])]
+ s.style.extend(style)
+
+
+def round_corners(s, radius):
+ props_bl = [
+ ("-moz-border-radius-bottomleft", "%dpx" % radius ),
+ ("-webkit-border-bottom-left-radius", "%dpx" % radius ),
+ ("border-bottom-left-radius", "%dpx" % radius )
+ ]
+ props_br = [
+ ("-moz-border-radius-bottomright", "%dpx" % radius ),
+ ("-webkit-border-bottom-right-radius", "%dpx" % radius ),
+ ("border-bottom-right-radius", "%dpx" % radius )
+ ]
+ props_tl = [
+ ("-moz-border-radius-topleft", "%dpx" % radius ),
+ ("-webkit-border-top-left-radius", "%dpx" % radius ),
+ ("border-top-left-radius", "%dpx" % radius )
+ ]
+ props_tr = [
+ ("-moz-border-radius-topright", "%dpx" % radius ),
+ ("-webkit-border-top-right-radius", "%dpx" % radius ),
+ ("border-top-right-radius", "%dpx" % radius )
+ ]
+
+ style = [
+ dict(selector="",
+ props=[("border-collapse", "separate")]),
+ dict(selector="td",
+ props=[("border-width", "0px")]),
+ dict(selector="th",
+ props=[("border-width", "0px")]),
+ dict(selector="td",
+ props=[("border-left-width", "1px")]),
+ dict(selector="td",
+ props=[("border-top-width", "1px")]),
+
+ dict(selector="tbody tr:last-child th",
+ props=[("border-bottom-width", "1px")]),
+ dict(selector="tr:last-child td",
+ props=[("border-bottom-width", "1px")]),
+
+ dict(selector="tr td:last-child",
+ props=[("border-right-width", "1px")]),
+ dict(selector="tr th:last-child",
+ props=[("border-right-width", "1px")]),
+ dict(selector="th",
+ props=[("border-left-width", "1px")]),
+ dict(selector="th",
+ props=[("border-top-width", "1px")]),
+ dict(selector="th td:last-child",
+ props=[("border-right-width", "1px")]),
+
+
+ dict(selector="tr:last-child th:first-child",
+ props=props_bl),
+ dict(selector="tr:last-child td:last-child",
+ props=props_br),
+ dict(selector="tr:first-child th.%s0" % COLUMN_CLASS,
+ props=props_tl),
+ dict(selector="tr:first-child th.%s0:first-child" % ROW_CLASS,
+ props=props_tl),
+ dict(selector="tr:first-child th:last-child",
+ props=props_tr),
+ ]
+
+ s.style.extend(style)
+
+#
+# def rank_heatmap(s, df, row=None, col=None):
+# def color_class(cls, color):
+# return [dict(selector="td.%s" % cls,
+# props=[("background-color", color)])]
+#
+# def rank_col(n, ranking, u):
+# data = {i: {n: ["%s-%s" % (u, ranking[i])]}
+# for i in range(len(ranking))}
+# return {"data": data}
+#
+#
+# # u = "U" + str(uuid.uuid1()).replace("-", "_")
+# # df = mkdf(9, 5, data_gen_f=lambda r, c: np.random.random())
+#
+# ranking = df.iloc[:, 1].argsort().tolist()
+# cell_context = rank_col(1, ranking, u)
+#
+# from .colors import red_scale
+#
+# style = [color_class("%s-%s" % (u, intensity),
+# red_scale[intensity])
+# for intensity in range(9)]
+#
+# s.style.extend(style)
+# # s.cell_context.extend(s) # TODO
diff --git a/pandas/io/templating/templates/html b/pandas/io/templating/templates/html
new file mode 100644
index 0000000000000..a9fa90385c793
--- /dev/null
+++ b/pandas/io/templating/templates/html
@@ -0,0 +1,61 @@
+{#jinja2 template#}
+
+<style type="text/css">
+ #T_{{uuid}} tr {
+ border: none;
+ }
+
+ #T_{{uuid}} {
+ border: none;
+ }
+
+ #T_{{uuid}} th.pandas_blank {
+ border: none;
+ }
+
+ {% for s in style %}
+ #T_{{uuid}} {{s.selector}} {
+ {% for p,val in s.props %}
+ {{p}}: {{val}};
+ {% endfor %}
+ }
+ {% endfor %}
+</style>
+
+
+<table id="T_{{ uuid }}">
+ {% if caption %}
+ <caption>
+ {{ caption }}
+ </caption>
+ {% endif %}
+
+ <thead>
+ {% for r in head %}
+ <tr>
+ {% for c in r %}
+ {% if c.type == "heading" %}
+ <th class="{{ c.class }}">{{ c.value }}</th>
+ {% else %}
+ <td class="{{ c.class }}">{{ c.value }}</td>
+ {% endif %}
+ {% endfor %}
+ </tr>
+ {% endfor %}
+ </thead>
+
+ <tbody>
+ {% for r in body %}
+ <tr>
+ {% for c in r %}
+ {% if c.type == "heading" %}
+ <th class="{{ c.class }}">{{ c.value }}</th>
+ {% else %}
+ <td class="{{ c.class }}">{{ c.value }}</td>
+ {% endif %}
+ {% endfor %}
+ </tr>
+ {% endfor %}
+ </tbody>
+
+</table>
\ No newline at end of file
diff --git a/pandas/io/templating/templates/latex b/pandas/io/templating/templates/latex
new file mode 100644
index 0000000000000..f7440e0a925ad
--- /dev/null
+++ b/pandas/io/templating/templates/latex
@@ -0,0 +1,23 @@
+{#jinja2 template#}
+
+\documentclass[11pt,a4paper]{article}
+\usepackage{booktabs}
+
+\begin{document}
+
+\begin{tabular}{ {% for c in body[0] %}l{% endfor %} }
+
+\toprule
+ {% for r in head %}
+ {% for c in r[:-1] %}{{ c.value }} & {% endfor %} {{ r[-1].value }} \\
+ {%- endfor %}
+
+\midrule
+ {% for r in body %}
+ {% for c in r[:-1] %}{{ c.value }} & {% endfor %} {{ r[-1].value }} \\
+ {%- endfor %}
+\bottomrule
+
+\end{tabular}
+
+\end{document}
\ No newline at end of file
diff --git a/pandas/io/templating/templates/markdown b/pandas/io/templating/templates/markdown
new file mode 100644
index 0000000000000..82397df703263
--- /dev/null
+++ b/pandas/io/templating/templates/markdown
@@ -0,0 +1,21 @@
+{# jinja2 template #}
+
+{% for r in head %}
+|{%- for c in r -%}
+ {%- if c.type == "heading" and "blank" not in c.class -%}
+ __{{ c.value }}__ |
+ {%- else -%}
+ {{ c.value }} |
+ {%- endif -%}
+{%- endfor %}
+{%- endfor %}
+| {% for c in head[0] %} --- | {% endfor -%}
+{% for r in body %}
+|{%- for c in r -%}
+ {%- if c.type == "heading" -%}
+ __{{ c.value }}__ |
+ {%- else -%}
+ {{ c.value }} |
+ {%- endif -%}
+{% endfor %}
+{%- endfor %}
\ No newline at end of file
diff --git a/pandas_HTML_styling.ipynb b/pandas_HTML_styling.ipynb
new file mode 100644
index 0000000000000..a2f52eac167e9
--- /dev/null
+++ b/pandas_HTML_styling.ipynb
@@ -0,0 +1,1809 @@
+{
+ "metadata": {
+ "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "###Conditional Formatting and CSS support for HTML in pandas\n",
+ "\n",
+ "This is a preview of functionality planned for inclusion in pandas 0.14.\n",
+ "\n",
+ "####Overview\n",
+ "\n",
+ "* We render HTML output using a template engine, Jinja2.\n",
+ "* The template engine renders a data object using the specified template. \n",
+ "* The data object (\"context\") we pass in contains a structured representation of the dataframe and, importantly, the css class names to attach to any given cell.\n",
+ "* The classes attached to any given cell are the union of two sources:\n",
+ " 1. *Required classes* are attached to table cells by default, as part of the standard context object constructed by pandas for a specific dataframe. Examples of classes are: \"col_heading\"/\"row_heading\"/\"data\", \"row0\",\"col3\". etc\n",
+ " 2. An additional nested dict (`cell_context`, provisional name) is passed in which can optionally attach classes to specific cells. e.g. `{data:{0:{2:[\"foo\"]}` will attach the class \"foo\" to the the data cell at df.iloc[0,2].\n",
+ "* since `cell_context` is constructed dynamically, we can construct \"styler\" functions that inspect the dataframe and attach arbitrary classes to various table cells by whatever logic we can express in code (i.e. anything).\n",
+ "* The actual styling is done using inline css which is included in the rendered HTML. The template context contains a style property, also produced dynamically by code, which can specify any selector + css property/value pair to be included in the inline `<style>` tag.\n",
+ "* By namespacing all css for a table under a UUID attached to the `<table>` element, we can isolate the styles of tables which share a single IPython Notebook \"page\".\n",
+ "\n",
+ "Taken together, this solution provides a flexible, open-ended way\n",
+ "to style the output with lots of control, with python code.\n",
+ "\n",
+ "By identifying common patterns (See included examples for: top-k heatmap, highlight columns/rowrow/col and zebra table) and \n",
+ "packaging the \"styler\" functions under a pandas namespace we\n",
+ "can offer a higher level API to the users for performing common tasks, but the power of the lower-level API is still available.\n",
+ "These styler function should be composable to a large degree, meaning\n",
+ "you can apply them after the other to incrementally reach the final\n",
+ "desired result (with caveats).\n",
+ "\n",
+ "Feedback welcome,\n",
+ "\n",
+ "@y-p\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "from jinja2 import Template"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 2
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "# The baseline jinja2 template for HTML output.\n",
+ "t=Template(\"\"\"\n",
+ "<style type=\"text/css\" >\n",
+ "#T_{{uuid}} tr {\n",
+ "border: none;\n",
+ "}\n",
+ "#T_{{uuid}} {\n",
+ "border: none;\n",
+ "}\n",
+ "#T_{{uuid}} th.blank {\n",
+ "border: none;\n",
+ "}\n",
+ "\n",
+ "\n",
+ "{% for s in style %}\n",
+ "#T_{{uuid}} {{s.selector}} { \n",
+ "{% for p,val in s.props %}\n",
+ " {{p}}: {{val}};\n",
+ "{% endfor %}\n",
+ " }\n",
+ "{% endfor %}\n",
+ "</style>\n",
+ "<table id=\"T_{{uuid}}\">\n",
+ "{% if caption %}\n",
+ "<caption>\n",
+ "{{caption}}\n",
+ "</caption>\n",
+ "{% endif %}\n",
+ "\n",
+ " <thead>\n",
+ "{% for r in head %}\n",
+ " <tr>\n",
+ " {% for c in r %} \n",
+ " <{{c.type}} class=\"{{c.class}}\">{{c.value}}</th>\n",
+ " {% endfor %}\n",
+ " </tr>\n",
+ "{% endfor %}\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " {% for r in body %}\n",
+ " <tr>\n",
+ " {% for c in r %} \n",
+ " <{{c.type}} class=\"{{c.class}}\">{{c.value}}</th>\n",
+ " {% endfor %}\n",
+ " </tr>\n",
+ "{% endfor %}\n",
+ " </tbody>\n",
+ "\n",
+ "</table>\n",
+ "\n",
+ "\"\"\")"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 3
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "# the implementation code. very small.\n",
+ "ROW_HEADING_CLASS=\"row_heading\"\n",
+ "COL_HEADING_CLASS=\"col_heading\"\n",
+ "DATA_CLASS=\"data\"\n",
+ "BLANK_CLASS=\"blank\"\n",
+ "BLANK_VALUE=\"\"\n",
+ "def translate(df,cell_context=None):\n",
+ " import uuid\n",
+ " cell_context = cell_context or dict()\n",
+ "\n",
+ " n_rlvls =df.index.nlevels\n",
+ " n_clvls =df.columns.nlevels\n",
+ " rlabels=df.index.tolist()\n",
+ " clabels=df.columns.tolist()\n",
+ " if n_rlvls == 1:\n",
+ " rlabels = [[x] for x in rlabels]\n",
+ " if n_clvls == 1:\n",
+ " clabels = [[x] for x in clabels]\n",
+ " clabels=zip(*clabels) \n",
+ " head=[]\n",
+ " for r in range(n_clvls): \n",
+ " row_es = [{\"type\":\"th\",\"value\":BLANK_VALUE ,\"class\": \" \".join([BLANK_CLASS])}]*n_rlvls\n",
+ " for c in range(len(clabels[0])):\n",
+ " cs = [COL_HEADING_CLASS,\"level%s\" % r,\"col%s\" %c]\n",
+ " cs.extend(cell_context.get(\"col_headings\",{}).get(r,{}).get(c,[]))\n",
+ " row_es.append({\"type\":\"th\",\"value\": clabels[r][c],\"class\": \" \".join(cs)})\n",
+ " head.append(row_es)\n",
+ " body=[]\n",
+ " for r in range(len(df)): \n",
+ " cs = [ROW_HEADING_CLASS,\"level%s\" % c,\"row%s\" % r]\n",
+ " cs.extend(cell_context.get(\"row_headings\",{}).get(r,{}).get(c,[]))\n",
+ " row_es = [{\"type\":\"th\",\"value\": rlabels[r][c],\"class\": \" \".join(cs)} \n",
+ " for c in range(len(rlabels[r]))]\n",
+ " for c in range(len(df.columns)):\n",
+ " cs = [DATA_CLASS,\"row%s\" % r,\"col%s\" %c]\n",
+ " cs.extend(cell_context.get(\"data\",{}).get(r,{}).get(c,[]))\n",
+ " row_es.append({\"type\":\"td\",\"value\": df.iloc[r][c],\"class\": \" \".join(cs)})\n",
+ " body.append(row_es)\n",
+ "\n",
+ " # uuid required to isolate table styling from others \n",
+ " # in same notebook in ipnb\n",
+ " u = str(uuid.uuid1()).replace(\"-\",\"_\")\n",
+ " return dict(head=head, body=body,uuid=u)\n"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 4
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "##Examples"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "# first, vanilla\n",
+ "df=mkdf(10,5,r_idx_nlevels=3,c_idx_nlevels=2)\n",
+ "from IPython.display import HTML,display\n",
+ "ctx= translate(df)\n",
+ "ctx['caption']=\"Just a table, but rendered using a template with lots of classes to style against\"\n",
+ "display(HTML(t.render(**ctx)))\n"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "html": [
+ "\n",
+ "<style type=\"text/css\" >\n",
+ "#T_17e7e48e_6aa7_11e3_bffd_001a4d511830 tr {\n",
+ "border: none;\n",
+ "}\n",
+ "#T_17e7e48e_6aa7_11e3_bffd_001a4d511830 {\n",
+ "border: none;\n",
+ "}\n",
+ "#T_17e7e48e_6aa7_11e3_bffd_001a4d511830 th.blank {\n",
+ "border: none;\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ "</style>\n",
+ "<table id=\"T_17e7e48e_6aa7_11e3_bffd_001a4d511830\">\n",
+ "\n",
+ "<caption>\n",
+ "Just a table, but rendered using a template with lots of classes to style against\n",
+ "</caption>\n",
+ "\n",
+ "\n",
+ " <thead>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col0\">C_l0_g0</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col1\">C_l0_g1</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col2\">C_l0_g2</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col3\">C_l0_g3</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col4\">C_l0_g4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col0\">C_l1_g0</th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col1\">C_l1_g1</th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col2\">C_l1_g2</th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col3\">C_l1_g3</th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col4\">C_l1_g4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row0\">R_l0_g0</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row0\">R_l1_g0</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row0\">R_l2_g0</th>\n",
+ " \n",
+ " <td class=\"data row0 col0\">R0C0</th>\n",
+ " \n",
+ " <td class=\"data row0 col1\">R0C1</th>\n",
+ " \n",
+ " <td class=\"data row0 col2\">R0C2</th>\n",
+ " \n",
+ " <td class=\"data row0 col3\">R0C3</th>\n",
+ " \n",
+ " <td class=\"data row0 col4\">R0C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row1\">R_l0_g1</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row1\">R_l1_g1</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row1\">R_l2_g1</th>\n",
+ " \n",
+ " <td class=\"data row1 col0\">R1C0</th>\n",
+ " \n",
+ " <td class=\"data row1 col1\">R1C1</th>\n",
+ " \n",
+ " <td class=\"data row1 col2\">R1C2</th>\n",
+ " \n",
+ " <td class=\"data row1 col3\">R1C3</th>\n",
+ " \n",
+ " <td class=\"data row1 col4\">R1C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row2\">R_l0_g2</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row2\">R_l1_g2</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row2\">R_l2_g2</th>\n",
+ " \n",
+ " <td class=\"data row2 col0\">R2C0</th>\n",
+ " \n",
+ " <td class=\"data row2 col1\">R2C1</th>\n",
+ " \n",
+ " <td class=\"data row2 col2\">R2C2</th>\n",
+ " \n",
+ " <td class=\"data row2 col3\">R2C3</th>\n",
+ " \n",
+ " <td class=\"data row2 col4\">R2C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row3\">R_l0_g3</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row3\">R_l1_g3</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row3\">R_l2_g3</th>\n",
+ " \n",
+ " <td class=\"data row3 col0\">R3C0</th>\n",
+ " \n",
+ " <td class=\"data row3 col1\">R3C1</th>\n",
+ " \n",
+ " <td class=\"data row3 col2\">R3C2</th>\n",
+ " \n",
+ " <td class=\"data row3 col3\">R3C3</th>\n",
+ " \n",
+ " <td class=\"data row3 col4\">R3C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row4\">R_l0_g4</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row4\">R_l1_g4</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row4\">R_l2_g4</th>\n",
+ " \n",
+ " <td class=\"data row4 col0\">R4C0</th>\n",
+ " \n",
+ " <td class=\"data row4 col1\">R4C1</th>\n",
+ " \n",
+ " <td class=\"data row4 col2\">R4C2</th>\n",
+ " \n",
+ " <td class=\"data row4 col3\">R4C3</th>\n",
+ " \n",
+ " <td class=\"data row4 col4\">R4C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row5\">R_l0_g5</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row5\">R_l1_g5</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row5\">R_l2_g5</th>\n",
+ " \n",
+ " <td class=\"data row5 col0\">R5C0</th>\n",
+ " \n",
+ " <td class=\"data row5 col1\">R5C1</th>\n",
+ " \n",
+ " <td class=\"data row5 col2\">R5C2</th>\n",
+ " \n",
+ " <td class=\"data row5 col3\">R5C3</th>\n",
+ " \n",
+ " <td class=\"data row5 col4\">R5C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row6\">R_l0_g6</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row6\">R_l1_g6</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row6\">R_l2_g6</th>\n",
+ " \n",
+ " <td class=\"data row6 col0\">R6C0</th>\n",
+ " \n",
+ " <td class=\"data row6 col1\">R6C1</th>\n",
+ " \n",
+ " <td class=\"data row6 col2\">R6C2</th>\n",
+ " \n",
+ " <td class=\"data row6 col3\">R6C3</th>\n",
+ " \n",
+ " <td class=\"data row6 col4\">R6C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row7\">R_l0_g7</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row7\">R_l1_g7</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row7\">R_l2_g7</th>\n",
+ " \n",
+ " <td class=\"data row7 col0\">R7C0</th>\n",
+ " \n",
+ " <td class=\"data row7 col1\">R7C1</th>\n",
+ " \n",
+ " <td class=\"data row7 col2\">R7C2</th>\n",
+ " \n",
+ " <td class=\"data row7 col3\">R7C3</th>\n",
+ " \n",
+ " <td class=\"data row7 col4\">R7C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row8\">R_l0_g8</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row8\">R_l1_g8</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row8\">R_l2_g8</th>\n",
+ " \n",
+ " <td class=\"data row8 col0\">R8C0</th>\n",
+ " \n",
+ " <td class=\"data row8 col1\">R8C1</th>\n",
+ " \n",
+ " <td class=\"data row8 col2\">R8C2</th>\n",
+ " \n",
+ " <td class=\"data row8 col3\">R8C3</th>\n",
+ " \n",
+ " <td class=\"data row8 col4\">R8C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row9\">R_l0_g9</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row9\">R_l1_g9</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row9\">R_l2_g9</th>\n",
+ " \n",
+ " <td class=\"data row9 col0\">R9C0</th>\n",
+ " \n",
+ " <td class=\"data row9 col1\">R9C1</th>\n",
+ " \n",
+ " <td class=\"data row9 col2\">R9C2</th>\n",
+ " \n",
+ " <td class=\"data row9 col3\">R9C3</th>\n",
+ " \n",
+ " <td class=\"data row9 col4\">R9C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " </tbody>\n",
+ "\n",
+ "</table>\n"
+ ],
+ "metadata": {},
+ "output_type": "display_data",
+ "text": [
+ "<IPython.core.display.HTML at 0x42a4550>"
+ ]
+ }
+ ],
+ "prompt_number": 5
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "def zebra(color1, color2):\n",
+ " return [dict(selector=\"td.data:nth-child(2n)\" ,\n",
+ " props=[(\"background-color\",color1)]),\n",
+ " dict(selector=\"td.data:nth-child(2n+1)\" ,\n",
+ " props=[(\"background-color\",color2)])]\n",
+ "\n",
+ "ctx= translate(df)\n",
+ "style=[]\n",
+ "style.extend(zebra(\"#aaa\",\"#ddd\"))\n",
+ "ctx['style']=style\n",
+ "ctx['caption']=\"A zebra table\"\n",
+ "display(HTML(t.render(**ctx)))\n"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "html": [
+ "\n",
+ "<style type=\"text/css\" >\n",
+ "#T_1bc45290_6aa7_11e3_bffd_001a4d511830 tr {\n",
+ "border: none;\n",
+ "}\n",
+ "#T_1bc45290_6aa7_11e3_bffd_001a4d511830 {\n",
+ "border: none;\n",
+ "}\n",
+ "#T_1bc45290_6aa7_11e3_bffd_001a4d511830 th.blank {\n",
+ "border: none;\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ "#T_1bc45290_6aa7_11e3_bffd_001a4d511830 td.data:nth-child(2n) { \n",
+ "\n",
+ " background-color: #aaa;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "#T_1bc45290_6aa7_11e3_bffd_001a4d511830 td.data:nth-child(2n+1) { \n",
+ "\n",
+ " background-color: #ddd;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "</style>\n",
+ "<table id=\"T_1bc45290_6aa7_11e3_bffd_001a4d511830\">\n",
+ "\n",
+ "<caption>\n",
+ "A zebra table\n",
+ "</caption>\n",
+ "\n",
+ "\n",
+ " <thead>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col0\">C_l0_g0</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col1\">C_l0_g1</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col2\">C_l0_g2</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col3\">C_l0_g3</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col4\">C_l0_g4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col0\">C_l1_g0</th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col1\">C_l1_g1</th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col2\">C_l1_g2</th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col3\">C_l1_g3</th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col4\">C_l1_g4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row0\">R_l0_g0</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row0\">R_l1_g0</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row0\">R_l2_g0</th>\n",
+ " \n",
+ " <td class=\"data row0 col0\">R0C0</th>\n",
+ " \n",
+ " <td class=\"data row0 col1\">R0C1</th>\n",
+ " \n",
+ " <td class=\"data row0 col2\">R0C2</th>\n",
+ " \n",
+ " <td class=\"data row0 col3\">R0C3</th>\n",
+ " \n",
+ " <td class=\"data row0 col4\">R0C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row1\">R_l0_g1</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row1\">R_l1_g1</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row1\">R_l2_g1</th>\n",
+ " \n",
+ " <td class=\"data row1 col0\">R1C0</th>\n",
+ " \n",
+ " <td class=\"data row1 col1\">R1C1</th>\n",
+ " \n",
+ " <td class=\"data row1 col2\">R1C2</th>\n",
+ " \n",
+ " <td class=\"data row1 col3\">R1C3</th>\n",
+ " \n",
+ " <td class=\"data row1 col4\">R1C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row2\">R_l0_g2</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row2\">R_l1_g2</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row2\">R_l2_g2</th>\n",
+ " \n",
+ " <td class=\"data row2 col0\">R2C0</th>\n",
+ " \n",
+ " <td class=\"data row2 col1\">R2C1</th>\n",
+ " \n",
+ " <td class=\"data row2 col2\">R2C2</th>\n",
+ " \n",
+ " <td class=\"data row2 col3\">R2C3</th>\n",
+ " \n",
+ " <td class=\"data row2 col4\">R2C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row3\">R_l0_g3</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row3\">R_l1_g3</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row3\">R_l2_g3</th>\n",
+ " \n",
+ " <td class=\"data row3 col0\">R3C0</th>\n",
+ " \n",
+ " <td class=\"data row3 col1\">R3C1</th>\n",
+ " \n",
+ " <td class=\"data row3 col2\">R3C2</th>\n",
+ " \n",
+ " <td class=\"data row3 col3\">R3C3</th>\n",
+ " \n",
+ " <td class=\"data row3 col4\">R3C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row4\">R_l0_g4</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row4\">R_l1_g4</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row4\">R_l2_g4</th>\n",
+ " \n",
+ " <td class=\"data row4 col0\">R4C0</th>\n",
+ " \n",
+ " <td class=\"data row4 col1\">R4C1</th>\n",
+ " \n",
+ " <td class=\"data row4 col2\">R4C2</th>\n",
+ " \n",
+ " <td class=\"data row4 col3\">R4C3</th>\n",
+ " \n",
+ " <td class=\"data row4 col4\">R4C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row5\">R_l0_g5</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row5\">R_l1_g5</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row5\">R_l2_g5</th>\n",
+ " \n",
+ " <td class=\"data row5 col0\">R5C0</th>\n",
+ " \n",
+ " <td class=\"data row5 col1\">R5C1</th>\n",
+ " \n",
+ " <td class=\"data row5 col2\">R5C2</th>\n",
+ " \n",
+ " <td class=\"data row5 col3\">R5C3</th>\n",
+ " \n",
+ " <td class=\"data row5 col4\">R5C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row6\">R_l0_g6</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row6\">R_l1_g6</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row6\">R_l2_g6</th>\n",
+ " \n",
+ " <td class=\"data row6 col0\">R6C0</th>\n",
+ " \n",
+ " <td class=\"data row6 col1\">R6C1</th>\n",
+ " \n",
+ " <td class=\"data row6 col2\">R6C2</th>\n",
+ " \n",
+ " <td class=\"data row6 col3\">R6C3</th>\n",
+ " \n",
+ " <td class=\"data row6 col4\">R6C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row7\">R_l0_g7</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row7\">R_l1_g7</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row7\">R_l2_g7</th>\n",
+ " \n",
+ " <td class=\"data row7 col0\">R7C0</th>\n",
+ " \n",
+ " <td class=\"data row7 col1\">R7C1</th>\n",
+ " \n",
+ " <td class=\"data row7 col2\">R7C2</th>\n",
+ " \n",
+ " <td class=\"data row7 col3\">R7C3</th>\n",
+ " \n",
+ " <td class=\"data row7 col4\">R7C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row8\">R_l0_g8</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row8\">R_l1_g8</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row8\">R_l2_g8</th>\n",
+ " \n",
+ " <td class=\"data row8 col0\">R8C0</th>\n",
+ " \n",
+ " <td class=\"data row8 col1\">R8C1</th>\n",
+ " \n",
+ " <td class=\"data row8 col2\">R8C2</th>\n",
+ " \n",
+ " <td class=\"data row8 col3\">R8C3</th>\n",
+ " \n",
+ " <td class=\"data row8 col4\">R8C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row9\">R_l0_g9</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row9\">R_l1_g9</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row9\">R_l2_g9</th>\n",
+ " \n",
+ " <td class=\"data row9 col0\">R9C0</th>\n",
+ " \n",
+ " <td class=\"data row9 col1\">R9C1</th>\n",
+ " \n",
+ " <td class=\"data row9 col2\">R9C2</th>\n",
+ " \n",
+ " <td class=\"data row9 col3\">R9C3</th>\n",
+ " \n",
+ " <td class=\"data row9 col4\">R9C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " </tbody>\n",
+ "\n",
+ "</table>\n"
+ ],
+ "metadata": {},
+ "output_type": "display_data",
+ "text": [
+ "<IPython.core.display.HTML at 0x4492710>"
+ ]
+ }
+ ],
+ "prompt_number": 6
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "def tag_col(n,c=\"grey10\", with_headings=False):\n",
+ " selector=\"td.col%d\" % n\n",
+ " if not with_headings:\n",
+ " selector+=\".data\"\n",
+ " return [dict(selector=selector,\n",
+ " props=[(\"background-color\",c)])]\n",
+ "\n",
+ "def tag_row(n,c=\"grey10\", with_headings=False):\n",
+ " selector=\"td.row%d\" % n\n",
+ " if not with_headings:\n",
+ " selector+=\".data\"\n",
+ " return [dict(selector=selector,\n",
+ " props=[(\"background-color\",c)])]\n",
+ "\n",
+ "ctx= translate(df)\n",
+ "style=[]\n",
+ "style.extend(tag_col(2,\"beige\"))\n",
+ "style.extend(tag_row(3,\"purple\"))\n",
+ "ctx['style']=style\n",
+ "ctx['caption']=\"Highlight rows/cols by index\"\n",
+ "display(HTML(t.render(**ctx)))\n"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "html": [
+ "\n",
+ "<style type=\"text/css\" >\n",
+ "#T_1f2d5ef4_6aa7_11e3_bffd_001a4d511830 tr {\n",
+ "border: none;\n",
+ "}\n",
+ "#T_1f2d5ef4_6aa7_11e3_bffd_001a4d511830 {\n",
+ "border: none;\n",
+ "}\n",
+ "#T_1f2d5ef4_6aa7_11e3_bffd_001a4d511830 th.blank {\n",
+ "border: none;\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ "#T_1f2d5ef4_6aa7_11e3_bffd_001a4d511830 td.col2.data { \n",
+ "\n",
+ " background-color: beige;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "#T_1f2d5ef4_6aa7_11e3_bffd_001a4d511830 td.row3.data { \n",
+ "\n",
+ " background-color: purple;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "</style>\n",
+ "<table id=\"T_1f2d5ef4_6aa7_11e3_bffd_001a4d511830\">\n",
+ "\n",
+ "<caption>\n",
+ "Highlight rows/cols by index\n",
+ "</caption>\n",
+ "\n",
+ "\n",
+ " <thead>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col0\">C_l0_g0</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col1\">C_l0_g1</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col2\">C_l0_g2</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col3\">C_l0_g3</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col4\">C_l0_g4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col0\">C_l1_g0</th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col1\">C_l1_g1</th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col2\">C_l1_g2</th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col3\">C_l1_g3</th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col4\">C_l1_g4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row0\">R_l0_g0</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row0\">R_l1_g0</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row0\">R_l2_g0</th>\n",
+ " \n",
+ " <td class=\"data row0 col0\">R0C0</th>\n",
+ " \n",
+ " <td class=\"data row0 col1\">R0C1</th>\n",
+ " \n",
+ " <td class=\"data row0 col2\">R0C2</th>\n",
+ " \n",
+ " <td class=\"data row0 col3\">R0C3</th>\n",
+ " \n",
+ " <td class=\"data row0 col4\">R0C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row1\">R_l0_g1</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row1\">R_l1_g1</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row1\">R_l2_g1</th>\n",
+ " \n",
+ " <td class=\"data row1 col0\">R1C0</th>\n",
+ " \n",
+ " <td class=\"data row1 col1\">R1C1</th>\n",
+ " \n",
+ " <td class=\"data row1 col2\">R1C2</th>\n",
+ " \n",
+ " <td class=\"data row1 col3\">R1C3</th>\n",
+ " \n",
+ " <td class=\"data row1 col4\">R1C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row2\">R_l0_g2</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row2\">R_l1_g2</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row2\">R_l2_g2</th>\n",
+ " \n",
+ " <td class=\"data row2 col0\">R2C0</th>\n",
+ " \n",
+ " <td class=\"data row2 col1\">R2C1</th>\n",
+ " \n",
+ " <td class=\"data row2 col2\">R2C2</th>\n",
+ " \n",
+ " <td class=\"data row2 col3\">R2C3</th>\n",
+ " \n",
+ " <td class=\"data row2 col4\">R2C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row3\">R_l0_g3</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row3\">R_l1_g3</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row3\">R_l2_g3</th>\n",
+ " \n",
+ " <td class=\"data row3 col0\">R3C0</th>\n",
+ " \n",
+ " <td class=\"data row3 col1\">R3C1</th>\n",
+ " \n",
+ " <td class=\"data row3 col2\">R3C2</th>\n",
+ " \n",
+ " <td class=\"data row3 col3\">R3C3</th>\n",
+ " \n",
+ " <td class=\"data row3 col4\">R3C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row4\">R_l0_g4</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row4\">R_l1_g4</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row4\">R_l2_g4</th>\n",
+ " \n",
+ " <td class=\"data row4 col0\">R4C0</th>\n",
+ " \n",
+ " <td class=\"data row4 col1\">R4C1</th>\n",
+ " \n",
+ " <td class=\"data row4 col2\">R4C2</th>\n",
+ " \n",
+ " <td class=\"data row4 col3\">R4C3</th>\n",
+ " \n",
+ " <td class=\"data row4 col4\">R4C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row5\">R_l0_g5</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row5\">R_l1_g5</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row5\">R_l2_g5</th>\n",
+ " \n",
+ " <td class=\"data row5 col0\">R5C0</th>\n",
+ " \n",
+ " <td class=\"data row5 col1\">R5C1</th>\n",
+ " \n",
+ " <td class=\"data row5 col2\">R5C2</th>\n",
+ " \n",
+ " <td class=\"data row5 col3\">R5C3</th>\n",
+ " \n",
+ " <td class=\"data row5 col4\">R5C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row6\">R_l0_g6</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row6\">R_l1_g6</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row6\">R_l2_g6</th>\n",
+ " \n",
+ " <td class=\"data row6 col0\">R6C0</th>\n",
+ " \n",
+ " <td class=\"data row6 col1\">R6C1</th>\n",
+ " \n",
+ " <td class=\"data row6 col2\">R6C2</th>\n",
+ " \n",
+ " <td class=\"data row6 col3\">R6C3</th>\n",
+ " \n",
+ " <td class=\"data row6 col4\">R6C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row7\">R_l0_g7</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row7\">R_l1_g7</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row7\">R_l2_g7</th>\n",
+ " \n",
+ " <td class=\"data row7 col0\">R7C0</th>\n",
+ " \n",
+ " <td class=\"data row7 col1\">R7C1</th>\n",
+ " \n",
+ " <td class=\"data row7 col2\">R7C2</th>\n",
+ " \n",
+ " <td class=\"data row7 col3\">R7C3</th>\n",
+ " \n",
+ " <td class=\"data row7 col4\">R7C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row8\">R_l0_g8</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row8\">R_l1_g8</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row8\">R_l2_g8</th>\n",
+ " \n",
+ " <td class=\"data row8 col0\">R8C0</th>\n",
+ " \n",
+ " <td class=\"data row8 col1\">R8C1</th>\n",
+ " \n",
+ " <td class=\"data row8 col2\">R8C2</th>\n",
+ " \n",
+ " <td class=\"data row8 col3\">R8C3</th>\n",
+ " \n",
+ " <td class=\"data row8 col4\">R8C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row9\">R_l0_g9</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row9\">R_l1_g9</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row9\">R_l2_g9</th>\n",
+ " \n",
+ " <td class=\"data row9 col0\">R9C0</th>\n",
+ " \n",
+ " <td class=\"data row9 col1\">R9C1</th>\n",
+ " \n",
+ " <td class=\"data row9 col2\">R9C2</th>\n",
+ " \n",
+ " <td class=\"data row9 col3\">R9C3</th>\n",
+ " \n",
+ " <td class=\"data row9 col4\">R9C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " </tbody>\n",
+ "\n",
+ "</table>\n"
+ ],
+ "metadata": {},
+ "output_type": "display_data",
+ "text": [
+ "<IPython.core.display.HTML at 0x44f4cd0>"
+ ]
+ }
+ ],
+ "prompt_number": 7
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "def round_corners(radius):\n",
+ " props_bl=[ \n",
+ " (\"-moz-border-radius-bottomleft\", \"%dpx\" % radius ),\n",
+ " (\"-webkit-border-bottom-left-radius\", \"%dpx\" % radius ),\n",
+ " (\"border-bottom-left-radius\", \"%dpx\" % radius )\n",
+ " ]\n",
+ " props_br=[ \n",
+ " (\"-moz-border-radius-bottomright\", \"%dpx\" % radius ),\n",
+ " (\"-webkit-border-bottom-right-radius\", \"%dpx\" % radius ),\n",
+ " (\"border-bottom-right-radius\", \"%dpx\" % radius )\n",
+ " ]\n",
+ " props_tl=[ \n",
+ " (\"-moz-border-radius-topleft\", \"%dpx\" % radius ),\n",
+ " (\"-webkit-border-top-left-radius\", \"%dpx\" % radius ),\n",
+ " (\"border-top-left-radius\", \"%dpx\" % radius )\n",
+ " ]\n",
+ " props_tr=[ \n",
+ " (\"-moz-border-radius-topright\", \"%dpx\" % radius ),\n",
+ " (\"-webkit-border-top-right-radius\", \"%dpx\" % radius ),\n",
+ " (\"border-top-right-radius\", \"%dpx\" % radius )\n",
+ " ] \n",
+ " \n",
+ " \n",
+ " return [dict(selector=\"td\",\n",
+ " props=[(\"border-width\",\"1px\")]),\n",
+ " dict(selector=\"\",\n",
+ " props=[(\"border-collapse\",\"separate\")]),\n",
+ " dict(selector=\"tr:last-child th:first-child\",\n",
+ " props=props_bl),\n",
+ " dict(selector=\"tr:last-child td:last-child\",\n",
+ " props=props_br),\n",
+ " dict(selector=\"tr:first-child th.col0\",\n",
+ " props=props_tl),\n",
+ " dict(selector=\"tr:first-child th.row0:first-child\",\n",
+ " props=props_tl), \n",
+ " dict(selector=\"tr:first-child th:last-child\",\n",
+ " props=props_tr),\n",
+ " ]\n",
+ " \n",
+ "ctx= translate(df)\n",
+ "style=[]\n",
+ "style.extend(round_corners(5))\n",
+ "\n",
+ "ctx['caption']=\"Rounded corners. CSS skills beginning to fail.\"\n",
+ "ctx['style']=style\n",
+ "display(HTML(t.render(**ctx)))\n"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "html": [
+ "\n",
+ "<style type=\"text/css\" >\n",
+ "#T_20f772ec_6aa7_11e3_bffd_001a4d511830 tr {\n",
+ "border: none;\n",
+ "}\n",
+ "#T_20f772ec_6aa7_11e3_bffd_001a4d511830 {\n",
+ "border: none;\n",
+ "}\n",
+ "#T_20f772ec_6aa7_11e3_bffd_001a4d511830 th.blank {\n",
+ "border: none;\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ "#T_20f772ec_6aa7_11e3_bffd_001a4d511830 td { \n",
+ "\n",
+ " border-width: 1px;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "#T_20f772ec_6aa7_11e3_bffd_001a4d511830 { \n",
+ "\n",
+ " border-collapse: separate;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "#T_20f772ec_6aa7_11e3_bffd_001a4d511830 tr:last-child th:first-child { \n",
+ "\n",
+ " -moz-border-radius-bottomleft: 5px;\n",
+ "\n",
+ " -webkit-border-bottom-left-radius: 5px;\n",
+ "\n",
+ " border-bottom-left-radius: 5px;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "#T_20f772ec_6aa7_11e3_bffd_001a4d511830 tr:last-child td:last-child { \n",
+ "\n",
+ " -moz-border-radius-bottomright: 5px;\n",
+ "\n",
+ " -webkit-border-bottom-right-radius: 5px;\n",
+ "\n",
+ " border-bottom-right-radius: 5px;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "#T_20f772ec_6aa7_11e3_bffd_001a4d511830 tr:first-child th.col0 { \n",
+ "\n",
+ " -moz-border-radius-topleft: 5px;\n",
+ "\n",
+ " -webkit-border-top-left-radius: 5px;\n",
+ "\n",
+ " border-top-left-radius: 5px;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "#T_20f772ec_6aa7_11e3_bffd_001a4d511830 tr:first-child th.row0:first-child { \n",
+ "\n",
+ " -moz-border-radius-topleft: 5px;\n",
+ "\n",
+ " -webkit-border-top-left-radius: 5px;\n",
+ "\n",
+ " border-top-left-radius: 5px;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "#T_20f772ec_6aa7_11e3_bffd_001a4d511830 tr:first-child th:last-child { \n",
+ "\n",
+ " -moz-border-radius-topright: 5px;\n",
+ "\n",
+ " -webkit-border-top-right-radius: 5px;\n",
+ "\n",
+ " border-top-right-radius: 5px;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "</style>\n",
+ "<table id=\"T_20f772ec_6aa7_11e3_bffd_001a4d511830\">\n",
+ "\n",
+ "<caption>\n",
+ "Rounded corners. CSS skills beginning to fail.\n",
+ "</caption>\n",
+ "\n",
+ "\n",
+ " <thead>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col0\">C_l0_g0</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col1\">C_l0_g1</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col2\">C_l0_g2</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col3\">C_l0_g3</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col4\">C_l0_g4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col0\">C_l1_g0</th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col1\">C_l1_g1</th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col2\">C_l1_g2</th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col3\">C_l1_g3</th>\n",
+ " \n",
+ " <th class=\"col_heading level1 col4\">C_l1_g4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row0\">R_l0_g0</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row0\">R_l1_g0</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row0\">R_l2_g0</th>\n",
+ " \n",
+ " <td class=\"data row0 col0\">R0C0</th>\n",
+ " \n",
+ " <td class=\"data row0 col1\">R0C1</th>\n",
+ " \n",
+ " <td class=\"data row0 col2\">R0C2</th>\n",
+ " \n",
+ " <td class=\"data row0 col3\">R0C3</th>\n",
+ " \n",
+ " <td class=\"data row0 col4\">R0C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row1\">R_l0_g1</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row1\">R_l1_g1</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row1\">R_l2_g1</th>\n",
+ " \n",
+ " <td class=\"data row1 col0\">R1C0</th>\n",
+ " \n",
+ " <td class=\"data row1 col1\">R1C1</th>\n",
+ " \n",
+ " <td class=\"data row1 col2\">R1C2</th>\n",
+ " \n",
+ " <td class=\"data row1 col3\">R1C3</th>\n",
+ " \n",
+ " <td class=\"data row1 col4\">R1C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row2\">R_l0_g2</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row2\">R_l1_g2</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row2\">R_l2_g2</th>\n",
+ " \n",
+ " <td class=\"data row2 col0\">R2C0</th>\n",
+ " \n",
+ " <td class=\"data row2 col1\">R2C1</th>\n",
+ " \n",
+ " <td class=\"data row2 col2\">R2C2</th>\n",
+ " \n",
+ " <td class=\"data row2 col3\">R2C3</th>\n",
+ " \n",
+ " <td class=\"data row2 col4\">R2C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row3\">R_l0_g3</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row3\">R_l1_g3</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row3\">R_l2_g3</th>\n",
+ " \n",
+ " <td class=\"data row3 col0\">R3C0</th>\n",
+ " \n",
+ " <td class=\"data row3 col1\">R3C1</th>\n",
+ " \n",
+ " <td class=\"data row3 col2\">R3C2</th>\n",
+ " \n",
+ " <td class=\"data row3 col3\">R3C3</th>\n",
+ " \n",
+ " <td class=\"data row3 col4\">R3C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row4\">R_l0_g4</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row4\">R_l1_g4</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row4\">R_l2_g4</th>\n",
+ " \n",
+ " <td class=\"data row4 col0\">R4C0</th>\n",
+ " \n",
+ " <td class=\"data row4 col1\">R4C1</th>\n",
+ " \n",
+ " <td class=\"data row4 col2\">R4C2</th>\n",
+ " \n",
+ " <td class=\"data row4 col3\">R4C3</th>\n",
+ " \n",
+ " <td class=\"data row4 col4\">R4C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row5\">R_l0_g5</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row5\">R_l1_g5</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row5\">R_l2_g5</th>\n",
+ " \n",
+ " <td class=\"data row5 col0\">R5C0</th>\n",
+ " \n",
+ " <td class=\"data row5 col1\">R5C1</th>\n",
+ " \n",
+ " <td class=\"data row5 col2\">R5C2</th>\n",
+ " \n",
+ " <td class=\"data row5 col3\">R5C3</th>\n",
+ " \n",
+ " <td class=\"data row5 col4\">R5C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row6\">R_l0_g6</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row6\">R_l1_g6</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row6\">R_l2_g6</th>\n",
+ " \n",
+ " <td class=\"data row6 col0\">R6C0</th>\n",
+ " \n",
+ " <td class=\"data row6 col1\">R6C1</th>\n",
+ " \n",
+ " <td class=\"data row6 col2\">R6C2</th>\n",
+ " \n",
+ " <td class=\"data row6 col3\">R6C3</th>\n",
+ " \n",
+ " <td class=\"data row6 col4\">R6C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row7\">R_l0_g7</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row7\">R_l1_g7</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row7\">R_l2_g7</th>\n",
+ " \n",
+ " <td class=\"data row7 col0\">R7C0</th>\n",
+ " \n",
+ " <td class=\"data row7 col1\">R7C1</th>\n",
+ " \n",
+ " <td class=\"data row7 col2\">R7C2</th>\n",
+ " \n",
+ " <td class=\"data row7 col3\">R7C3</th>\n",
+ " \n",
+ " <td class=\"data row7 col4\">R7C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row8\">R_l0_g8</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row8\">R_l1_g8</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row8\">R_l2_g8</th>\n",
+ " \n",
+ " <td class=\"data row8 col0\">R8C0</th>\n",
+ " \n",
+ " <td class=\"data row8 col1\">R8C1</th>\n",
+ " \n",
+ " <td class=\"data row8 col2\">R8C2</th>\n",
+ " \n",
+ " <td class=\"data row8 col3\">R8C3</th>\n",
+ " \n",
+ " <td class=\"data row8 col4\">R8C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row9\">R_l0_g9</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row9\">R_l1_g9</th>\n",
+ " \n",
+ " <th class=\"row_heading level4 row9\">R_l2_g9</th>\n",
+ " \n",
+ " <td class=\"data row9 col0\">R9C0</th>\n",
+ " \n",
+ " <td class=\"data row9 col1\">R9C1</th>\n",
+ " \n",
+ " <td class=\"data row9 col2\">R9C2</th>\n",
+ " \n",
+ " <td class=\"data row9 col3\">R9C3</th>\n",
+ " \n",
+ " <td class=\"data row9 col4\">R9C4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " </tbody>\n",
+ "\n",
+ "</table>\n"
+ ],
+ "metadata": {},
+ "output_type": "display_data",
+ "text": [
+ "<IPython.core.display.HTML at 0x44d9750>"
+ ]
+ }
+ ],
+ "prompt_number": 8
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "def color_class(cls, color):\n",
+ " return [dict(selector=\"td.%s\" % cls ,\n",
+ " props=[(\"background-color\",color)])]\n",
+ "def rank_col(n,ranking,u):\n",
+ " data = {i: {n: [\"%s-%s\" % (u,ranking[i])]} for i in range(len(ranking))}\n",
+ " return {\"data\": data}\n",
+ "\n",
+ "import uuid\n",
+ "u = \"U\"+str(uuid.uuid1()).replace(\"-\",\"_\")\n",
+ "df=mkdf(9,5,data_gen_f=lambda r,c:np.random.random())\n",
+ "\n",
+ "ranking=df.iloc[:,1].argsort().tolist()\n",
+ "cell_context=rank_col(1, ranking, u)\n",
+ "\n",
+ "ctx= translate(df,cell_context)\n",
+ "style=[]\n",
+ "# http://colorbrewer2.org/\n",
+ "color_scale=[\"#fff7ec\",\"#fee8c8\",\"#fdd49e\",\"#fdbb84\",\"#fc8d59\",\"#ef6548\",\"#d7301f\",\"#b30000\",\"#7f0000\"]\n",
+ "for intensity in range(9):\n",
+ " style.extend(color_class(\"%s-%s\" % (u,intensity),color_scale[intensity]))\n",
+ " \n",
+ "ctx['style']=style\n",
+ "ctx['caption']=\"And finally, a heatmap based on values\"\n",
+ "display(HTML(t.render(**ctx)))\n",
+ " "
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "html": [
+ "\n",
+ "<style type=\"text/css\" >\n",
+ "#T_23627d9c_6aa7_11e3_bffd_001a4d511830 tr {\n",
+ "border: none;\n",
+ "}\n",
+ "#T_23627d9c_6aa7_11e3_bffd_001a4d511830 {\n",
+ "border: none;\n",
+ "}\n",
+ "#T_23627d9c_6aa7_11e3_bffd_001a4d511830 th.blank {\n",
+ "border: none;\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ "#T_23627d9c_6aa7_11e3_bffd_001a4d511830 td.U2360ee96_6aa7_11e3_bffd_001a4d511830-0 { \n",
+ "\n",
+ " background-color: #fff7ec;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "#T_23627d9c_6aa7_11e3_bffd_001a4d511830 td.U2360ee96_6aa7_11e3_bffd_001a4d511830-1 { \n",
+ "\n",
+ " background-color: #fee8c8;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "#T_23627d9c_6aa7_11e3_bffd_001a4d511830 td.U2360ee96_6aa7_11e3_bffd_001a4d511830-2 { \n",
+ "\n",
+ " background-color: #fdd49e;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "#T_23627d9c_6aa7_11e3_bffd_001a4d511830 td.U2360ee96_6aa7_11e3_bffd_001a4d511830-3 { \n",
+ "\n",
+ " background-color: #fdbb84;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "#T_23627d9c_6aa7_11e3_bffd_001a4d511830 td.U2360ee96_6aa7_11e3_bffd_001a4d511830-4 { \n",
+ "\n",
+ " background-color: #fc8d59;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "#T_23627d9c_6aa7_11e3_bffd_001a4d511830 td.U2360ee96_6aa7_11e3_bffd_001a4d511830-5 { \n",
+ "\n",
+ " background-color: #ef6548;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "#T_23627d9c_6aa7_11e3_bffd_001a4d511830 td.U2360ee96_6aa7_11e3_bffd_001a4d511830-6 { \n",
+ "\n",
+ " background-color: #d7301f;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "#T_23627d9c_6aa7_11e3_bffd_001a4d511830 td.U2360ee96_6aa7_11e3_bffd_001a4d511830-7 { \n",
+ "\n",
+ " background-color: #b30000;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "#T_23627d9c_6aa7_11e3_bffd_001a4d511830 td.U2360ee96_6aa7_11e3_bffd_001a4d511830-8 { \n",
+ "\n",
+ " background-color: #7f0000;\n",
+ "\n",
+ " }\n",
+ "\n",
+ "</style>\n",
+ "<table id=\"T_23627d9c_6aa7_11e3_bffd_001a4d511830\">\n",
+ "\n",
+ "<caption>\n",
+ "And finally, a heatmap based on values\n",
+ "</caption>\n",
+ "\n",
+ "\n",
+ " <thead>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"blank\"></th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col0\">C_l0_g0</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col1\">C_l0_g1</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col2\">C_l0_g2</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col3\">C_l0_g3</th>\n",
+ " \n",
+ " <th class=\"col_heading level0 col4\">C_l0_g4</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row0\">R_l0_g0</th>\n",
+ " \n",
+ " <td class=\"data row0 col0\">0.211801747127</th>\n",
+ " \n",
+ " <td class=\"data row0 col1 U2360ee96_6aa7_11e3_bffd_001a4d511830-4\">0.208803651856</th>\n",
+ " \n",
+ " <td class=\"data row0 col2\">0.743716623422</th>\n",
+ " \n",
+ " <td class=\"data row0 col3\">0.962017341688</th>\n",
+ " \n",
+ " <td class=\"data row0 col4\">0.176015213664</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row1\">R_l0_g1</th>\n",
+ " \n",
+ " <td class=\"data row1 col0\">0.745491761131</th>\n",
+ " \n",
+ " <td class=\"data row1 col1 U2360ee96_6aa7_11e3_bffd_001a4d511830-5\">0.820494510091</th>\n",
+ " \n",
+ " <td class=\"data row1 col2\">0.503137612941</th>\n",
+ " \n",
+ " <td class=\"data row1 col3\">0.238019828149</th>\n",
+ " \n",
+ " <td class=\"data row1 col4\">0.634250742128</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row2\">R_l0_g2</th>\n",
+ " \n",
+ " <td class=\"data row2 col0\">0.569301755126</th>\n",
+ " \n",
+ " <td class=\"data row2 col1 U2360ee96_6aa7_11e3_bffd_001a4d511830-0\">0.507672767903</th>\n",
+ " \n",
+ " <td class=\"data row2 col2\">0.309202712181</th>\n",
+ " \n",
+ " <td class=\"data row2 col3\">0.830732729127</th>\n",
+ " \n",
+ " <td class=\"data row2 col4\">0.617108870585</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row3\">R_l0_g3</th>\n",
+ " \n",
+ " <td class=\"data row3 col0\">0.685515863149</th>\n",
+ " \n",
+ " <td class=\"data row3 col1 U2360ee96_6aa7_11e3_bffd_001a4d511830-7\">0.985786341696</th>\n",
+ " \n",
+ " <td class=\"data row3 col2\">0.534988628544</th>\n",
+ " \n",
+ " <td class=\"data row3 col3\">0.900187936559</th>\n",
+ " \n",
+ " <td class=\"data row3 col4\">0.602678947775</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row4\">R_l0_g4</th>\n",
+ " \n",
+ " <td class=\"data row4 col0\">0.521370083872</th>\n",
+ " \n",
+ " <td class=\"data row4 col1 U2360ee96_6aa7_11e3_bffd_001a4d511830-2\">0.0110884722823</th>\n",
+ " \n",
+ " <td class=\"data row4 col2\">0.752014325914</th>\n",
+ " \n",
+ " <td class=\"data row4 col3\">0.520270090798</th>\n",
+ " \n",
+ " <td class=\"data row4 col4\">0.0095252937789</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row5\">R_l0_g5</th>\n",
+ " \n",
+ " <td class=\"data row5 col0\">0.29865049643</th>\n",
+ " \n",
+ " <td class=\"data row5 col1 U2360ee96_6aa7_11e3_bffd_001a4d511830-1\">0.1156016189</th>\n",
+ " \n",
+ " <td class=\"data row5 col2\">0.16250389458</th>\n",
+ " \n",
+ " <td class=\"data row5 col3\">0.1162681165</th>\n",
+ " \n",
+ " <td class=\"data row5 col4\">0.0624890733322</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row6\">R_l0_g6</th>\n",
+ " \n",
+ " <td class=\"data row6 col0\">0.24039917756</th>\n",
+ " \n",
+ " <td class=\"data row6 col1 U2360ee96_6aa7_11e3_bffd_001a4d511830-6\">0.831887982065</th>\n",
+ " \n",
+ " <td class=\"data row6 col2\">0.139444606684</th>\n",
+ " \n",
+ " <td class=\"data row6 col3\">0.87434203059</th>\n",
+ " \n",
+ " <td class=\"data row6 col4\">0.214273512954</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row7\">R_l0_g7</th>\n",
+ " \n",
+ " <td class=\"data row7 col0\">0.0066042287924</th>\n",
+ " \n",
+ " <td class=\"data row7 col1 U2360ee96_6aa7_11e3_bffd_001a4d511830-3\">0.333568312336</th>\n",
+ " \n",
+ " <td class=\"data row7 col2\">0.860660066709</th>\n",
+ " \n",
+ " <td class=\"data row7 col3\">0.273793599297</th>\n",
+ " \n",
+ " <td class=\"data row7 col4\">0.0247286858822</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " <tr>\n",
+ " \n",
+ " <th class=\"row_heading level4 row8\">R_l0_g8</th>\n",
+ " \n",
+ " <td class=\"data row8 col0\">0.0372715722521</th>\n",
+ " \n",
+ " <td class=\"data row8 col1 U2360ee96_6aa7_11e3_bffd_001a4d511830-8\">0.996144666822</th>\n",
+ " \n",
+ " <td class=\"data row8 col2\">0.258524096378</th>\n",
+ " \n",
+ " <td class=\"data row8 col3\">0.0665443054498</th>\n",
+ " \n",
+ " <td class=\"data row8 col4\">0.993389023817</th>\n",
+ " \n",
+ " </tr>\n",
+ "\n",
+ " </tbody>\n",
+ "\n",
+ "</table>\n"
+ ],
+ "metadata": {},
+ "output_type": "display_data",
+ "text": [
+ "<IPython.core.display.HTML at 0x44e4890>"
+ ]
+ }
+ ],
+ "prompt_number": 9
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ }
+ ],
+ "metadata": {}
+ }
+ ]
+}
\ No newline at end of file
diff --git a/pandas_HTML_styling_api.ipynb b/pandas_HTML_styling_api.ipynb
new file mode 100644
index 0000000000000..abe53dea5b057
--- /dev/null
+++ b/pandas_HTML_styling_api.ipynb
@@ -0,0 +1,2151 @@
+{
+ "metadata": {
+ "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "###Experimenting with the API"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "df=mkdf(10,5,r_idx_nlevels=2,c_idx_nlevels=3)\n",
+ "s=pd.io.templating.HTMLStyler(df) # -> df.to_styler()\n",
+ "s #vanilla"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "html": [
+ "\n",
+ "\n",
+ "<style type=\"text/css\">\n",
+ " #T_4fcb2ed6_6bf3_11e3_b230_001a4d511830 tr {\n",
+ " border: none;\n",
+ " }\n",
+ "\n",
+ " #T_4fcb2ed6_6bf3_11e3_b230_001a4d511830 {\n",
+ " border: none;\n",
+ " }\n",
+ "\n",
+ " #T_4fcb2ed6_6bf3_11e3_b230_001a4d511830 th.pandas_blank {\n",
+ " border: none;\n",
+ " }\n",
+ "\n",
+ " \n",
+ "</style>\n",
+ "\n",
+ "\n",
+ "<table id=\"T_4fcb2ed6_6bf3_11e3_b230_001a4d511830\">\n",
+ " \n",
+ "\n",
+ " <thead>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col0\">C_l0_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col1\">C_l0_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col2\">C_l0_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col3\">C_l0_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col4\">C_l0_g4</th>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col0\">C_l1_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col1\">C_l1_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col2\">C_l1_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col3\">C_l1_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col4\">C_l1_g4</th>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col0\">C_l2_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col1\">C_l2_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col2\">C_l2_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col3\">C_l2_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col4\">C_l2_g4</th>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " </thead>\n",
+ "\n",
+ " <tbody>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row0\">R_l0_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row0\">R_l1_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col0\">R0C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col1\">R0C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col2\">R0C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col3\">R0C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col4\">R0C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row1\">R_l0_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row1\">R_l1_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col0\">R1C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col1\">R1C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col2\">R1C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col3\">R1C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col4\">R1C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row2\">R_l0_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row2\">R_l1_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col0\">R2C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col1\">R2C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col2\">R2C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col3\">R2C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col4\">R2C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row3\">R_l0_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row3\">R_l1_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col0\">R3C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col1\">R3C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col2\">R3C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col3\">R3C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col4\">R3C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row4\">R_l0_g4</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row4\">R_l1_g4</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col0\">R4C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col1\">R4C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col2\">R4C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col3\">R4C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col4\">R4C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row5\">R_l0_g5</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row5\">R_l1_g5</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col0\">R5C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col1\">R5C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col2\">R5C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col3\">R5C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col4\">R5C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row6\">R_l0_g6</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row6\">R_l1_g6</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col0\">R6C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col1\">R6C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col2\">R6C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col3\">R6C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col4\">R6C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row7\">R_l0_g7</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row7\">R_l1_g7</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col0\">R7C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col1\">R7C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col2\">R7C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col3\">R7C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col4\">R7C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row8\">R_l0_g8</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row8\">R_l1_g8</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col0\">R8C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col1\">R8C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col2\">R8C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col3\">R8C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col4\">R8C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row9\">R_l0_g9</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row9\">R_l1_g9</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col0\">R9C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col1\">R9C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col2\">R9C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col3\">R9C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col4\">R9C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " </tbody>\n",
+ "\n",
+ "</table>"
+ ],
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 1,
+ "text": [
+ "<pandas.io.templating.html.HTMLStyler at 0x4b106d0>"
+ ]
+ }
+ ],
+ "prompt_number": 1
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "# vanilla-chocolate\n",
+ "# Should be vertical...\n",
+ "s.zebra(\"#ddd\",\"#eee\")"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "html": [
+ "\n",
+ "\n",
+ "<style type=\"text/css\">\n",
+ " #T_500ed942_6bf3_11e3_b230_001a4d511830 tr {\n",
+ " border: none;\n",
+ " }\n",
+ "\n",
+ " #T_500ed942_6bf3_11e3_b230_001a4d511830 {\n",
+ " border: none;\n",
+ " }\n",
+ "\n",
+ " #T_500ed942_6bf3_11e3_b230_001a4d511830 th.pandas_blank {\n",
+ " border: none;\n",
+ " }\n",
+ "\n",
+ " \n",
+ " #T_500ed942_6bf3_11e3_b230_001a4d511830 td.pandas_data:nth-child(2n) {\n",
+ " \n",
+ " background-color: #ddd;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_500ed942_6bf3_11e3_b230_001a4d511830 td.pandas_data:nth-child(2n+1) {\n",
+ " \n",
+ " background-color: #eee;\n",
+ " \n",
+ " }\n",
+ " \n",
+ "</style>\n",
+ "\n",
+ "\n",
+ "<table id=\"T_500ed942_6bf3_11e3_b230_001a4d511830\">\n",
+ " \n",
+ "\n",
+ " <thead>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col0\">C_l0_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col1\">C_l0_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col2\">C_l0_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col3\">C_l0_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col4\">C_l0_g4</th>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col0\">C_l1_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col1\">C_l1_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col2\">C_l1_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col3\">C_l1_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col4\">C_l1_g4</th>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col0\">C_l2_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col1\">C_l2_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col2\">C_l2_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col3\">C_l2_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col4\">C_l2_g4</th>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " </thead>\n",
+ "\n",
+ " <tbody>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row0\">R_l0_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row0\">R_l1_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col0\">R0C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col1\">R0C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col2\">R0C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col3\">R0C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col4\">R0C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row1\">R_l0_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row1\">R_l1_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col0\">R1C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col1\">R1C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col2\">R1C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col3\">R1C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col4\">R1C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row2\">R_l0_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row2\">R_l1_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col0\">R2C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col1\">R2C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col2\">R2C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col3\">R2C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col4\">R2C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row3\">R_l0_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row3\">R_l1_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col0\">R3C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col1\">R3C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col2\">R3C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col3\">R3C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col4\">R3C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row4\">R_l0_g4</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row4\">R_l1_g4</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col0\">R4C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col1\">R4C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col2\">R4C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col3\">R4C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col4\">R4C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row5\">R_l0_g5</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row5\">R_l1_g5</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col0\">R5C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col1\">R5C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col2\">R5C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col3\">R5C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col4\">R5C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row6\">R_l0_g6</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row6\">R_l1_g6</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col0\">R6C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col1\">R6C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col2\">R6C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col3\">R6C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col4\">R6C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row7\">R_l0_g7</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row7\">R_l1_g7</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col0\">R7C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col1\">R7C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col2\">R7C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col3\">R7C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col4\">R7C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row8\">R_l0_g8</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row8\">R_l1_g8</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col0\">R8C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col1\">R8C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col2\">R8C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col3\">R8C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col4\">R8C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row9\">R_l0_g9</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row9\">R_l1_g9</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col0\">R9C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col1\">R9C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col2\">R9C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col3\">R9C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col4\">R9C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " </tbody>\n",
+ "\n",
+ "</table>"
+ ],
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 2,
+ "text": [
+ "<pandas.io.templating.html.HTMLStyler at 0x4c69fd0>"
+ ]
+ }
+ ],
+ "prompt_number": 2
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "# cover your eyes\n",
+ "s.hlrow(4, \"green\").hlcol(2,\"purple\").hlcell(4,2,\"blue\")"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "html": [
+ "\n",
+ "\n",
+ "<style type=\"text/css\">\n",
+ " #T_0d0af95e_6bf4_11e3_b230_001a4d511830 tr {\n",
+ " border: none;\n",
+ " }\n",
+ "\n",
+ " #T_0d0af95e_6bf4_11e3_b230_001a4d511830 {\n",
+ " border: none;\n",
+ " }\n",
+ "\n",
+ " #T_0d0af95e_6bf4_11e3_b230_001a4d511830 th.pandas_blank {\n",
+ " border: none;\n",
+ " }\n",
+ "\n",
+ " \n",
+ " #T_0d0af95e_6bf4_11e3_b230_001a4d511830 td.pandas_row4.pandas_data {\n",
+ " \n",
+ " background-color: green;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0d0af95e_6bf4_11e3_b230_001a4d511830 td.pandas_col2.pandas_data {\n",
+ " \n",
+ " background-color: purple;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0d0af95e_6bf4_11e3_b230_001a4d511830 td.pandas_row4.pandas_col2.pandas_data {\n",
+ " \n",
+ " background-color: blue;\n",
+ " \n",
+ " }\n",
+ " \n",
+ "</style>\n",
+ "\n",
+ "\n",
+ "<table id=\"T_0d0af95e_6bf4_11e3_b230_001a4d511830\">\n",
+ " \n",
+ "\n",
+ " <thead>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col0\">C_l0_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col1\">C_l0_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col2\">C_l0_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col3\">C_l0_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col4\">C_l0_g4</th>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col0\">C_l1_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col1\">C_l1_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col2\">C_l1_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col3\">C_l1_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col4\">C_l1_g4</th>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col0\">C_l2_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col1\">C_l2_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col2\">C_l2_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col3\">C_l2_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col4\">C_l2_g4</th>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " </thead>\n",
+ "\n",
+ " <tbody>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row0\">R_l0_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row0\">R_l1_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col0\">R0C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col1\">R0C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col2\">R0C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col3\">R0C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col4\">R0C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row1\">R_l0_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row1\">R_l1_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col0\">R1C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col1\">R1C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col2\">R1C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col3\">R1C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col4\">R1C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row2\">R_l0_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row2\">R_l1_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col0\">R2C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col1\">R2C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col2\">R2C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col3\">R2C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col4\">R2C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row3\">R_l0_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row3\">R_l1_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col0\">R3C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col1\">R3C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col2\">R3C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col3\">R3C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col4\">R3C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row4\">R_l0_g4</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row4\">R_l1_g4</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col0\">R4C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col1\">R4C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col2\">R4C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col3\">R4C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col4\">R4C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row5\">R_l0_g5</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row5\">R_l1_g5</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col0\">R5C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col1\">R5C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col2\">R5C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col3\">R5C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col4\">R5C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row6\">R_l0_g6</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row6\">R_l1_g6</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col0\">R6C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col1\">R6C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col2\">R6C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col3\">R6C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col4\">R6C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row7\">R_l0_g7</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row7\">R_l1_g7</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col0\">R7C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col1\">R7C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col2\">R7C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col3\">R7C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col4\">R7C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row8\">R_l0_g8</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row8\">R_l1_g8</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col0\">R8C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col1\">R8C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col2\">R8C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col3\">R8C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col4\">R8C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row9\">R_l0_g9</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row9\">R_l1_g9</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col0\">R9C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col1\">R9C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col2\">R9C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col3\">R9C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col4\">R9C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " </tbody>\n",
+ "\n",
+ "</table>"
+ ],
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 14,
+ "text": [
+ "<pandas.io.templating.html.HTMLStyler at 0x4d0cc90>"
+ ]
+ }
+ ],
+ "prompt_number": 14
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "s.round_corners(4)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "html": [
+ "\n",
+ "\n",
+ "<style type=\"text/css\">\n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 tr {\n",
+ " border: none;\n",
+ " }\n",
+ "\n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 {\n",
+ " border: none;\n",
+ " }\n",
+ "\n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 th.pandas_blank {\n",
+ " border: none;\n",
+ " }\n",
+ "\n",
+ " \n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 {\n",
+ " \n",
+ " border-collapse: separate;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 td {\n",
+ " \n",
+ " border-width: 0px;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 th {\n",
+ " \n",
+ " border-width: 0px;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 td {\n",
+ " \n",
+ " border-left-width: 1px;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 td {\n",
+ " \n",
+ " border-top-width: 1px;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 tbody tr:last-child th {\n",
+ " \n",
+ " border-bottom-width: 1px;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 tr:last-child td {\n",
+ " \n",
+ " border-bottom-width: 1px;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 tr td:last-child {\n",
+ " \n",
+ " border-right-width: 1px;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 tr th:last-child {\n",
+ " \n",
+ " border-right-width: 1px;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 th {\n",
+ " \n",
+ " border-left-width: 1px;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 th {\n",
+ " \n",
+ " border-top-width: 1px;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 th td:last-child {\n",
+ " \n",
+ " border-right-width: 1px;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 tr:last-child th:first-child {\n",
+ " \n",
+ " -moz-border-radius-bottomleft: 4px;\n",
+ " \n",
+ " -webkit-border-bottom-left-radius: 4px;\n",
+ " \n",
+ " border-bottom-left-radius: 4px;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 tr:last-child td:last-child {\n",
+ " \n",
+ " -moz-border-radius-bottomright: 4px;\n",
+ " \n",
+ " -webkit-border-bottom-right-radius: 4px;\n",
+ " \n",
+ " border-bottom-right-radius: 4px;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 tr:first-child th.pandas_col0 {\n",
+ " \n",
+ " -moz-border-radius-topleft: 4px;\n",
+ " \n",
+ " -webkit-border-top-left-radius: 4px;\n",
+ " \n",
+ " border-top-left-radius: 4px;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 tr:first-child th.pandas_row0:first-child {\n",
+ " \n",
+ " -moz-border-radius-topleft: 4px;\n",
+ " \n",
+ " -webkit-border-top-left-radius: 4px;\n",
+ " \n",
+ " border-top-left-radius: 4px;\n",
+ " \n",
+ " }\n",
+ " \n",
+ " #T_0fe32732_6bf4_11e3_b230_001a4d511830 tr:first-child th:last-child {\n",
+ " \n",
+ " -moz-border-radius-topright: 4px;\n",
+ " \n",
+ " -webkit-border-top-right-radius: 4px;\n",
+ " \n",
+ " border-top-right-radius: 4px;\n",
+ " \n",
+ " }\n",
+ " \n",
+ "</style>\n",
+ "\n",
+ "\n",
+ "<table id=\"T_0fe32732_6bf4_11e3_b230_001a4d511830\">\n",
+ " \n",
+ "\n",
+ " <thead>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col0\">C_l0_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col1\">C_l0_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col2\">C_l0_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col3\">C_l0_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level0 pandas_col4\">C_l0_g4</th>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col0\">C_l1_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col1\">C_l1_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col2\">C_l1_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col3\">C_l1_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level1 pandas_col4\">C_l1_g4</th>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_blank\"></th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col0\">C_l2_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col1\">C_l2_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col2\">C_l2_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col3\">C_l2_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_col_heading pandas_level2 pandas_col4\">C_l2_g4</th>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " </thead>\n",
+ "\n",
+ " <tbody>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row0\">R_l0_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row0\">R_l1_g0</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col0\">R0C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col1\">R0C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col2\">R0C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col3\">R0C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row0 pandas_col4\">R0C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row1\">R_l0_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row1\">R_l1_g1</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col0\">R1C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col1\">R1C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col2\">R1C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col3\">R1C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row1 pandas_col4\">R1C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row2\">R_l0_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row2\">R_l1_g2</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col0\">R2C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col1\">R2C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col2\">R2C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col3\">R2C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row2 pandas_col4\">R2C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row3\">R_l0_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row3\">R_l1_g3</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col0\">R3C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col1\">R3C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col2\">R3C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col3\">R3C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row3 pandas_col4\">R3C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row4\">R_l0_g4</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row4\">R_l1_g4</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col0\">R4C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col1\">R4C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col2\">R4C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col3\">R4C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row4 pandas_col4\">R4C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row5\">R_l0_g5</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row5\">R_l1_g5</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col0\">R5C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col1\">R5C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col2\">R5C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col3\">R5C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row5 pandas_col4\">R5C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row6\">R_l0_g6</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row6\">R_l1_g6</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col0\">R6C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col1\">R6C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col2\">R6C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col3\">R6C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row6 pandas_col4\">R6C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row7\">R_l0_g7</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row7\">R_l1_g7</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col0\">R7C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col1\">R7C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col2\">R7C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col3\">R7C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row7 pandas_col4\">R7C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row8\">R_l0_g8</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row8\">R_l1_g8</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col0\">R8C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col1\">R8C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col2\">R8C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col3\">R8C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row8 pandas_col4\">R8C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " <tr>\n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row9\">R_l0_g9</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <th class=\"pandas_row_heading pandas_level4 pandas_row9\">R_l1_g9</th>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col0\">R9C0</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col1\">R9C1</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col2\">R9C2</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col3\">R9C3</td>\n",
+ " \n",
+ " \n",
+ " \n",
+ " <td class=\"pandas_data pandas_row9 pandas_col4\">R9C4</td>\n",
+ " \n",
+ " \n",
+ " </tr>\n",
+ " \n",
+ " </tbody>\n",
+ "\n",
+ "</table>"
+ ],
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 15,
+ "text": [
+ "<pandas.io.templating.html.HTMLStyler at 0x4d0ce10>"
+ ]
+ }
+ ],
+ "prompt_number": 15
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "# basic latex output, matching to_latex (+ boilerplate)\n",
+ "latex_s=pd.io.templating.Styler(df,template=open(\"/home/user1/src/pandas/pandas/io/templating/templates/latex\").read())\n",
+ "print latex_s.render()"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "\n",
+ "\n",
+ "\\documentclass[11pt,a4paper]{article}\n",
+ "\\usepackage{booktabs}\n",
+ "\n",
+ "\\begin{document}\n",
+ "\n",
+ "\\begin{tabular}{ lllllll }\n",
+ "\n",
+ "\\toprule\n",
+ " \n",
+ " & & C_l0_g0 & C_l0_g1 & C_l0_g2 & C_l0_g3 & C_l0_g4 \\\\\n",
+ " & & C_l1_g0 & C_l1_g1 & C_l1_g2 & C_l1_g3 & C_l1_g4 \\\\\n",
+ " & & C_l2_g0 & C_l2_g1 & C_l2_g2 & C_l2_g3 & C_l2_g4 \\\\\n",
+ "\n",
+ "\\midrule\n",
+ " \n",
+ " R_l0_g0 & R_l1_g0 & R0C0 & R0C1 & R0C2 & R0C3 & R0C4 \\\\\n",
+ " R_l0_g1 & R_l1_g1 & R1C0 & R1C1 & R1C2 & R1C3 & R1C4 \\\\\n",
+ " R_l0_g2 & R_l1_g2 & R2C0 & R2C1 & R2C2 & R2C3 & R2C4 \\\\\n",
+ " R_l0_g3 & R_l1_g3 & R3C0 & R3C1 & R3C2 & R3C3 & R3C4 \\\\\n",
+ " R_l0_g4 & R_l1_g4 & R4C0 & R4C1 & R4C2 & R4C3 & R4C4 \\\\\n",
+ " R_l0_g5 & R_l1_g5 & R5C0 & R5C1 & R5C2 & R5C3 & R5C4 \\\\\n",
+ " R_l0_g6 & R_l1_g6 & R6C0 & R6C1 & R6C2 & R6C3 & R6C4 \\\\\n",
+ " R_l0_g7 & R_l1_g7 & R7C0 & R7C1 & R7C2 & R7C3 & R7C4 \\\\\n",
+ " R_l0_g8 & R_l1_g8 & R8C0 & R8C1 & R8C2 & R8C3 & R8C4 \\\\\n",
+ " R_l0_g9 & R_l1_g9 & R9C0 & R9C1 & R9C2 & R9C3 & R9C4 \\\\\n",
+ "\\bottomrule\n",
+ "\n",
+ "\\end{tabular}\n",
+ "\n",
+ "\\end{document}\n"
+ ]
+ }
+ ],
+ "prompt_number": 10
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "# markdown, just to try out the machinery\n",
+ "# Works with GH GFM\n",
+ "df=mkdf(10,5,r_idx_nlevels=2)\n",
+ "md_s=pd.io.templating.Styler(df,template=open(\"/home/user1/src/pandas/pandas/io/templating/templates/markdown\").read())\n",
+ "print md_s.render()\n"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "\n",
+ "\n",
+ "\n",
+ "| | |__C_l0_g0__ |__C_l0_g1__ |__C_l0_g2__ |__C_l0_g3__ |__C_l0_g4__ |\n",
+ "| --- | --- | --- | --- | --- | --- | --- | \n",
+ "|__R_l0_g0__ |__R_l1_g0__ |R0C0 |R0C1 |R0C2 |R0C3 |R0C4 |\n",
+ "|__R_l0_g1__ |__R_l1_g1__ |R1C0 |R1C1 |R1C2 |R1C3 |R1C4 |\n",
+ "|__R_l0_g2__ |__R_l1_g2__ |R2C0 |R2C1 |R2C2 |R2C3 |R2C4 |\n",
+ "|__R_l0_g3__ |__R_l1_g3__ |R3C0 |R3C1 |R3C2 |R3C3 |R3C4 |\n",
+ "|__R_l0_g4__ |__R_l1_g4__ |R4C0 |R4C1 |R4C2 |R4C3 |R4C4 |\n",
+ "|__R_l0_g5__ |__R_l1_g5__ |R5C0 |R5C1 |R5C2 |R5C3 |R5C4 |\n",
+ "|__R_l0_g6__ |__R_l1_g6__ |R6C0 |R6C1 |R6C2 |R6C3 |R6C4 |\n",
+ "|__R_l0_g7__ |__R_l1_g7__ |R7C0 |R7C1 |R7C2 |R7C3 |R7C4 |\n",
+ "|__R_l0_g8__ |__R_l1_g8__ |R8C0 |R8C1 |R8C2 |R8C3 |R8C4 |\n",
+ "|__R_l0_g9__ |__R_l1_g9__ |R9C0 |R9C1 |R9C2 |R9C3 |R9C4 |\n"
+ ]
+ }
+ ],
+ "prompt_number": 13
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ }
+ ],
+ "metadata": {}
+ }
+ ]
+}
\ No newline at end of file
diff --git a/setup.py b/setup.py
index fe921b1ff6029..5e70529ccde0e 100755
--- a/setup.py
+++ b/setup.py
@@ -535,6 +535,7 @@ def pxd(name):
'pandas.computation.tests',
'pandas.core',
'pandas.io',
+ 'pandas.io.templating',
'pandas.rpy',
'pandas.sandbox',
'pandas.sparse',
| closes #3190.
Provides conditional formatting and CSS styling of HTML tables for dataframes using code.
The plan is to clean up and package the functionality behind a nice API in some pandas namespace,
perhaps with a dotted API, and ship in 0.14.
Unlike my first attempt from 6 months ago which turned into a rabbit hole, this is a much simpler approach that came together in a few hours and supports everything I care about.
See the ipython notebook in the root directory, or on [nbviewer](http://nbviewer.ipython.org/github/y-p/pandas/blob/PR_html_conditional_formatting/pandas_HTML_styling.ipynb).
Feedback welcome.
- Related https://github.com/pydata/pandas/pull/5374. need to have format-specific escaping function as part of the process.
- Equivalent functionality to formatters
- Should be able to "freeze" constructed views and use them as functions of other df
(Example: highlight 3 largest values in "price" column for each frame in a panel).
- Use to define default style for html output (bootstrap-like for visual integration with ipnb).
| https://api.github.com/repos/pandas-dev/pandas/pulls/5763 | 2013-12-22T01:24:46Z | 2014-02-07T10:31:42Z | null | 2014-06-12T11:50:46Z |
DOC: Flesh out the R comparison section of docs (GH3980) | diff --git a/doc/source/comparison_with_r.rst b/doc/source/comparison_with_r.rst
index c05ec01df6bcc..9aedb801250d7 100644
--- a/doc/source/comparison_with_r.rst
+++ b/doc/source/comparison_with_r.rst
@@ -4,7 +4,8 @@
.. ipython:: python
:suppress:
- from pandas import *
+ import pandas as pd
+ import numpy as np
options.display.max_rows=15
Comparison with R / R libraries
@@ -38,25 +39,25 @@ The :meth:`~pandas.DataFrame.query` method is similar to the base R ``subset``
function. In R you might want to get the rows of a ``data.frame`` where one
column's values are less than another column's values:
- .. code-block:: r
+.. code-block:: r
- df <- data.frame(a=rnorm(10), b=rnorm(10))
- subset(df, a <= b)
- df[df$a <= df$b,] # note the comma
+ df <- data.frame(a=rnorm(10), b=rnorm(10))
+ subset(df, a <= b)
+ df[df$a <= df$b,] # note the comma
In ``pandas``, there are a few ways to perform subsetting. You can use
:meth:`~pandas.DataFrame.query` or pass an expression as if it were an
index/slice as well as standard boolean indexing:
- .. ipython:: python
+.. ipython:: python
- from pandas import DataFrame
- from numpy.random import randn
+ from pandas import DataFrame
+ from numpy import random
- df = DataFrame({'a': randn(10), 'b': randn(10)})
- df.query('a <= b')
- df[df.a <= df.b]
- df.loc[df.a <= df.b]
+ df = DataFrame({'a': random.randn(10), 'b': random.randn(10)})
+ df.query('a <= b')
+ df[df.a <= df.b]
+ df.loc[df.a <= df.b]
For more details and examples see :ref:`the query documentation
<indexing.query>`.
@@ -70,20 +71,20 @@ For more details and examples see :ref:`the query documentation
An expression using a data.frame called ``df`` in R with the columns ``a`` and
``b`` would be evaluated using ``with`` like so:
- .. code-block:: r
+.. code-block:: r
- df <- data.frame(a=rnorm(10), b=rnorm(10))
- with(df, a + b)
- df$a + df$b # same as the previous expression
+ df <- data.frame(a=rnorm(10), b=rnorm(10))
+ with(df, a + b)
+ df$a + df$b # same as the previous expression
In ``pandas`` the equivalent expression, using the
:meth:`~pandas.DataFrame.eval` method, would be:
- .. ipython:: python
+.. ipython:: python
- df = DataFrame({'a': randn(10), 'b': randn(10)})
- df.eval('a + b')
- df.a + df.b # same as the previous expression
+ df = DataFrame({'a': random.randn(10), 'b': random.randn(10)})
+ df.eval('a + b')
+ df.a + df.b # same as the previous expression
In certain cases :meth:`~pandas.DataFrame.eval` will be much faster than
evaluation in pure Python. For more details and examples see :ref:`the eval
@@ -98,12 +99,194 @@ xts
plyr
----
+``plyr`` is an R library for the split-apply-combine strategy for data
+analysis. The functions revolve around three data structures in R, ``a``
+for ``arrays``, ``l`` for ``lists``, and ``d`` for ``data.frame``. The
+table below shows how these data structures could be mapped in Python.
+
++------------+-------------------------------+
+| R | Python |
++============+===============================+
+| array | list |
++------------+-------------------------------+
+| lists | dictionary or list of objects |
++------------+-------------------------------+
+| data.frame | dataframe |
++------------+-------------------------------+
+
+|ddply|_
+~~~~~~~~
+
+An expression using a data.frame called ``df`` in R where you want to
+summarize ``x`` by ``month``:
+
+
+
+.. code-block:: r
+
+ require(plyr)
+ df <- data.frame(
+ x = runif(120, 1, 168),
+ y = runif(120, 7, 334),
+ z = runif(120, 1.7, 20.7),
+ month = rep(c(5,6,7,8),30),
+ week = sample(1:4, 120, TRUE)
+ )
+
+ ddply(df, .(month, week), summarize,
+ mean = round(mean(x), 2),
+ sd = round(sd(x), 2))
+
+In ``pandas`` the equivalent expression, using the
+:meth:`~pandas.DataFrame.groupby` method, would be:
+
+
+
+.. ipython:: python
+
+ df = DataFrame({
+ 'x': random.uniform(1., 168., 120),
+ 'y': random.uniform(7., 334., 120),
+ 'z': random.uniform(1.7, 20.7, 120),
+ 'month': [5,6,7,8]*30,
+ 'week': random.randint(1,4, 120)
+ })
+
+ grouped = df.groupby(['month','week'])
+ print grouped['x'].agg([np.mean, np.std])
+
+
+For more details and examples see :ref:`the groupby documentation
+<groupby.aggregate>`.
+
reshape / reshape2
------------------
+|meltarray|_
+~~~~~~~~~~~~~
+
+An expression using a 3 dimensional array called ``a`` in R where you want to
+melt it into a data.frame:
+
+.. code-block:: r
+
+ a <- array(c(1:23, NA), c(2,3,4))
+ data.frame(melt(a))
+
+In Python, since ``a`` is a list, you can simply use list comprehension.
+
+.. ipython:: python
+
+ a = np.array(range(1,24)+[np.NAN]).reshape(2,3,4)
+ DataFrame([tuple(list(x)+[val]) for x, val in np.ndenumerate(a)])
+
+|meltlist|_
+~~~~~~~~~~~~
+
+An expression using a list called ``a`` in R where you want to melt it
+into a data.frame:
+
+.. code-block:: r
+
+ a <- as.list(c(1:4, NA))
+ data.frame(melt(a))
+
+In Python, this list would be a list of tuples, so
+:meth:`~pandas.DataFrame` method would convert it to a dataframe as required.
+
+.. ipython:: python
+
+ a = list(enumerate(range(1,5)+[np.NAN]))
+ DataFrame(a)
+
+For more details and examples see :ref:`the Into to Data Structures
+documentation <basics.dataframe.from_items>`.
+
+|meltdf|_
+~~~~~~~~~~~~~~~~
+
+An expression using a data.frame called ``cheese`` in R where you want to
+reshape the data.frame:
+
+.. code-block:: r
+
+ cheese <- data.frame(
+ first = c('John', 'Mary'),
+ last = c('Doe', 'Bo'),
+ height = c(5.5, 6.0),
+ weight = c(130, 150)
+ )
+ melt(cheese, id=c("first", "last"))
+
+In Python, the :meth:`~pandas.melt` method is the R equivalent:
+
+.. ipython:: python
+
+ cheese = DataFrame({'first' : ['John', 'Mary'],
+ 'last' : ['Doe', 'Bo'],
+ 'height' : [5.5, 6.0],
+ 'weight' : [130, 150]})
+ pd.melt(cheese, id_vars=['first', 'last'])
+ cheese.set_index(['first', 'last']).stack() # alternative way
+
+For more details and examples see :ref:`the reshaping documentation
+<reshaping.melt>`.
+
+|cast|_
+~~~~~~~
+
+An expression using a data.frame called ``df`` in R to cast into a higher
+dimensional array:
+
+.. code-block:: r
+
+ df <- data.frame(
+ x = runif(12, 1, 168),
+ y = runif(12, 7, 334),
+ z = runif(12, 1.7, 20.7),
+ month = rep(c(5,6,7),4),
+ week = rep(c(1,2), 6)
+ )
+
+ mdf <- melt(df, id=c("month", "week"))
+ acast(mdf, week ~ month ~ variable, mean)
+
+In Python the best way is to make use of :meth:`~pandas.pivot_table`:
+
+.. ipython:: python
+
+ df = DataFrame({
+ 'x': random.uniform(1., 168., 12),
+ 'y': random.uniform(7., 334., 12),
+ 'z': random.uniform(1.7, 20.7, 12),
+ 'month': [5,6,7]*4,
+ 'week': [1,2]*6
+ })
+ mdf = pd.melt(df, id_vars=['month', 'week'])
+ pd.pivot_table(mdf, values='value', rows=['variable','week'],
+ cols=['month'], aggfunc=np.mean)
+
+For more details and examples see :ref:`the reshaping documentation
+<reshaping.pivot>`.
.. |with| replace:: ``with``
.. _with: http://finzi.psych.upenn.edu/R/library/base/html/with.html
.. |subset| replace:: ``subset``
.. _subset: http://finzi.psych.upenn.edu/R/library/base/html/subset.html
+
+.. |ddply| replace:: ``ddply``
+.. _ddply: http://www.inside-r.org/packages/cran/plyr/docs/ddply
+
+.. |meltarray| replace:: ``melt.array``
+.. _meltarray: http://www.inside-r.org/packages/cran/reshape2/docs/melt.array
+
+.. |meltlist| replace:: ``melt.list``
+.. meltlist: http://www.inside-r.org/packages/cran/reshape2/docs/melt.list
+
+.. |meltdf| replace:: ``melt.data.frame``
+.. meltdf: http://www.inside-r.org/packages/cran/reshape2/docs/melt.data.frame
+
+.. |cast| replace:: ``cast``
+.. cast: http://www.inside-r.org/packages/cran/reshape2/docs/cast
+
| Some additions to #3980.
I've done some more comparisons to the plyr functions located [here](http://nbviewer.ipython.org/gist/chappers/8066230/). More specifically [aaply](http://nbviewer.ipython.org/gist/chappers/8066230/aaply.ipynb), [alply](http://nbviewer.ipython.org/gist/chappers/8066230/alply.ipynb) don't really have much to do with pandas (its just list comprehension). and [dlply](http://nbviewer.ipython.org/gist/chappers/8066230/dlplyr.ipynb) is probably more suited to be in statsmodel example rather than pandas.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5761 | 2013-12-21T10:04:08Z | 2013-12-28T15:55:45Z | 2013-12-28T15:55:45Z | 2014-06-19T00:03:47Z |
Update rolling skew & kurtosis to handle cases where they aren't defined | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 8de8929c5fa7a..0666eb7f88675 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -111,6 +111,7 @@ Bug Fixes
- Bug in ``pd.read_msgpack`` with inferring a ``DateTimeIndex`` frequencey
incorrectly (:issue:`5947`)
- Fixed ``to_datetime`` for array with both Tz-aware datetimes and ``NaT``s (:issue:`5961`)
+ - Bug in rolling skew/kurtosis when passed a Series with bad data (:issue:`5749`)
pandas 0.13.0
-------------
diff --git a/pandas/algos.pyx b/pandas/algos.pyx
index 08ec707b0d96d..d916de32b7cd3 100644
--- a/pandas/algos.pyx
+++ b/pandas/algos.pyx
@@ -1167,8 +1167,11 @@ def roll_skew(ndarray[double_t] input, int win, int minp):
R = sqrt(B)
- output[i] = ((sqrt(nobs * (nobs - 1.)) * C) /
- ((nobs-2) * R * R * R))
+ if B == 0 or nobs < 3:
+ output[i] = NaN
+ else:
+ output[i] = ((sqrt(nobs * (nobs - 1.)) * C) /
+ ((nobs-2) * R * R * R))
else:
output[i] = NaN
@@ -1236,10 +1239,15 @@ def roll_kurt(ndarray[double_t] input,
R = R * A
D = xxxx / nobs - R - 6*B*A*A - 4*C*A
- K = (nobs * nobs - 1.)*D/(B*B) - 3*((nobs-1.)**2)
- K = K / ((nobs - 2.)*(nobs-3.))
+ if B == 0 or nobs < 4:
+ output[i] = NaN
+
+ else:
+ K = (nobs * nobs - 1.)*D/(B*B) - 3*((nobs-1.)**2)
+ K = K / ((nobs - 2.)*(nobs-3.))
+
+ output[i] = K
- output[i] = K
else:
output[i] = NaN
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index 7381d4c1ae0b4..970adeace1e0f 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -741,6 +741,50 @@ def test_expanding_corr_pairwise(self):
for i in result.items:
assert_almost_equal(result[i], rolling_result[i])
+ def test_rolling_skew_edge_cases(self):
+
+ all_nan = Series([np.NaN] * 5)
+
+ # yields all NaN (0 variance)
+ d = Series([1] * 5)
+ x = mom.rolling_skew(d, window=5)
+ assert_series_equal(all_nan, x)
+
+ # yields all NaN (window too small)
+ d = Series(np.random.randn(5))
+ x = mom.rolling_skew(d, window=2)
+ assert_series_equal(all_nan, x)
+
+ # yields [NaN, NaN, NaN, 0.177994, 1.548824]
+ d = Series([-1.50837035, -0.1297039 , 0.19501095,
+ 1.73508164, 0.41941401])
+ expected = Series([np.NaN, np.NaN, np.NaN,
+ 0.177994, 1.548824])
+ x = mom.rolling_skew(d, window=4)
+ assert_series_equal(expected, x)
+
+ def test_rolling_kurt_edge_cases(self):
+
+ all_nan = Series([np.NaN] * 5)
+
+ # yields all NaN (0 variance)
+ d = Series([1] * 5)
+ x = mom.rolling_kurt(d, window=5)
+ assert_series_equal(all_nan, x)
+
+ # yields all NaN (window too small)
+ d = Series(np.random.randn(5))
+ x = mom.rolling_kurt(d, window=3)
+ assert_series_equal(all_nan, x)
+
+ # yields [NaN, NaN, NaN, 1.224307, 2.671499]
+ d = Series([-1.50837035, -0.1297039 , 0.19501095,
+ 1.73508164, 0.41941401])
+ expected = Series([np.NaN, np.NaN, np.NaN,
+ 1.224307, 2.671499])
+ x = mom.rolling_kurt(d, window=4)
+ assert_series_equal(expected, x)
+
def _check_expanding_ndarray(self, func, static_comp, has_min_periods=True,
has_time_rule=True, preserve_nan=True):
result = func(self.arr)
| closes #5749
The rolling skewness and kurtosis in algos.pyx were modified to match
the testing logic in pandas/core/nanops.py. They now both return NaN
where they are not defined, which occurs where there are either too
few observations or where the variance is zero.
A set of tests was added to verify that Nan is returned in these cases
and that the computations continue to work correctly when the values
are defined.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5760 | 2013-12-20T19:35:08Z | 2014-01-16T14:27:39Z | 2014-01-16T14:27:39Z | 2014-06-20T06:05:10Z |
API: Series.ravel compat with ndarray | diff --git a/pandas/core/series.py b/pandas/core/series.py
index c41cdc89b7bb1..f147eb87d7480 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -317,8 +317,8 @@ def ndim(self):
def base(self):
return self.values.base
- def ravel(self):
- return self.values.ravel()
+ def ravel(self, order='C'):
+ return self.values.ravel(order=order)
def transpose(self):
""" support for compatiblity """
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 21f94f0c5d9e1..16e3368a2710d 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -3253,6 +3253,10 @@ def f(x):
expected = Series(1,index=range(10),dtype='float64')
#assert_series_equal(result,expected)
+ # ravel
+ s = Series(np.random.randn(10))
+ tm.assert_almost_equal(s.ravel(order='F'),s.values.ravel(order='F'))
+
def test_complexx(self):
# GH4819
| related #5698
| https://api.github.com/repos/pandas-dev/pandas/pulls/5759 | 2013-12-20T14:48:20Z | 2013-12-20T15:10:26Z | 2013-12-20T15:10:26Z | 2014-06-25T21:40:52Z |
BUG: empty Series construction (GH5756), concat issues (GH5754) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 8ac168e18233f..173d03f9be3c8 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -247,7 +247,8 @@ API Changes
(:issue:`4390`)
- allow ``ix/loc`` for Series/DataFrame/Panel to set on any axis even when
the single-key is not currently contained in the index for that axis
- (:issue:`2578`, :issue:`5226`, :issue:`5632`, :issue:`5720`, :issue:`5744`)
+ (:issue:`2578`, :issue:`5226`, :issue:`5632`, :issue:`5720`,
+ :issue:`5744`, :issue:`5756`)
- Default export for ``to_clipboard`` is now csv with a sep of `\t` for
compat (:issue:`3368`)
- ``at`` now will enlarge the object inplace (and return the same)
@@ -827,6 +828,7 @@ Bug Fixes
- Bug in fillna with Series and a passed series/dict (:issue:`5703`)
- Bug in groupby transform with a datetime-like grouper (:issue:`5712`)
- Bug in multi-index selection in PY3 when using certain keys (:issue:`5725`)
+ - Row-wise concat of differeing dtypes failing in certain cases (:issue:`5754`)
pandas 0.12.0
-------------
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 8a3869d15c85f..ed6d7fef4dd66 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -325,15 +325,16 @@ def _init_dict(self, data, index, columns, dtype=None):
def _init_ndarray(self, values, index, columns, dtype=None,
copy=False):
if isinstance(values, Series):
- if columns is None and values.name is not None:
- columns = [values.name]
+ if columns is None:
+ if values.name is not None:
+ columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
- if not len(values) and len(columns):
+ if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
values = _prep_ndarray(values, copy=copy)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 3a29fa41046ca..5e00d14a0e0cb 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -6154,6 +6154,48 @@ def test_append_empty_dataframe(self):
expected = df1.copy()
assert_frame_equal(result, expected)
+ def test_append_dtypes(self):
+
+ # GH 5754
+ # row appends of different dtypes (so need to do by-item)
+ # can sometimes infer the correct type
+
+ df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(5))
+ df2 = DataFrame()
+ result = df1.append(df2)
+ expected = df1.copy()
+ assert_frame_equal(result, expected)
+
+ df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
+ df2 = DataFrame({ 'bar' : 'foo' }, index=lrange(1,2))
+ result = df1.append(df2)
+ expected = DataFrame({ 'bar' : [ Timestamp('20130101'), 'foo' ]})
+ assert_frame_equal(result, expected)
+
+ df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
+ df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2))
+ result = df1.append(df2)
+ expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })
+ assert_frame_equal(result, expected)
+
+ df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
+ df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2), dtype=object)
+ result = df1.append(df2)
+ expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })
+ assert_frame_equal(result, expected)
+
+ df1 = DataFrame({ 'bar' : np.nan }, index=lrange(1))
+ df2 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1,2))
+ result = df1.append(df2)
+ expected = DataFrame({ 'bar' : Series([ np.nan, Timestamp('20130101')] ,dtype='M8[ns]') })
+ assert_frame_equal(result, expected)
+
+ df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
+ df2 = DataFrame({ 'bar' : 1 }, index=lrange(1,2), dtype=object)
+ result = df1.append(df2)
+ expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), 1 ]) })
+ assert_frame_equal(result, expected)
+
def test_asfreq(self):
offset_monthly = self.tsframe.asfreq(datetools.bmonthEnd)
rule_monthly = self.tsframe.asfreq('BM')
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index c7fb209b4aacb..f4e203444acfc 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1793,6 +1793,14 @@ def f():
expected = DataFrame(columns=['A','B','C'])
assert_frame_equal(result,expected)
+ # GH 5756
+ # setting with empty Series
+ df = DataFrame(Series())
+ assert_frame_equal(df, DataFrame({ 0 : Series() }))
+
+ df = DataFrame(Series(name='foo'))
+ assert_frame_equal(df, DataFrame({ 'foo' : Series() }))
+
def test_cache_updating(self):
# GH 4939, make sure to update the cache on setitem
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index c76bdea950650..dd7ab65869303 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -1139,52 +1139,55 @@ def _concat_blocks(self, blocks):
def _concat_single_item(self, objs, item):
# this is called if we don't have consistent dtypes in a row-wise append
-
all_values = []
- dtypes = set()
+ dtypes = []
+ alls = set()
+ # figure out the resulting dtype of the combination
for data, orig in zip(objs, self.objs):
+ d = dict([ (t,False) for t in ['object','datetime','timedelta','other'] ])
if item in orig:
values = data.get(item)
if hasattr(values,'to_dense'):
values = values.to_dense()
- dtypes.add(values.dtype)
all_values.append(values)
- else:
- all_values.append(None)
- # figure out the resulting dtype of the combination
- alls = set()
- seen = []
- for dtype in dtypes:
- d = dict([ (t,False) for t in ['object','datetime','timedelta','other'] ])
- if issubclass(dtype.type, (np.object_, np.bool_)):
- d['object'] = True
- alls.add('object')
- elif is_datetime64_dtype(dtype):
- d['datetime'] = True
- alls.add('datetime')
- elif is_timedelta64_dtype(dtype):
- d['timedelta'] = True
- alls.add('timedelta')
+ dtype = values.dtype
+
+ if issubclass(dtype.type, (np.object_, np.bool_)):
+ d['object'] = True
+ alls.add('object')
+ elif is_datetime64_dtype(dtype):
+ d['datetime'] = True
+ alls.add('datetime')
+ elif is_timedelta64_dtype(dtype):
+ d['timedelta'] = True
+ alls.add('timedelta')
+ else:
+ d['other'] = True
+ alls.add('other')
+
else:
+ all_values.append(None)
d['other'] = True
alls.add('other')
- seen.append(d)
+
+ dtypes.append(d)
if 'datetime' in alls or 'timedelta' in alls:
if 'object' in alls or 'other' in alls:
- for v, s in zip(all_values,seen):
- if s.get('datetime') or s.get('timedelta'):
+
+ for v, d in zip(all_values,dtypes):
+ if d.get('datetime') or d.get('timedelta'):
pass
# if we have all null, then leave a date/time like type
# if we have only that type left
- elif isnull(v).all():
+ elif v is None or isnull(v).all():
- alls.remove('other')
- alls.remove('object')
+ alls.discard('other')
+ alls.discard('object')
# create the result
if 'object' in alls:
@@ -1200,7 +1203,7 @@ def _concat_single_item(self, objs, item):
to_concat = []
for obj, item_values in zip(objs, all_values):
- if item_values is None:
+ if item_values is None or isnull(item_values).all():
shape = obj.shape[1:]
missing_arr = np.empty(shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
| closes #5756, BUG: construction of DataFrame from empty Series regression
closes #5754, BUG: Row-wise concat of differeing dtypes failing in certain cases
| https://api.github.com/repos/pandas-dev/pandas/pulls/5757 | 2013-12-20T13:06:10Z | 2013-12-20T14:08:05Z | 2013-12-20T14:08:05Z | 2014-07-02T10:56:26Z |
ENH: set display.max_seq_items default != None | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 8ac168e18233f..cb8d745099c1a 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -388,6 +388,8 @@ API Changes
dates are given (:issue:`5242`)
- ``Timestamp`` now supports ``now/today/utcnow`` class methods
(:issue:`5339`)
+ - default for `display.max_seq_len` is now 100 rather then `None`. This activates
+ truncated display ("...") of long sequences in various places. (:issue:`3391`)
- **All** division with ``NDFrame`` - likes is now truedivision, regardless
of the future import. You can use ``//`` and ``floordiv`` to do integer
division.
diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt
index 720150015909e..7c0472fc07de5 100644
--- a/doc/source/v0.13.0.txt
+++ b/doc/source/v0.13.0.txt
@@ -147,6 +147,8 @@ These were announced changes in 0.12 or prior that are taking effect as of 0.13.
- Remove deprecated ``_verbose_info`` (:issue:`3215`)
- Remove deprecated ``read_clipboard/to_clipboard/ExcelFile/ExcelWriter`` from ``pandas.io.parsers`` (:issue:`3717`)
- default for ``tupleize_cols`` is now ``False`` for both ``to_csv`` and ``read_csv``. Fair warning in 0.12 (:issue:`3604`)
+- default for `display.max_seq_len` is now 100 rather then `None`. This activates
+ truncated display ("...") of long sequences in various places. (:issue:`3391`)
Deprecations
~~~~~~~~~~~~
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index b7ec76522b60c..e4d4ea74ac169 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -246,7 +246,7 @@ def mpl_style_cb(key):
validator=is_text)
cf.register_option('expand_frame_repr', True, pc_expand_repr_doc)
cf.register_option('chop_threshold', None, pc_chop_threshold_doc)
- cf.register_option('max_seq_items', None, pc_max_seq_items)
+ cf.register_option('max_seq_items', 100, pc_max_seq_items)
cf.register_option('mpl_style', None, pc_mpl_style_doc,
validator=is_one_of_factory([None, False, 'default']),
cb=mpl_style_cb)
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index f09becb5befb7..f66c59fade2c1 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -141,9 +141,8 @@ def test_repr_chop_threshold(self):
def test_repr_obeys_max_seq_limit(self):
import pandas.core.common as com
- #unlimited
- reset_option("display.max_seq_items")
- self.assertTrue(len(com.pprint_thing(lrange(1000)))> 2000)
+ with option_context("display.max_seq_items",2000):
+ self.assertTrue(len(com.pprint_thing(lrange(1000))) > 1000)
with option_context("display.max_seq_items",5):
self.assertTrue(len(com.pprint_thing(lrange(1000)))< 100)
| closes #3391.
No idea what the default value should be. numpy equivalent default is 1000, but even 100 is too big IMO.
The default is primarily there to mitigate output bombs when displaying some pandas objects.
Still need to update the release notes on all these micro-PRs.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5753 | 2013-12-19T22:54:03Z | 2013-12-20T15:55:34Z | 2013-12-20T15:55:34Z | 2014-06-27T10:14:01Z |
ENH: expose option_context as a top-level API GH5618 | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 46745d94b5f78..adff5a3c74f90 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1457,6 +1457,21 @@ It's also possible to reset multiple options at once (using a regex):
reset_option("^display")
+.. versionadded:: 0.14.0
+
+ Beginning with v0.14.0 the `option_context` context manager has been exposed through
+ the top-level API, allowing you to execute code with given option values. Option values
+ are restored automatically when you exit the `with` block:
+
+.. ipython:: python
+
+ with option_context("display.max_rows",10,"display.max_columns", 5):
+ print get_option("display.max_rows")
+ print get_option("display.max_columns")
+
+ print get_option("display.max_rows")
+ print get_option("display.max_columns")
+
Console Output Formatting
-------------------------
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 7109b87f5352b..d4c9fa07e546f 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -65,6 +65,7 @@ Improvements to existing features
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- perf improvements in Series datetime/timedelta binary operations (:issue:`5801`)
+ - `option_context` context manager now available as top-level API (:issue:`5752`)
Bug Fixes
~~~~~~~~~
diff --git a/pandas/core/api.py b/pandas/core/api.py
index d75c075d22d7c..b36c9f7499df6 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -31,4 +31,4 @@
import pandas.core.datetools as datetools
from pandas.core.config import (get_option, set_option, reset_option,
- describe_option, options)
+ describe_option, option_context, options)
diff --git a/pandas/core/config.py b/pandas/core/config.py
index 4bec029851092..f2f932e39759a 100644
--- a/pandas/core/config.py
+++ b/pandas/core/config.py
@@ -100,54 +100,29 @@ def _get_option(pat, silent=False):
return root[k]
-def _set_single_option(pat, value, silent):
- key = _get_single_key(pat, silent)
-
- o = _get_registered_option(key)
- if o and o.validator:
- o.validator(value)
-
- # walk the nested dict
- root, k = _get_root(key)
- root[k] = value
-
- if o.cb:
- o.cb(key)
-
-
-def _set_multiple_options(args, silent):
- for k, v in zip(args[::2], args[1::2]):
- _set_single_option(k, v, silent)
-
-
def _set_option(*args, **kwargs):
# must at least 1 arg deal with constraints later
nargs = len(args)
if not nargs or nargs % 2 != 0:
- raise AssertionError("Must provide an even number of non-keyword "
+ raise ValueError("Must provide an even number of non-keyword "
"arguments")
- # must be 0 or 1 kwargs
- nkwargs = len(kwargs)
- if nkwargs not in (0, 1):
- raise AssertionError("The can only be 0 or 1 keyword arguments")
+ # default to false
+ silent = kwargs.get('silent', False)
- # if 1 kwarg then it must be silent=True or silent=False
- if nkwargs:
- k, = list(kwargs.keys())
- v, = list(kwargs.values())
+ for k, v in zip(args[::2], args[1::2]):
+ key = _get_single_key(k, silent)
- if k != 'silent':
- raise ValueError("the only allowed keyword argument is 'silent', "
- "you passed '{0}'".format(k))
- if not isinstance(v, bool):
- raise TypeError("the type of the keyword argument passed must be "
- "bool, you passed a {0}".format(v.__class__))
+ o = _get_registered_option(key)
+ if o and o.validator:
+ o.validator(v)
- # default to false
- silent = kwargs.get('silent', False)
- _set_multiple_options(args, silent)
+ # walk the nested dict
+ root, k = _get_root(key)
+ root[k] = v
+ if o.cb:
+ o.cb(key)
def _describe_option(pat='', _print_desc=True):
@@ -365,7 +340,7 @@ class option_context(object):
def __init__(self, *args):
if not (len(args) % 2 == 0 and len(args) >= 2):
- raise AssertionError(
+ raise ValueError(
'Need to invoke as'
'option_context(pat, val, [(pat, val), ...)).'
)
diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py
index 80a3fe9be7003..6d4486525f4eb 100644
--- a/pandas/tests/test_config.py
+++ b/pandas/tests/test_config.py
@@ -170,26 +170,13 @@ def test_set_option(self):
def test_set_option_empty_args(self):
- self.assertRaises(AssertionError, self.cf.set_option)
+ self.assertRaises(ValueError, self.cf.set_option)
def test_set_option_uneven_args(self):
- self.assertRaises(AssertionError, self.cf.set_option, 'a.b', 2, 'b.c')
-
-
- def test_set_option_2_kwargs(self):
- self.assertRaises(AssertionError, self.cf.set_option, 'a.b', 2,
- silenadf=2, asdf=2)
-
- def test_set_option_invalid_kwargs_key(self):
- self.assertRaises(ValueError, self.cf.set_option, 'a.b', 2,
- silenadf=2)
-
- def test_set_option_invalid_kwargs_value_type(self):
- self.assertRaises(TypeError, self.cf.set_option, 'a.b', 2,
- silent=2)
+ self.assertRaises(ValueError, self.cf.set_option, 'a.b', 2, 'b.c')
def test_set_option_invalid_single_argument_type(self):
- self.assertRaises(AssertionError, self.cf.set_option, 2)
+ self.assertRaises(ValueError, self.cf.set_option, 2)
def test_set_option_multiple(self):
self.cf.register_option('a', 1, 'doc')
| <del>
Moving `option_context` to toplevel rather then making `set_option` a context manager means
a one line change instead of metaclasses and config_prefix subtlety and the rest of the ich.
When a given approach to something explodes in complexity I'll [bravely, bravely run away](http://www.youtube.com/watch?v=BZwuTo7zKM8) every single time.
</del>
closes #5618
replaces #5625.
@jtratner, your points about config_prefix being broken are valid, feel free to pick up in the future
if you're so inclined.
cc @jseabold
| https://api.github.com/repos/pandas-dev/pandas/pulls/5752 | 2013-12-19T22:42:42Z | 2013-12-31T00:49:22Z | 2013-12-31T00:49:22Z | 2014-06-14T10:26:45Z |
DOC: added missing argument in Series.apply | diff --git a/pandas/core/series.py b/pandas/core/series.py
index ecfd99e61a090..c41cdc89b7bb1 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1993,6 +1993,9 @@ def apply(self, func, convert_dtype=True, args=(), **kwds):
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results. If
False, leave as dtype=object
+ args : tuple
+ Positional arguments to pass to function in addition to the value
+ Additional keyword arguments will be passed as keywords to the function
See also
--------
| https://api.github.com/repos/pandas-dev/pandas/pulls/5750 | 2013-12-19T20:57:33Z | 2013-12-20T01:55:41Z | 2013-12-20T01:55:41Z | 2015-04-25T23:33:13Z | |
API/REGRESS: partial revert of f8b6208675b5b10d73a74f50478fa5e37b43fc02 (GH5720,GH5744) | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 4d6af77880747..8a3869d15c85f 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1916,10 +1916,11 @@ def _ensure_valid_index(self, value):
'Series')
self._data.set_axis(1, value.index.copy(), check_axis=False)
+ # we are a scalar
+ # noop
else:
- raise ValueError('Cannot set a frame with no defined index '
- 'and a value that cannot be converted to a '
- 'Series')
+
+ pass
def _set_item(self, key, value):
"""
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 11bf985bea041..c7fb209b4aacb 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1778,14 +1778,12 @@ def f():
# don't create rows when empty
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
- def f():
- y['New'] = np.nan
- self.assertRaises(ValueError, f)
+ y['New'] = np.nan
+ assert_frame_equal(y,DataFrame(columns=['A','B','New']))
df = DataFrame(columns=['a', 'b', 'c c'])
- def f():
- df['d'] = 3
- self.assertRaises(ValueError, f)
+ df['d'] = 3
+ assert_frame_equal(df,DataFrame(columns=['a','b','c c','d']))
assert_series_equal(df['c c'],Series(name='c c',dtype=object))
# reindex columns is ok
| ```
allow assignment of a column in a frame with a scalar with no index (so adds to the columns),
instead of raising; this preservers 0.12 behavior
```
related #5720, #5744
going back to 0.12 behavior
effectively can add a column by assigning a scalar to a frame that doesn't have an index
need a more compelling reason to raise here
```
In [4]: df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
In [5]: y = df[df.A > 5]
In [6]: y
Out[6]:
Empty DataFrame
Columns: [A, B]
Index: []
[0 rows x 2 columns]
In [7]: y['New'] = np.nan
In [8]: y
Out[8]:
Empty DataFrame
Columns: [A, B, New]
Index: []
[0 rows x 3 columns]
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5747 | 2013-12-19T17:32:02Z | 2013-12-19T18:04:07Z | 2013-12-19T18:04:07Z | 2014-06-21T16:49:50Z |
BUG: don't allow an empty dataframe to have scalar assignment succeed (GH5744) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 3a22de3cb43f3..8ac168e18233f 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -247,7 +247,7 @@ API Changes
(:issue:`4390`)
- allow ``ix/loc`` for Series/DataFrame/Panel to set on any axis even when
the single-key is not currently contained in the index for that axis
- (:issue:`2578`, :issue:`5226`, :issue:`5632`, :issue:`5720`)
+ (:issue:`2578`, :issue:`5226`, :issue:`5632`, :issue:`5720`, :issue:`5744`)
- Default export for ``to_clipboard`` is now csv with a sep of `\t` for
compat (:issue:`3368`)
- ``at`` now will enlarge the object inplace (and return the same)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 2f299488bd321..4d6af77880747 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1916,6 +1916,11 @@ def _ensure_valid_index(self, value):
'Series')
self._data.set_axis(1, value.index.copy(), check_axis=False)
+ else:
+ raise ValueError('Cannot set a frame with no defined index '
+ 'and a value that cannot be converted to a '
+ 'Series')
+
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index e601755ba8aaf..010020630cd18 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1753,13 +1753,26 @@ def f():
str(df)
assert_frame_equal(df,expected)
- # GH5720
+ # GH5720, GH5744
# don't create rows when empty
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
- y['New'] = np.nan
- expected = DataFrame(columns=['A','B','New'])
- assert_frame_equal(y, expected)
+ def f():
+ y['New'] = np.nan
+ self.assertRaises(ValueError, f)
+
+ df = DataFrame(columns=['a', 'b', 'c c'])
+ def f():
+ df['d'] = 3
+ self.assertRaises(ValueError, f)
+ assert_series_equal(df['c c'],Series(name='c c',dtype=object))
+
+ # reindex columns is ok
+ df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
+ y = df[df.A > 5]
+ result = y.reindex(columns=['A','B','C'])
+ expected = DataFrame(columns=['A','B','C'])
+ assert_frame_equal(result,expected)
def test_cache_updating(self):
# GH 4939, make sure to update the cache on setitem
| closes #5744
related #5720
I think this is the correct behavior
```
In [1]: df = pd.DataFrame(columns=['a', 'b', 'c c'])
In [2]: df['d'] = 3
ValueError: Cannot set a frame with no defined index and a value that cannot be converted to a Series
In [3]: df['c c']
Out[3]: Series([], name: c c, dtype: object)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5745 | 2013-12-19T14:43:10Z | 2013-12-19T15:20:21Z | 2013-12-19T15:20:21Z | 2014-06-16T20:48:04Z |
BUG: don't lose dtypes when concatenating empty array-likes | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 99b8bfc460068..9650089279f12 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -118,6 +118,7 @@ Bug Fixes
- Bug in rolling skew/kurtosis when passed a Series with bad data (:issue:`5749`)
- Bug in scipy ``interpolate`` methods with a datetime index (:issue:`5975`)
- Bug in NaT comparison if a mixed datetime/np.datetime64 with NaT were passed (:issue:`5968`)
+ - Fixed bug with ``pd.concat`` losing dtype information if all inputs are empty (:issue:`5742`)
pandas 0.13.0
-------------
diff --git a/pandas/core/common.py b/pandas/core/common.py
index e8bcfa71fe32a..cd78f35aabdf9 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -2326,20 +2326,23 @@ def _check_as_is(x):
def _concat_compat(to_concat, axis=0):
# filter empty arrays
- to_concat = [x for x in to_concat if x.shape[axis] > 0]
-
- # return the empty np array, if nothing to concatenate, #3121
- if not to_concat:
- return np.array([], dtype=object)
-
- is_datetime64 = [x.dtype == _NS_DTYPE for x in to_concat]
- if all(is_datetime64):
- # work around NumPy 1.6 bug
- new_values = np.concatenate([x.view(np.int64) for x in to_concat],
- axis=axis)
- return new_values.view(_NS_DTYPE)
- elif any(is_datetime64):
- to_concat = [_to_pydatetime(x) for x in to_concat]
+ nonempty = [x for x in to_concat if x.shape[axis] > 0]
+
+ # If all arrays are empty, there's nothing to convert, just short-cut to
+ # the concatenation, #3121.
+ #
+ # Creating an empty array directly is tempting, but the winnings would be
+ # marginal given that it would still require shape & dtype calculation and
+ # np.concatenate which has them both implemented is compiled.
+ if nonempty:
+ is_datetime64 = [x.dtype == _NS_DTYPE for x in nonempty]
+ if all(is_datetime64):
+ # work around NumPy 1.6 bug
+ new_values = np.concatenate([x.view(np.int64) for x in nonempty],
+ axis=axis)
+ return new_values.view(_NS_DTYPE)
+ elif any(is_datetime64):
+ to_concat = [_to_pydatetime(x) for x in nonempty]
return np.concatenate(to_concat, axis=axis)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index edcf7a0a491b0..3b6e4ba445ce0 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -11909,6 +11909,23 @@ def test_to_csv_date_format(self):
assert_frame_equal(test, nat_frame)
+ def test_concat_empty_dataframe_dtypes(self):
+ df = DataFrame(columns=list("abc"))
+ df['a'] = df['a'].astype(np.bool_)
+ df['b'] = df['b'].astype(np.int32)
+ df['c'] = df['c'].astype(np.float64)
+
+ result = pd.concat([df, df])
+ self.assertEqual(result['a'].dtype, np.bool_)
+ self.assertEqual(result['b'].dtype, np.int32)
+ self.assertEqual(result['c'].dtype, np.float64)
+
+ result = pd.concat([df, df.astype(np.float64)])
+ self.assertEqual(result['a'].dtype, np.object_)
+ self.assertEqual(result['b'].dtype, np.float64)
+ self.assertEqual(result['c'].dtype, np.float64)
+
+
def skip_if_no_ne(engine='numexpr'):
if engine == 'numexpr':
try:
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 70dd38c2641ef..6b4a9a2bc4c22 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -5441,6 +5441,15 @@ def test_numpy_unique(self):
# it works!
result = np.unique(self.ts)
+ def test_concat_empty_series_dtypes(self):
+ self.assertEqual(pd.concat([Series(dtype=np.float64)]).dtype, np.float64)
+ self.assertEqual(pd.concat([Series(dtype=np.int8)]).dtype, np.int8)
+ self.assertEqual(pd.concat([Series(dtype=np.bool_)]).dtype, np.bool_)
+
+ self.assertEqual(pd.concat([Series(dtype=np.bool_),
+ Series(dtype=np.int32)]).dtype, np.int32)
+
+
class TestSeriesNonUnique(tm.TestCase):
| I develop an application that does quite a bit of data manipulation. Being aware of `pandas` being functional-but-not-really-heavily-optimized I use it to maintain label consistency and for grouping/merging data, heavy-duty maths is usually done with `numpy` ufuncs. The application contains entities that have no data at the beginning and receive data over their lifetimes. Every once in a while an incoming data chunk will contain no data for a certain entity. Usually it's fine but if the entity was just created the following happens:
``` python
In [1]: pd.__version__
Out[1]: '0.13.0rc1-92-gf6fd509'
In [2]: data = pd.Series(dtype=np.float)
In [3]: chunk = pd.Series(dtype=np.float)
In [4]: pd.concat([data, chunk])
Out[4]: Series([], dtype: object)
```
After that ufuncs like `isnan` cease to work on `data.values` since its dtype has changed to `object`. This PR fixes it.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5742 | 2013-12-19T06:41:20Z | 2014-01-18T14:20:24Z | 2014-01-18T14:20:24Z | 2014-06-15T06:34:27Z |
BUG: Fixed issue #5732. prefix option works in csv | diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index bd0649a7a85f3..bc7002c6b89b0 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -972,7 +972,7 @@ def __init__(self, src, **kwds):
if self.names is None:
if self.prefix:
- self.names = ['X%d' % i
+ self.names = ['%s%d' % (self.prefix, i)
for i in range(self._reader.table_width)]
else:
self.names = lrange(self._reader.table_width)
@@ -1563,7 +1563,7 @@ def _infer_columns(self):
num_original_columns = ncols
if not names:
if self.prefix:
- columns = [['X%d' % i for i in range(ncols)]]
+ columns = [['%s%d' % (self.prefix, i) for i in range(ncols)]]
else:
columns = [lrange(ncols)]
columns = self._handle_usecols(columns, columns[0])
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 93a26b70a019e..4f52f4c2fdbe4 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -2072,6 +2072,24 @@ def test_catch_too_many_names(self):
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data), header=0, names=['a', 'b', 'c', 'd'])
+ def test_with_prefix(self):
+ # Issue 5732
+ data = """\
+1,2,3
+4,,6
+7,8,9
+10,11,12\n"""
+ result = self.read_csv(StringIO(data), header=None, prefix="abc")
+ tm.assert_equal(list(result.columns), ['abc0', 'abc1', 'abc2'])
+
+ # Test ignore prefix if not inferring headers.
+ result = self.read_csv(StringIO(data), prefix="abc")
+ tm.assert_equal(list(result.columns), ['1', '2', '3'])
+
+ # Test ignore prefix if not inferring headers.
+ result = self.read_csv(StringIO(data), prefix="abc", names=['a', 'b', 'c'])
+ tm.assert_equal(list(result.columns), ['a', 'b', 'c'])
+
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
| Closes issue https://github.com/pydata/pandas/issues/5732
Good catch, @nmichaud.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5741 | 2013-12-19T05:54:44Z | 2013-12-19T16:28:40Z | null | 2014-06-20T00:02:17Z |
BUG: return Series as DataFrame.dtypes/ftypes for empty dataframes | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 8b753abc83ca7..c33c2fe61429a 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -65,6 +65,7 @@ API Changes
- ``select_as_multiple`` will always raise a ``KeyError``, when a key or the selector is not found (:issue:`6177`)
- ``df['col'] = value`` and ``df.loc[:,'col'] = value`` are now completely equivalent;
previously the ``.loc`` would not necessarily coerce the dtype of the resultant series (:issue:`6149`)
+- ``dtypes`` and ``ftypes`` now return a series with ``dtype=object`` on empty containers (:issue:`5740`)
Experimental Features
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c8e1247416806..d607be6bfb733 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1947,7 +1947,8 @@ def get_ftype_counts(self):
def dtypes(self):
""" Return the dtypes in this object """
from pandas import Series
- return Series(self._data.get_dtypes(),index=self._info_axis)
+ return Series(self._data.get_dtypes(), index=self._info_axis,
+ dtype=np.object_)
@property
def ftypes(self):
@@ -1956,7 +1957,8 @@ def ftypes(self):
in this object.
"""
from pandas import Series
- return Series(self._data.get_ftypes(),index=self._info_axis)
+ return Series(self._data.get_ftypes(), index=self._info_axis,
+ dtype=np.object_)
def as_blocks(self, columns=None):
"""
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 6eddd52dba634..f85c95e8b81db 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -12164,6 +12164,47 @@ def test_concat_empty_dataframe_dtypes(self):
self.assertEqual(result['b'].dtype, np.float64)
self.assertEqual(result['c'].dtype, np.float64)
+ def test_empty_frame_dtypes_ftypes(self):
+ empty_df = pd.DataFrame()
+ assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
+ assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
+
+ nocols_df = pd.DataFrame(index=[1,2,3])
+ assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
+ assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
+
+ norows_df = pd.DataFrame(columns=list("abc"))
+ assert_series_equal(norows_df.dtypes, pd.Series(np.object, index=list("abc")))
+ assert_series_equal(norows_df.ftypes, pd.Series('object:dense', index=list("abc")))
+
+ norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
+ assert_series_equal(norows_int_df.dtypes, pd.Series(np.dtype('int32'), index=list("abc")))
+ assert_series_equal(norows_int_df.ftypes, pd.Series('int32:dense', index=list("abc")))
+
+ odict = OrderedDict
+ df = pd.DataFrame(odict([('a', 1), ('b', True), ('c', 1.0)]), index=[1, 2, 3])
+ assert_series_equal(df.dtypes, pd.Series(odict([('a', np.int64),
+ ('b', np.bool),
+ ('c', np.float64)])))
+ assert_series_equal(df.ftypes, pd.Series(odict([('a', 'int64:dense'),
+ ('b', 'bool:dense'),
+ ('c', 'float64:dense')])))
+
+ # same but for empty slice of df
+ assert_series_equal(df[:0].dtypes, pd.Series(odict([('a', np.int),
+ ('b', np.bool),
+ ('c', np.float)])))
+ assert_series_equal(df[:0].ftypes, pd.Series(odict([('a', 'int64:dense'),
+ ('b', 'bool:dense'),
+ ('c', 'float64:dense')])))
+
+def skip_if_no_ne(engine='numexpr'):
+ if engine == 'numexpr':
+ try:
+ import numexpr as ne
+ except ImportError:
+ raise nose.SkipTest("cannot query engine numexpr when numexpr not "
+ "installed")
def skip_if_no_pandas_parser(parser):
| `DataFrame.dtypes` and `DataFrame.ftypes` values were inconsistent for empty dataframes:
``` python
In [2]: pd.DataFrame().dtypes
Out[2]:
Empty DataFrame
Columns: []
Index: []
[0 rows x 0 columns]
In [3]: pd.DataFrame().ftypes
Out[3]:
Empty DataFrame
Columns: []
Index: []
[0 rows x 0 columns]
In [4]: pd.DataFrame(columns=list("abc")).ftypes
Out[4]:
a NaN
b NaN
c NaN
dtype: float64
In [5]: pd.DataFrame(columns=list("abc")).dtypes
Out[5]:
a NaN
b NaN
c NaN
dtype: float64
In [6]: pd.__version__
Out[6]: '0.13.0rc1-92-gf6fd509'
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5740 | 2013-12-19T05:42:17Z | 2014-02-17T13:59:56Z | 2014-02-17T13:59:56Z | 2014-06-12T17:49:32Z |
BLD: Handle git describe failure more cleanly in setup.py GH5495 | diff --git a/setup.py b/setup.py
index fe921b1ff6029..5a09a27f36eee 100755
--- a/setup.py
+++ b/setup.py
@@ -197,26 +197,30 @@ def build_extensions(self):
FULLVERSION = VERSION
if not ISRELEASED:
+ import subprocess
FULLVERSION += '.dev'
- try:
- import subprocess
+
+ for cmd in ['git','git.cmd']:
try:
- pipe = subprocess.Popen(["git", "describe", "--always"],
- stdout=subprocess.PIPE).stdout
- except OSError:
- # msysgit compatibility
- pipe = subprocess.Popen(
- ["git.cmd", "describe", "--always"],
- stdout=subprocess.PIPE).stdout
- rev = pipe.read().strip()
- # makes distutils blow up on Python 2.7
- if sys.version_info[0] >= 3:
- rev = rev.decode('ascii')
-
- FULLVERSION = rev.lstrip('v')
-
- except:
- warnings.warn("WARNING: Couldn't get git revision")
+ pipe = subprocess.Popen([cmd, "describe", "--always"],
+ stdout=subprocess.PIPE)
+ (so,serr) = pipe.communicate()
+ if pipe.returncode == 0:
+ break
+ except:
+ pass
+
+ if pipe.returncode != 0:
+ warnings.warn("WARNING: Couldn't get git revision, using generic version string")
+ else:
+ rev = so.strip()
+ # makes distutils blow up on Python 2.7
+ if sys.version_info[0] >= 3:
+ rev = rev.decode('ascii')
+
+ # use result og git describe as version string
+ FULLVERSION = rev.lstrip('v')
+
else:
FULLVERSION += QUALIFIER
| closes #5495
| https://api.github.com/repos/pandas-dev/pandas/pulls/5739 | 2013-12-19T04:58:11Z | 2013-12-30T20:55:38Z | 2013-12-30T20:55:38Z | 2014-06-25T08:29:42Z |
BLD: ci/print_versions.py learned to output json | diff --git a/ci/print_versions.py b/ci/print_versions.py
index 560695532e67c..f9123fc28f6fe 100755
--- a/ci/print_versions.py
+++ b/ci/print_versions.py
@@ -1,7 +1,8 @@
#!/usr/bin/env python
-def show_versions():
+
+def show_versions(as_json=False):
import imp
import os
fn = __file__
@@ -9,8 +10,15 @@ def show_versions():
pandas_dir = os.path.abspath(os.path.join(this_dir,".."))
sv_path = os.path.join(pandas_dir, 'pandas','util')
mod = imp.load_module('pvmod', *imp.find_module('print_versions', [sv_path]))
- return mod.show_versions()
+ return mod.show_versions(as_json)
if __name__ == '__main__':
- show_versions()
+ # optparse is 2.6-safe
+ from optparse import OptionParser
+ parser = OptionParser()
+ parser.add_option("-j", "--json", action="store_true", help="Format output as JSON")
+
+ (options, args) = parser.parse_args()
+
+ show_versions(as_json=options.json)
diff --git a/pandas/util/print_versions.py b/pandas/util/print_versions.py
index c40366ec2d804..ca94448f7294c 100644
--- a/pandas/util/print_versions.py
+++ b/pandas/util/print_versions.py
@@ -1,167 +1,96 @@
import os
import platform
import sys
+import struct
+def get_sys_info():
+ "Returns system information as a dict"
-def show_versions():
- print("\nINSTALLED VERSIONS")
- print("------------------")
- print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
-
+ # list of tuples over dict because OrderedDict not in 2.6, least
+ # resistance.
+ blob = []
try:
sysname, nodename, release, version, machine, processor = platform.uname()
- print("OS: %s" % (sysname))
- print("Release: %s" % (release))
- #print("Version: %s" % (version))
- #print("Machine: %s" % (machine))
- print("Processor: %s" % (processor))
- print("byteorder: %s" % sys.byteorder)
- print("LC_ALL: %s" % os.environ.get('LC_ALL', "None"))
- print("LANG: %s" % os.environ.get('LANG', "None"))
+ blob = [
+ ("python", "%d.%d.%d.%s.%s" % sys.version_info[:]),
+ ("python-bits", struct.calcsize("P") * 8),
+ ("OS","%s" % (sysname)),
+ ("OS-release", "%s" % (release)),
+ # ("Version", "%s" % (version)),
+ # ("Machine", "%s" % (machine)),
+ ("processor", "%s" % (processor)),
+ ("byteorder", "%s" % sys.byteorder),
+ ("LC_ALL", "%s" % os.environ.get('LC_ALL', "None")),
+ ("LANG", "%s" % os.environ.get('LANG', "None")),
+
+ ]
except:
pass
- print("")
-
- try:
- import pandas
- print("pandas: %s" % pandas.__version__)
- except:
- print("pandas: Not installed")
-
- try:
- import Cython
- print("Cython: %s" % Cython.__version__)
- except:
- print("Cython: Not installed")
-
- try:
- import numpy
- print("Numpy: %s" % numpy.version.version)
- except:
- print("Numpy: Not installed")
-
- try:
- import scipy
- print("Scipy: %s" % scipy.version.version)
- except:
- print("Scipy: Not installed")
-
- try:
- import statsmodels
- print("statsmodels: %s" % statsmodels.__version__)
- except:
- print("statsmodels: Not installed")
- try:
- import patsy
- print(" patsy: %s" % patsy.__version__)
- except:
- print(" patsy: Not installed")
-
- try:
- import scikits.timeseries as ts
- print("scikits.timeseries: %s" % ts.__version__)
- except:
- print("scikits.timeseries: Not installed")
-
- try:
- import dateutil
- print("dateutil: %s" % dateutil.__version__)
- except:
- print("dateutil: Not installed")
-
- try:
- import pytz
- print("pytz: %s" % pytz.VERSION)
- except:
- print("pytz: Not installed")
-
- try:
- import bottleneck
- print("bottleneck: %s" % bottleneck.__version__)
- except:
- print("bottleneck: Not installed")
-
- try:
- import tables
- print("PyTables: %s" % tables.__version__)
- except:
- print("PyTables: Not Installed")
-
- try:
- import numexpr
- print(" numexpr: %s" % numexpr.__version__)
- except:
- print(" numexpr: Not Installed")
-
- try:
- import matplotlib
- print("matplotlib: %s" % matplotlib.__version__)
- except:
- print("matplotlib: Not installed")
-
- try:
- import openpyxl
- print("openpyxl: %s" % openpyxl.__version__)
- except:
- print("openpyxl: Not installed")
-
- try:
- import xlrd
- print("xlrd: %s" % xlrd.__VERSION__)
- except:
- print("xlrd: Not installed")
-
- try:
- import xlwt
- print("xlwt: %s" % xlwt.__VERSION__)
- except:
- print("xlwt: Not installed")
-
- try:
- import xlsxwriter
- print("xlsxwriter: %s" % xlsxwriter.__version__)
- except:
- print("xlsxwriter: Not installed")
-
- try:
- import sqlalchemy
- print("sqlalchemy: %s" % sqlalchemy.__version__)
- except:
- print("sqlalchemy: Not installed")
-
- try:
- import lxml
- from lxml import etree
- print("lxml: %s" % etree.__version__)
- except:
- print("lxml: Not installed")
-
- try:
- import bs4
- print("bs4: %s" % bs4.__version__)
- except:
- print("bs4: Not installed")
-
- try:
- import html5lib
- print("html5lib: %s" % html5lib.__version__)
- except:
- print("html5lib: Not installed")
-
- try:
- import bq
- print("bigquery: %s" % bq._VersionNumber())
- except:
- print("bigquery: Not installed")
-
- try:
- import apiclient
- print("apiclient: %s" % apiclient.__version__)
- except:
- print("apiclient: Not installed")
-
+ return blob
+
+
+def show_versions(as_json=False):
+ import imp
+ sys_info = get_sys_info()
+
+ deps = [
+ # (MODULE_NAME, f(mod) -> mod version)
+ ("pandas", lambda mod: mod.__version__),
+ ("Cython", lambda mod: mod.__version__),
+ ("numpy", lambda mod: mod.version.version),
+ ("scipy", lambda mod: mod.version.version),
+ ("statsmodels", lambda mod: mod.__version__),
+ ("patsy", lambda mod: mod.__version__),
+ ("scikits.timeseries", lambda mod: mod.__version__),
+ ("dateutil", lambda mod: mod.__version__),
+ ("pytz", lambda mod: mod.VERSION),
+ ("bottleneck", lambda mod: mod.__version__),
+ ("tables", lambda mod: mod.__version__),
+ ("numexpr", lambda mod: mod.__version__),
+ ("matplotlib", lambda mod: mod.__version__),
+ ("openpyxl", lambda mod: mod.__version__),
+ ("xlrd", lambda mod: mod.__VERSION__),
+ ("xlwt", lambda mod: mod.__VERSION__),
+ ("xlsxwriter", lambda mod: mod.__version__),
+ ("sqlalchemy", lambda mod: mod.__version__),
+ ("lxml", lambda mod: mod.etree.__version__),
+ ("bs4", lambda mod: mod.__version__),
+ ("html5lib", lambda mod: mod.__version__),
+ ("bq", lambda mod: mod._VersionNumber()),
+ ("apiclient", lambda mod: mod.__version__),
+ ]
+
+ deps_blob = list()
+ for (modname, ver_f) in deps:
+ try:
+ mod = imp.load_module(modname, *imp.find_module(modname))
+ ver = ver_f(mod)
+ deps_blob.append((modname, ver))
+ except:
+ deps_blob.append((modname, None))
+
+ if (as_json):
+ # 2.6-safe
+ try:
+ import json
+ except:
+ import simplejson as json
+
+ print(json.dumps(dict(system=dict(sys_info), dependencies=dict(deps_blob)), indent=2))
+
+ else:
+
+ print("\nINSTALLED VERSIONS")
+ print("------------------")
+
+ for k, stat in sys_info:
+ print("%s: %s" % (k, stat))
+
+ print("")
+ for k, stat in deps_blob:
+ print("%s: %s" % (k, stat))
if __name__ == "__main__":
- show_versions()
+ show_versions(as_json=False)
| Putting some pieces in place.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5738 | 2013-12-19T04:31:31Z | 2013-12-31T01:24:12Z | 2013-12-31T01:24:12Z | 2014-06-18T11:08:50Z |
BUG: raise KeyError if missing value in py3 on multi-index (GH5725), revisted | diff --git a/pandas/core/index.py b/pandas/core/index.py
index 30f93564db318..5c77c1e5e9516 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -2306,8 +2306,11 @@ def _try_mi(k):
compat.PY3 and isinstance(key, compat.string_types)):
try:
return _try_mi(key)
+ except (KeyError):
+ raise
except:
pass
+
try:
return _try_mi(Timestamp(key))
except:
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index e601755ba8aaf..3107b1c679cc7 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -844,15 +844,36 @@ def test_getitem_multiindex(self):
# GH 5725
# the 'A' happens to be a valid Timestamp so the doesn't raise the appropriate
# error, only in PY3 of course!
- index = MultiIndex(levels=[['A', 'B', 'C'], [0, 26, 27, 37, 57, 67, 75, 82]],
+ index = MultiIndex(levels=[['D', 'B', 'C'], [0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2], [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
arr = np.random.randn(len(index),1)
df = DataFrame(arr,index=index,columns=['val'])
+ result = df.val['D']
+ expected = Series(arr.ravel()[0:3],name='val',index=Index([26,37,57],name='day'))
+ assert_series_equal(result,expected)
+
+ def f():
+ df.val['A']
+ self.assertRaises(KeyError, f)
+
+ def f():
+ df.val['X']
+ self.assertRaises(KeyError, f)
+
+ # A is treated as a special Timestamp
+ index = MultiIndex(levels=[['A', 'B', 'C'], [0, 26, 27, 37, 57, 67, 75, 82]],
+ labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2], [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
+ names=['tag', 'day'])
+ df = DataFrame(arr,index=index,columns=['val'])
result = df.val['A']
expected = Series(arr.ravel()[0:3],name='val',index=Index([26,37,57],name='day'))
assert_series_equal(result,expected)
+ def f():
+ df.val['X']
+ self.assertRaises(KeyError, f)
+
def test_setitem_dtype_upcast(self):
# GH3216
| #5725
raise KeyError appropriately under py3 on mi series
| https://api.github.com/repos/pandas-dev/pandas/pulls/5737 | 2013-12-19T01:04:09Z | 2013-12-19T01:24:38Z | 2013-12-19T01:24:38Z | 2014-06-19T17:03:00Z |
TST: Cleanup temp files in Excel test. | diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index eeeb914a3754e..edcb80ae74f6f 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -1070,12 +1070,15 @@ def test_ExcelWriter_dispatch(self):
except ImportError:
_skip_if_no_openpyxl()
writer_klass = _OpenpyxlWriter
- writer = ExcelWriter('apple.xlsx')
- tm.assert_isinstance(writer, writer_klass)
+
+ with ensure_clean('.xlsx') as path:
+ writer = ExcelWriter(path)
+ tm.assert_isinstance(writer, writer_klass)
_skip_if_no_xlwt()
- writer = ExcelWriter('apple.xls')
- tm.assert_isinstance(writer, _XlwtWriter)
+ with ensure_clean('.xls') as path:
+ writer = ExcelWriter(path)
+ tm.assert_isinstance(writer, _XlwtWriter)
def test_register_writer(self):
# some awkward mocking to test out dispatch and such actually works
| Fix for issue #5735
| https://api.github.com/repos/pandas-dev/pandas/pulls/5736 | 2013-12-19T00:30:15Z | 2013-12-19T00:31:32Z | 2013-12-19T00:31:32Z | 2014-06-30T06:58:37Z |
Fix prefix argument for read_csv/read_table | diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index bd0649a7a85f3..7004bcaf0cb74 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -972,7 +972,7 @@ def __init__(self, src, **kwds):
if self.names is None:
if self.prefix:
- self.names = ['X%d' % i
+ self.names = ['%s%d' % (self.prefix, i)
for i in range(self._reader.table_width)]
else:
self.names = lrange(self._reader.table_width)
@@ -1563,7 +1563,7 @@ def _infer_columns(self):
num_original_columns = ncols
if not names:
if self.prefix:
- columns = [['X%d' % i for i in range(ncols)]]
+ columns = [['%s%d' % (self.prefix,i) for i in range(ncols)]]
else:
columns = [lrange(ncols)]
columns = self._handle_usecols(columns, columns[0])
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 93a26b70a019e..4fe95647bae28 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -968,6 +968,22 @@ def test_no_header(self):
self.assert_(np.array_equal(df2.columns, names))
+ def test_no_header_prefix(self):
+ data = """1,2,3,4,5
+6,7,8,9,10
+11,12,13,14,15
+"""
+ df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
+ header=None)
+
+ expected = [[1, 2, 3, 4, 5.],
+ [6, 7, 8, 9, 10],
+ [11, 12, 13, 14, 15]]
+ tm.assert_almost_equal(df_pref.values, expected)
+
+ self.assert_(np.array_equal(df_pref.columns,
+ ['Field0', 'Field1', 'Field2', 'Field3', 'Field4']))
+
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
| Closes #5732
| https://api.github.com/repos/pandas-dev/pandas/pulls/5733 | 2013-12-18T15:29:56Z | 2014-01-03T01:09:56Z | 2014-01-03T01:09:56Z | 2014-06-14T13:17:44Z |
DOC: add demo of factorize | diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst
index f50586f12d2dd..288cd48b10aca 100644
--- a/doc/source/reshaping.rst
+++ b/doc/source/reshaping.rst
@@ -417,3 +417,25 @@ This function is often used along with discretization functions like ``cut``:
get_dummies(cut(values, bins))
+
+Factorizing values
+------------------
+
+To encode 1-d values as an enumerated type use ``factorize``:
+
+.. ipython:: python
+
+ x = pd.Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
+ x
+ labels, uniques = pd.factorize(x)
+ labels
+ uniques
+
+Note that ``factorize`` is similar to ``numpy.unique``, but differs in its
+handling of NaN:
+
+.. ipython:: python
+
+ pd.factorize(x, sort=True)
+ np.unique(x, return_inverse=True)[::-1]
+
| Here is some documentation of `factorize`, per [this request](http://stackoverflow.com/questions/20619851/pandas-equivalent-of-statas-encode/20619971?noredirect=1#comment30860849_20619971).
| https://api.github.com/repos/pandas-dev/pandas/pulls/5731 | 2013-12-18T14:57:56Z | 2013-12-18T15:15:45Z | 2013-12-18T15:15:45Z | 2014-07-16T08:43:44Z |
CLN/BUG: indexing fixes (GH5725, GH5727) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index dc82550be6500..3a22de3cb43f3 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -826,6 +826,7 @@ Bug Fixes
- Bug in repeated indexing of object with resultant non-unique index (:issue:`5678`)
- Bug in fillna with Series and a passed series/dict (:issue:`5703`)
- Bug in groupby transform with a datetime-like grouper (:issue:`5712`)
+ - Bug in multi-index selection in PY3 when using certain keys (:issue:`5725`)
pandas 0.12.0
-------------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index b77ea2b22f4fa..e07655b0539a5 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -721,13 +721,13 @@ def __setstate__(self, state):
# to avoid definitional recursion
# e.g. say fill_value needing _data to be
# defined
- for k in self._internal_names:
+ for k in self._internal_names_set:
if k in state:
v = state[k]
object.__setattr__(self, k, v)
for k, v in state.items():
- if k not in self._internal_names:
+ if k not in self._internal_names_set:
object.__setattr__(self, k, v)
else:
@@ -938,15 +938,22 @@ def to_clipboard(self, excel=None, sep=None, **kwargs):
@classmethod
def _create_indexer(cls, name, indexer):
""" create an indexer like _name in the class """
- iname = '_%s' % name
- setattr(cls, iname, None)
- def _indexer(self):
- if getattr(self, iname, None) is None:
- setattr(self, iname, indexer(self, name))
- return getattr(self, iname)
+ if getattr(cls, name, None) is None:
+ iname = '_%s' % name
+ setattr(cls, iname, None)
- setattr(cls, name, property(_indexer))
+ def _indexer(self):
+ i = getattr(self, iname)
+ if i is None:
+ i = indexer(self, name)
+ setattr(self, iname, i)
+ return i
+
+ setattr(cls, name, property(_indexer))
+
+ # add to our internal names set
+ cls._internal_names_set.add(iname)
def get(self, key, default=None):
"""
@@ -1831,9 +1838,9 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
value : scalar, dict, or Series
- Value to use to fill holes (e.g. 0), alternately a dict/Series of
- values specifying which value to use for each index (for a Series) or
- column (for a DataFrame). (values not in the dict/Series will not be
+ Value to use to fill holes (e.g. 0), alternately a dict/Series of
+ values specifying which value to use for each index (for a Series) or
+ column (for a DataFrame). (values not in the dict/Series will not be
filled). This value cannot be a list.
axis : {0, 1}, default 0
0: fill column-by-column
@@ -1845,8 +1852,8 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
limit : int, default None
Maximum size gap to forward or backward fill
downcast : dict, default is None
- a dict of item->dtype of what to downcast if possible,
- or the string 'infer' which will try to downcast to an appropriate
+ a dict of item->dtype of what to downcast if possible,
+ or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible)
See also
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 7ae273d08fa87..30f93564db318 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -2300,8 +2300,14 @@ def _try_mi(k):
# a Timestamp will raise a TypeError in a multi-index
# rather than a KeyError, try it here
+ # note that a string that 'looks' like a Timestamp will raise
+ # a KeyError! (GH5725)
if isinstance(key, (datetime.datetime, np.datetime64)) or (
compat.PY3 and isinstance(key, compat.string_types)):
+ try:
+ return _try_mi(key)
+ except:
+ pass
try:
return _try_mi(Timestamp(key))
except:
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 4954decd5195b..e601755ba8aaf 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -3,7 +3,7 @@
import itertools
import warnings
-from pandas.compat import range, lrange, StringIO, lmap, map
+from pandas.compat import range, lrange, lzip, StringIO, lmap, map
from numpy import random, nan
from numpy.random import randn
import numpy as np
@@ -249,6 +249,15 @@ def _print(result, error = None):
k2 = key2
_eq(t, o, a, obj, key1, k2)
+ def test_indexer_caching(self):
+ # GH5727
+ # make sure that indexers are in the _internal_names_set
+ n = 1000001
+ arrays = [lrange(n), lrange(n)]
+ index = MultiIndex.from_tuples(lzip(*arrays))
+ s = Series(np.zeros(n), index=index)
+ str(s)
+
def test_at_and_iat_get(self):
def _check(f, func, values = False):
@@ -830,6 +839,20 @@ def test_xs_multiindex(self):
expected.columns = expected.columns.droplevel('lvl1')
assert_frame_equal(result, expected)
+ def test_getitem_multiindex(self):
+
+ # GH 5725
+ # the 'A' happens to be a valid Timestamp so the doesn't raise the appropriate
+ # error, only in PY3 of course!
+ index = MultiIndex(levels=[['A', 'B', 'C'], [0, 26, 27, 37, 57, 67, 75, 82]],
+ labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2], [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
+ names=['tag', 'day'])
+ arr = np.random.randn(len(index),1)
+ df = DataFrame(arr,index=index,columns=['val'])
+ result = df.val['A']
+ expected = Series(arr.ravel()[0:3],name='val',index=Index([26,37,57],name='day'))
+ assert_series_equal(result,expected)
+
def test_setitem_dtype_upcast(self):
# GH3216
| closes #5727
closes #5725
| https://api.github.com/repos/pandas-dev/pandas/pulls/5730 | 2013-12-18T14:52:16Z | 2013-12-18T20:30:27Z | 2013-12-18T20:30:27Z | 2014-06-20T00:36:45Z |
DOC: small doc build warning: Note -> Notes | diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 12f21df9e7c0e..3b1b220d3fac7 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -332,8 +332,8 @@ def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=False):
matches : boolean array (if as_indexer=True)
matches : array of tuples (if as_indexer=False, default but deprecated)
- Note
- ----
+ Notes
+ -----
To extract matched groups, which is the deprecated behavior of match, use
str.extract.
"""
| https://api.github.com/repos/pandas-dev/pandas/pulls/5726 | 2013-12-18T08:52:58Z | 2013-12-18T14:57:35Z | 2013-12-18T14:57:35Z | 2014-07-16T08:43:40Z | |
BUG: don't use partial setting with scalars (GH5720) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 5ce9ccd25a7fc..79079cc52a148 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -246,7 +246,7 @@ API Changes
(:issue:`4390`)
- allow ``ix/loc`` for Series/DataFrame/Panel to set on any axis even when
the single-key is not currently contained in the index for that axis
- (:issue:`2578`, :issue:`5226`, :issue:`5632`)
+ (:issue:`2578`, :issue:`5226`, :issue:`5632`, :issue:`5720`)
- Default export for ``to_clipboard`` is now csv with a sep of `\t` for
compat (:issue:`3368`)
- ``at`` now will enlarge the object inplace (and return the same)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 90641a833f2a7..2f299488bd321 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1904,16 +1904,17 @@ def _ensure_valid_index(self, value):
if not len(self.index):
# GH5632, make sure that we are a Series convertible
- try:
- value = Series(value)
- except:
- pass
+ if is_list_like(value):
+ try:
+ value = Series(value)
+ except:
+ pass
- if not isinstance(value, Series):
- raise ValueError('Cannot set a frame with no defined index '
- 'and a value that cannot be converted to a '
- 'Series')
- self._data.set_axis(1, value.index.copy(), check_axis=False)
+ if not isinstance(value, Series):
+ raise ValueError('Cannot set a frame with no defined index '
+ 'and a value that cannot be converted to a '
+ 'Series')
+ self._data.set_axis(1, value.index.copy(), check_axis=False)
def _set_item(self, key, value):
"""
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index e396bee3f4ad9..4954decd5195b 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1730,6 +1730,14 @@ def f():
str(df)
assert_frame_equal(df,expected)
+ # GH5720
+ # don't create rows when empty
+ df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
+ y = df[df.A > 5]
+ y['New'] = np.nan
+ expected = DataFrame(columns=['A','B','New'])
+ assert_frame_equal(y, expected)
+
def test_cache_updating(self):
# GH 4939, make sure to update the cache on setitem
| closes #5720
| https://api.github.com/repos/pandas-dev/pandas/pulls/5723 | 2013-12-17T20:36:18Z | 2013-12-17T21:32:24Z | 2013-12-17T21:32:24Z | 2014-06-21T16:51:31Z |
DOC: DatetimeIndex accepts name param | diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index fd9fac58a973c..23b949c1fedfb 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -119,6 +119,8 @@ class DatetimeIndex(Int64Index):
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
+ name : object
+ Name to be stored in the index
"""
_join_precedence = 10
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index f2f137e18a15c..f4dcdb7a44a3e 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -1966,6 +1966,11 @@ def test_constructor_coverage(self):
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
+ def test_constructor_name(self):
+ idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
+ name='TEST')
+ self.assertEquals(idx.name, 'TEST')
+
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
| Just a docstring change to reflect `DatetimeIndex` taking a name parameter. I added a test since it wasn't explicitly tested anywhere.
``` python
In [1]: from pandas import DatetimeIndex
In [2]: idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A', name='TEST')
In [3]: idx.name
Out[3]: 'TEST'
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5722 | 2013-12-17T16:26:49Z | 2013-12-17T17:36:22Z | 2013-12-17T17:36:22Z | 2016-11-03T12:37:39Z |
DOC: trim CONTRIBUTING.MD | diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 2966aed5f57ee..1c1423678fffb 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -4,41 +4,39 @@ All contributions, bug reports, bug fixes, documentation improvements,
enhancements and ideas are welcome.
The [GitHub "issues" tab](https://github.com/pydata/pandas/issues)
-contains some issues labeled "Good as first PR"; these are
-tasks which do not require deep knowledge of the package. Look those up if you're
+contains some issues labeled "Good as first PR"; Look those up if you're
looking for a quick way to help out.
-Please try and follow these guidelines, as this makes it easier for us to accept
-your contribution or address the issue you're having.
-
#### Bug Reports
- Please include a short, self-contained Python snippet reproducing the problem.
You can have the code formatted nicely by using [GitHub Flavored Markdown](http://github.github.com/github-flavored-markdown/) :
```python
-
+
print("I ♥ pandas!")
```
- - A [test case](https://github.com/pydata/pandas/tree/master/pandas/tests) may be more helpful.
- - Specify the pandas (and NumPy) version used. (check `pandas.__version__`
- and `numpy.__version__`)
- - Explain what the expected behavior was, and what you saw instead.
- - If the issue seems to involve some of [pandas' dependencies](https://github.com/pydata/pandas#dependencies)
- such as
- [NumPy](http://numpy.org),
- [matplotlib](http://matplotlib.org/), and
- [PyTables](http://www.pytables.org/)
- you should include (the relevant parts of) the output of
+ - Specify the pandas version used and those of it's dependencies. You can simply include the output of
[`ci/print_versions.py`](https://github.com/pydata/pandas/blob/master/ci/print_versions.py).
+ - Explain what the expected behavior was, and what you saw instead.
#### Pull Requests
- - **Make sure the test suite passes** for both python2 and python3.
- You can use `test_fast.sh`, **tox** locally, and/or enable **Travis-CI** on your fork.
- See "Getting Travis-CI going" below.
+ - **Make sure the test suite passes** on your box, Use the provided `test_*.sh` scripts or tox.
+ - Enable [Travis-Ci](http://travis-ci.org/pydata/pandas). See "Getting Travis-CI going" below.
+ - Use [proper commit messages](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html):
+ - a subject line with `< 80` chars.
+ - One blank line.
+ - Optionally, a commit message body.
+ - Please reference relevant Github issues in your commit message using `GH1234`
+ or `#1234`. Either style is fine but the '#' style generates nose when your rebase your PR.
+ - `doc/source/release.rst` and `doc/source/vx.y.z.txt` contain an ongoing
+ changelog for each release. Add entries to these files
+ as needed in a separate commit in your PR: document the fix, enhancement,
+ or (unavoidable) breaking change.
+ - Keep style fixes to a separate commit to make your PR more readable.
- An informal commit message format is in effect for the project. Please try
and adhere to it. Check `git log` for examples. Here are some common prefixes
along with general guidelines for when to use them:
@@ -49,69 +47,25 @@ your contribution or address the issue you're having.
- **BLD**: Updates to the build process/scripts
- **PERF**: Performance improvement
- **CLN**: Code cleanup
- - Commit messages should have:
- - a subject line with `< 80` chars
- - one blank line
- - a commit message body, if there's a need for one
- - If you are changing any code, you should enable Travis-CI on your fork
- to make it easier for the team to see that the PR does indeed pass all the tests.
- - **Backward-compatibility really matters**. Pandas already has a large user base and
- a lot of existing user code.
- - Don't break old code if you can avoid it.
- - If there is a need, explain it in the PR.
- - Changes to method signatures should be made in a way which doesn't break existing
- code. For example, you should beware of changes to ordering and naming of keyword
- arguments.
+ - Maintain backward-compatibility. Pandas has lots of users with lots of existing code. Don't break it.
+ - If you think breakage is required clearly state why as part of the PR.
+ - Be careful when changing method signatures.
- Add deprecation warnings where needed.
- - Performance matters. You can use the included `test_perf.sh`
- script to make sure your PR does not introduce any new performance regressions
- in the library.
+ - Performance matters. Make sure your PR hasn't introduced perf regressions by using `test_perf.sh`.
- Docstrings follow the [numpydoc](https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt) format.
+ - When writing tests, use 2.6 compatible `self.assertFoo` methods. Some polyfills such as `assertRaises`
+ can be found in `pandas.util.testing`.
+ - Generally, pandas source files should not contain attributions. You can include a "thanks to..."
+ in the release changelog. The rest is `git blame`/`git log`.
+ - For extra brownie points, you can squash and reorder the commits in your PR using `git rebase -i`.
+ Use your own judgment to decide what history needs to be preserved. If git frightens you, that's OK too.
+ - Use `raise AssertionError` over `assert` unless you want the assertion stripped by `python -o`.
- **Don't** merge upstream into a branch you're going to submit as a PR.
This can create all sorts of problems. Use `git rebase` instead. This ensures
no merge conflicts occur when your code is merged by the core team.
- - Please reference the GH issue number in your commit message using `GH1234`
- or `#1234`. Either style is fine.
- - Use `raise AssertionError` rather then plain `assert` in library code (`assert` is fine
- for test code). `python -o` strips assertions. Better safe than sorry.
- - When writing tests, don't use "new" assertion methods added to the `unittest` module
- in 2.7 since pandas currently supports 2.6. The most common pitfall is:
-
- with self.assertRaises(ValueError):
- foo
-
-
- which fails with Python 2.6. You need to use `assertRaises` from
- `pandas.util.testing` instead (or use `self.assertRaises(TheException,func,args)`).
-
- - `doc/source/release.rst` and `doc/source/vx.y.z.txt` contain an ongoing
- changelog for each release. Add entries to these files
- as needed in a separate commit in your PR: document the fix, enhancement,
- or (unavoidable) breaking change.
- - For extra brownie points, use `git rebase -i` to squash and reorder
- commits in your PR so that the history makes the most sense. Use your own
- judgment to decide what history needs to be preserved.
- - Pandas source code should not -- with some exceptions, such as 3rd party licensed code --
- generally speaking, include an "Authors" list or attribution to individuals in source code.
- `RELEASE.rst` details changes and enhancements to the code over time.
- A "thanks goes to @JohnSmith." as part of the appropriate entry is a suitable way to acknowledge
- contributions. The rest is `git blame`/`git log`.
- Feel free to ask the commiter who merges your code to include such an entry
- or include it directly yourself as part of the PR if you'd like to.
- **We're always glad to have new contributors join us from the ever-growing pandas community.**
- You may also be interested in the copyright policy as detailed in the pandas [LICENSE](https://github.com/pydata/pandas/blob/master/LICENSE).
+ - The pandas copyright policy is detailed in the pandas [LICENSE](https://github.com/pydata/pandas/blob/master/LICENSE).
- On the subject of [PEP8](http://www.python.org/dev/peps/pep-0008/): yes.
- - On the subject of massive PEP8 fix PRs touching everything, please consider the following:
- - They create noisy merge conflicts for people working in their own fork.
- - They make `git blame` less effective.
- - Different tools / people achieve PEP8 in different styles. This can create
- "style wars" and churn that produces little real benefit.
- - If your code changes are intermixed with style fixes, they are harder to review
- before merging. Keep style fixes in separate commits.
- - It's fine to clean-up a little around an area you just worked on.
- - Generally it's a BAD idea to PEP8 on documentation.
-
- Having said that, if you still feel a PEP8 storm is in order, go for it.
+ - On the subject of a massive PEP8-storm touching everything: not too often (once per release works).
### Notes on plotting function conventions
@@ -137,11 +91,7 @@ Here's a few high-level notes:
See the Green "Good to merge!" banner? that's it.
-This is especially important for new contributors, as members of the pandas dev team
-like to know that the test suite passes before considering it for merging.
-Even regular contributors who test religiously on their local box (using tox
-for example) often rely on a PR+travis=green to make double sure everything
-works ok on another system, as occasionally, it doesn't.
+It's important to get travis working as PRs won't generally get merged until travis is green.
#### Steps to enable Travis-CI
| Still working on the 140 char version. maybe we should just use the sha1 instead. hmm.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5721 | 2013-12-17T15:55:04Z | 2013-12-17T15:55:09Z | 2013-12-17T15:55:09Z | 2014-07-12T15:00:06Z |
CLN: add diff to series/dataframe groupby dispatch whitelist | diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 46711e4917e4c..e8a9d6e49a066 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -68,7 +68,7 @@
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
- 'corr', 'cov',
+ 'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index fef6a18acd7ff..942efdfc23740 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -3270,6 +3270,7 @@ def test_groupby_whitelist(self):
'plot', 'boxplot', 'hist',
'median', 'dtypes',
'corrwith', 'corr', 'cov',
+ 'diff',
])
s_whitelist = frozenset([
'last', 'first',
@@ -3290,6 +3291,7 @@ def test_groupby_whitelist(self):
'median', 'dtype',
'corr', 'cov',
'value_counts',
+ 'diff',
])
for obj, whitelist in zip((df, s),
@@ -3411,7 +3413,7 @@ def test_tab_completion(self):
'resample', 'cummin', 'fillna', 'cumsum', 'cumcount',
'all', 'shift', 'skew', 'bfill', 'irow', 'ffill',
'take', 'tshift', 'pct_change', 'any', 'mad', 'corr', 'corrwith',
- 'cov', 'dtypes',
+ 'cov', 'dtypes', 'diff',
])
self.assertEqual(results, expected)
| Building on #5480, this PR adds `diff` to the groupby dispatch whitelist.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5719 | 2013-12-17T04:33:21Z | 2013-12-17T15:45:32Z | 2013-12-17T15:45:32Z | 2014-07-06T04:39:17Z |
BUG: In a HDFStore, correctly handle data_columns with a Panel (GH5717) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index d5163b9dbc60b..5ce9ccd25a7fc 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -550,6 +550,7 @@ Bug Fixes
(:issue:`4708`)
- Fixed decoding perf issue on pyt3 (:issue:`5441`)
- Validate levels in a multi-index before storing (:issue:`5527`)
+ - Correctly handle ``data_columns`` with a Panel (:issue:`5717`)
- Fixed bug in tslib.tz_convert(vals, tz1, tz2): it could raise IndexError
exception while trying to access trans[pos + 1] (:issue:`4496`)
- The ``by`` argument now works correctly with the ``layout`` argument
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 09618b77a2968..bc99417c67310 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -3446,6 +3446,8 @@ def read(self, where=None, columns=None, **kwargs):
# the data need to be sorted
sorted_values = c.take_data().take(sorter, axis=0)
+ if sorted_values.ndim == 1:
+ sorted_values = sorted_values.reshape(sorted_values.shape[0],1)
take_labels = [l.take(sorter) for l in labels]
items = Index(c.values)
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index e9c04932aba40..c9955b1ae2fb2 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -1350,6 +1350,29 @@ def check_col(key,name,size):
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == 'foo')]
tm.assert_frame_equal(result,expected)
+ with ensure_clean_store(self.path) as store:
+ # panel
+ # GH5717 not handling data_columns
+ np.random.seed(1234)
+ p = tm.makePanel()
+
+ store.append('p1',p)
+ tm.assert_panel_equal(store.select('p1'),p)
+
+ store.append('p2',p,data_columns=True)
+ tm.assert_panel_equal(store.select('p2'),p)
+
+ result = store.select('p2',where='ItemA>0')
+ expected = p.to_frame()
+ expected = expected[expected['ItemA']>0]
+ tm.assert_frame_equal(result.to_frame(),expected)
+
+ result = store.select('p2',where='ItemA>0 & minor_axis=["A","B"]')
+ expected = p.to_frame()
+ expected = expected[expected['ItemA']>0]
+ expected = expected[expected.reset_index(level=['major']).index.isin(['A','B'])]
+ tm.assert_frame_equal(result.to_frame(),expected)
+
def test_create_table_index(self):
with ensure_clean_store(self.path) as store:
| closes #5717
| https://api.github.com/repos/pandas-dev/pandas/pulls/5718 | 2013-12-17T02:19:01Z | 2013-12-17T02:35:54Z | 2013-12-17T02:35:54Z | 2014-06-19T11:37:31Z |
TST/DOC: close win32 tests issues (GH5711) | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 5e79d05146de3..b77ea2b22f4fa 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1830,23 +1830,24 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
- value : scalar or dict
- Value to use to fill holes (e.g. 0), alternately a dict of values
- specifying which value to use for each column (columns not in the
- dict will not be filled). This value cannot be a list.
+ value : scalar, dict, or Series
+ Value to use to fill holes (e.g. 0), alternately a dict/Series of
+ values specifying which value to use for each index (for a Series) or
+ column (for a DataFrame). (values not in the dict/Series will not be
+ filled). This value cannot be a list.
axis : {0, 1}, default 0
0: fill column-by-column
1: fill row-by-row
inplace : boolean, default False
If True, fill in place. Note: this will modify any
other views on this object, (e.g. a no-copy slice for a column in a
- DataFrame). Still returns the object.
+ DataFrame).
limit : int, default None
Maximum size gap to forward or backward fill
- downcast : dict, default is None, a dict of item->dtype of what to
- downcast if possible, or the string 'infer' which will try to
- downcast to an appropriate equal type (e.g. float64 to int64 if
- possible)
+ downcast : dict, default is None
+ a dict of item->dtype of what to downcast if possible,
+ or the string 'infer' which will try to downcast to an appropriate
+ equal type (e.g. float64 to int64 if possible)
See also
--------
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index bfd2b784490ca..e396bee3f4ad9 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1382,7 +1382,7 @@ def test_astype_assignment(self):
df_orig = DataFrame([['1','2','3','.4',5,6.,'foo']],columns=list('ABCDEFG'))
df = df_orig.copy()
- df.iloc[:,0:2] = df.iloc[:,0:2].astype(int)
+ df.iloc[:,0:2] = df.iloc[:,0:2].astype(np.int64)
expected = DataFrame([[1,2,'3','.4',5,6.,'foo']],columns=list('ABCDEFG'))
assert_frame_equal(df,expected)
@@ -1393,12 +1393,12 @@ def test_astype_assignment(self):
# GH5702 (loc)
df = df_orig.copy()
- df.loc[:,'A'] = df.loc[:,'A'].astype(int)
+ df.loc[:,'A'] = df.loc[:,'A'].astype(np.int64)
expected = DataFrame([[1,'2','3','.4',5,6.,'foo']],columns=list('ABCDEFG'))
assert_frame_equal(df,expected)
df = df_orig.copy()
- df.loc[:,['B','C']] = df.loc[:,['B','C']].astype(int)
+ df.loc[:,['B','C']] = df.loc[:,['B','C']].astype(np.int64)
expected = DataFrame([['1',2,3,'.4',5,6.,'foo']],columns=list('ABCDEFG'))
assert_frame_equal(df,expected)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 188d61f397f5c..21f94f0c5d9e1 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1345,7 +1345,7 @@ def f():
s[0:3] = list(range(3))
expected = Series([0,1,2])
- assert_series_equal(s, expected)
+ assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
| DOC string updates for fillna
closes #5711
| https://api.github.com/repos/pandas-dev/pandas/pulls/5716 | 2013-12-17T00:14:42Z | 2013-12-17T01:18:26Z | 2013-12-17T01:18:26Z | 2014-07-16T08:43:26Z |
Add test for DataFrame.corrwith and np.corrcoef compatibility | diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 5b501de026c57..3a29fa41046ca 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -6463,6 +6463,15 @@ def test_corrwith_series(self):
assert_series_equal(result, expected)
+ def test_corrwith_matches_corrcoef(self):
+ df1 = DataFrame(np.arange(10000), columns=['a'])
+ df2 = DataFrame(np.arange(10000)**2, columns=['a'])
+ c1 = df1.corrwith(df2)['a']
+ c2 = np.corrcoef(df1['a'],df2['a'])[0][1]
+
+ assert_almost_equal(c1, c2)
+ self.assert_(c1 < 1)
+
def test_drop_names(self):
df = DataFrame([[1, 2, 3],[3, 4, 5],[5, 6, 7]], index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
| Add apparently-once-failing code taken from
http://stackoverflow.com/questions/20617854/is-there-a-bug-in-pandas-dataframe-corrwith-function
as a test.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5714 | 2013-12-16T19:22:20Z | 2013-12-16T19:54:23Z | 2013-12-16T19:54:23Z | 2014-06-21T02:58:03Z |
BUG: Bug in groupby transform with a datetime-like grouper (GH5712) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 0a853938f6cad..d5163b9dbc60b 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -823,6 +823,7 @@ Bug Fixes
- Work around regression in numpy 1.7.0 which erroneously raises IndexError from ``ndarray.item`` (:issue:`5666`)
- Bug in repeated indexing of object with resultant non-unique index (:issue:`5678`)
- Bug in fillna with Series and a passed series/dict (:issue:`5703`)
+ - Bug in groupby transform with a datetime-like grouper (:issue:`5712`)
pandas 0.12.0
-------------
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 7569653fc650b..7b652c36ae47d 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -2009,6 +2009,14 @@ def needs_i8_conversion(arr_or_dtype):
is_timedelta64_dtype(arr_or_dtype))
+def is_numeric_dtype(arr_or_dtype):
+ if isinstance(arr_or_dtype, np.dtype):
+ tipo = arr_or_dtype.type
+ else:
+ tipo = arr_or_dtype.dtype.type
+ return (issubclass(tipo, (np.number, np.bool_))
+ and not issubclass(tipo, (np.datetime64, np.timedelta64)))
+
def is_float_dtype(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
tipo = arr_or_dtype.type
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 558843f55777c..46711e4917e4c 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -19,9 +19,11 @@
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
- notnull, _DATELIKE_DTYPES)
+ notnull, _DATELIKE_DTYPES, is_numeric_dtype,
+ is_timedelta64_dtype, is_datetime64_dtype)
import pandas.lib as lib
+from pandas.lib import Timestamp
import pandas.algos as _algos
import pandas.hashtable as _hash
@@ -257,6 +259,16 @@ def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
+ def _get_index(self, name):
+ """ safe get index """
+ try:
+ return self.indices[name]
+ except:
+ if isinstance(name, Timestamp):
+ name = name.value
+ return self.indices[name]
+ raise
+
@property
def name(self):
if self._selection is None:
@@ -350,7 +362,7 @@ def get_group(self, name, obj=None):
if obj is None:
obj = self.obj
- inds = self.indices[name]
+ inds = self._get_index(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
@@ -676,7 +688,7 @@ def _try_cast(self, result, obj):
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
- is_numeric = _is_numeric_dtype(obj.dtype)
+ is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
@@ -714,7 +726,7 @@ def _python_agg_general(self, func, *args, **kwargs):
# since we are masking, make sure that we have a float object
values = result
- if _is_numeric_dtype(values.dtype):
+ if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
@@ -1080,7 +1092,7 @@ def aggregate(self, values, how, axis=0):
raise NotImplementedError
out_shape = (self.ngroups,) + values.shape[1:]
- if _is_numeric_dtype(values.dtype):
+ if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
is_numeric = True
else:
@@ -1474,6 +1486,15 @@ def __init__(self, index, grouper=None, name=None, level=None,
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
+ # if we have a date/time-like grouper, make sure that we have Timestamps like
+ if getattr(self.grouper,'dtype',None) is not None:
+ if is_datetime64_dtype(self.grouper):
+ from pandas import to_datetime
+ self.grouper = to_datetime(self.grouper)
+ elif is_timedelta64_dtype(self.grouper):
+ from pandas import to_timedelta
+ self.grouper = to_timedelta(self.grouper)
+
def __repr__(self):
return 'Grouping(%s)' % self.name
@@ -1821,7 +1842,7 @@ def transform(self, func, *args, **kwargs):
# need to do a safe put here, as the dtype may be different
# this needs to be an ndarray
result = Series(result)
- result.iloc[self.indices[name]] = res
+ result.iloc[self._get_index(name)] = res
result = result.values
# downcast if we can (and need)
@@ -1860,7 +1881,7 @@ def true_and_notnull(x, *args, **kwargs):
return b and notnull(b)
try:
- indices = [self.indices[name] if true_and_notnull(group) else []
+ indices = [self._get_index(name) if true_and_notnull(group) else []
for name, group in self]
except ValueError:
raise TypeError("the filter must return a boolean result")
@@ -1921,7 +1942,7 @@ def _cython_agg_blocks(self, how, numeric_only=True):
for block in data.blocks:
values = block.values
- is_numeric = _is_numeric_dtype(values.dtype)
+ is_numeric = is_numeric_dtype(values.dtype)
if numeric_only and not is_numeric:
continue
@@ -2412,7 +2433,7 @@ def filter(self, func, dropna=True, *args, **kwargs):
res = path(group)
def add_indices():
- indices.append(self.indices[name])
+ indices.append(self._get_index(name))
# interpret the result of the filter
if isinstance(res, (bool, np.bool_)):
@@ -2973,12 +2994,6 @@ def _reorder_by_uniques(uniques, labels):
}
-def _is_numeric_dtype(dt):
- typ = dt.type
- return (issubclass(typ, (np.number, np.bool_))
- and not issubclass(typ, (np.datetime64, np.timedelta64)))
-
-
def _intercept_function(func):
return _func_table.get(func, func)
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index f834094475c1b..fef6a18acd7ff 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -627,6 +627,14 @@ def test_transform_broadcast(self):
for idx in gp.index:
assert_fp_equal(res.xs(idx), agged[idx])
+ def test_transform_bug(self):
+ # GH 5712
+ # transforming on a datetime column
+ df = DataFrame(dict(A = Timestamp('20130101'), B = np.arange(5)))
+ result = df.groupby('A')['B'].transform(lambda x: x.rank(ascending=False))
+ expected = Series(np.arange(5,0,step=-1),name='B')
+ assert_series_equal(result,expected)
+
def test_transform_multiple(self):
grouped = self.ts.groupby([lambda x: x.year, lambda x: x.month])
| closes #5712
| https://api.github.com/repos/pandas-dev/pandas/pulls/5713 | 2013-12-16T18:49:31Z | 2013-12-16T19:53:58Z | 2013-12-16T19:53:58Z | 2014-06-24T10:34:39Z |
StataWriter: Replace non-isalnum characters in variable names by _ instead of integral represantation of replaced character. Eliminate duplicates created by replacement. | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 5d40cbe82e87b..fef086f8c5f57 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -221,6 +221,7 @@ Improvements to existing features
MultiIndex and Hierarchical Rows. Set the ``merge_cells`` to ``False`` to
restore the previous behaviour. (:issue:`5254`)
- The FRED DataReader now accepts multiple series (:issue`3413`)
+ - StataWriter adjusts variable names to Stata's limitations (:issue:`5709`)
API Changes
~~~~~~~~~~~
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 8c172db162cd6..55bcbd76c2248 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -1068,11 +1068,55 @@ def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
self._write(typ)
# varlist, length 33*nvar, char array, null terminated
+ converted_names = []
+ duplicate_var_id = 0
+ for j, name in enumerate(self.varlist):
+ orig_name = name
+ # Replaces all characters disallowed in .dta format by their integral representation.
+ for c in name:
+ if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and (c < '0' or c > '9') and c != '_':
+ name = name.replace(c, '_')
+
+ # Variable name may not start with a number
+ if name[0] > '0' and name[0] < '9':
+ name = '_' + name
+
+ name = name[:min(len(name), 32)]
+
+ if not name == orig_name:
+ # check for duplicates
+ while self.varlist.count(name) > 0:
+ # prepend ascending number to avoid duplicates
+ name = '_' + str(duplicate_var_id) + name
+ name = name[:min(len(name), 32)]
+ duplicate_var_id += 1
+
+ # need to possibly encode the orig name if its unicode
+ try:
+ orig_name = orig_name.encode('utf-8')
+ except:
+ pass
+
+ converted_names.append('{0} -> {1}'.format(orig_name, name))
+ self.varlist[j] = name
+
for name in self.varlist:
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
+ if converted_names:
+ from warnings import warn
+ warn("""Not all pandas column names were valid Stata variable names.
+ Made the following replacements:
+
+ {0}
+
+ If this is not what you expect, please make sure you have Stata-compliant
+ column names in your DataFrame (max 32 characters, only alphanumerics and
+ underscores)/
+ """.format('\n '.join(converted_names)))
+
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", (2*(nvar+1)))
self._write(srtlist)
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index 76dae396c04ed..f75cf7ebb18d1 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -231,6 +231,38 @@ def test_encoding(self):
self.assert_(result == expected)
self.assert_(isinstance(result, unicode))
+ def test_read_write_dta11(self):
+ original = DataFrame([(1, 2, 3, 4)],
+ columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
+ formatted = DataFrame([(1, 2, 3, 4)],
+ columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_'])
+ formatted.index.name = 'index'
+
+ with tm.ensure_clean() as path:
+ with warnings.catch_warnings(record=True) as w:
+ original.to_stata(path, None, False)
+ np.testing.assert_equal(
+ len(w), 1) # should get a warning for that format.
+
+ written_and_read_again = self.read_dta(path)
+ tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
+
+ def test_read_write_dta12(self):
+ original = DataFrame([(1, 2, 3, 4)],
+ columns=['astringwithmorethan32characters_1', 'astringwithmorethan32characters_2', '+', '-'])
+ formatted = DataFrame([(1, 2, 3, 4)],
+ columns=['astringwithmorethan32characters_', '_0astringwithmorethan32character', '_', '_1_'])
+ formatted.index.name = 'index'
+
+ with tm.ensure_clean() as path:
+ with warnings.catch_warnings(record=True) as w:
+ original.to_stata(path, None, False)
+ np.testing.assert_equal(
+ len(w), 1) # should get a warning for that format.
+
+ written_and_read_again = self.read_dta(path)
+ tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| New pull request as replacement for PR-#5525
| https://api.github.com/repos/pandas-dev/pandas/pulls/5709 | 2013-12-16T10:08:26Z | 2013-12-18T00:50:14Z | 2013-12-18T00:50:14Z | 2014-06-16T04:21:15Z |
Correct "sentinel" spelling. | diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index 10053f61d8574..73ec9c47bc473 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -93,7 +93,7 @@ Datetimes
---------
For datetime64[ns] types, ``NaT`` represents missing values. This is a pseudo-native
-sentinal value that can be represented by numpy in a singular dtype (datetime64[ns]).
+sentinel value that can be represented by numpy in a singular dtype (datetime64[ns]).
Pandas objects provide intercompatibility between ``NaT`` and ``NaN``.
.. ipython:: python
diff --git a/pandas/core/common.py b/pandas/core/common.py
index d251a2617f98d..7569653fc650b 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -2332,11 +2332,11 @@ def _where_compat(mask, arr1, arr2):
return np.where(mask, arr1, arr2)
-def sentinal_factory():
- class Sentinal(object):
+def sentinel_factory():
+ class Sentinel(object):
pass
- return Sentinal()
+ return Sentinel()
def in_interactive_session():
diff --git a/pandas/core/format.py b/pandas/core/format.py
index e14d34c2abfbe..47745635bbc39 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -740,8 +740,8 @@ def _column_header():
template = 'colspan="%d" halign="left"'
# GH3547
- sentinal = com.sentinal_factory()
- levels = self.columns.format(sparsify=sentinal, adjoin=False,
+ sentinel = com.sentinel_factory()
+ levels = self.columns.format(sparsify=sentinel, adjoin=False,
names=False)
# Truncate column names
if len(levels[0]) > self.max_cols:
@@ -750,7 +750,7 @@ def _column_header():
else:
truncated = False
- level_lengths = _get_level_lengths(levels, sentinal)
+ level_lengths = _get_level_lengths(levels, sentinel)
row_levels = self.frame.index.nlevels
@@ -859,14 +859,14 @@ def _write_hierarchical_rows(self, fmt_values, indent):
if self.fmt.sparsify:
# GH3547
- sentinal = com.sentinal_factory()
- levels = frame.index[:nrows].format(sparsify=sentinal,
+ sentinel = com.sentinel_factory()
+ levels = frame.index[:nrows].format(sparsify=sentinel,
adjoin=False, names=False)
# Truncate row names
if truncate:
levels = [lev[:self.max_rows] for lev in levels]
- level_lengths = _get_level_lengths(levels, sentinal)
+ level_lengths = _get_level_lengths(levels, sentinel)
for i in range(min(len(frame), self.max_rows)):
row = []
@@ -905,14 +905,14 @@ def _write_hierarchical_rows(self, fmt_values, indent):
self.write_tr(row, indent, self.indent_delta, tags=None)
-def _get_level_lengths(levels, sentinal=''):
+def _get_level_lengths(levels, sentinel=''):
from itertools import groupby
def _make_grouper():
record = {'count': 0}
def grouper(x):
- if x != sentinal:
+ if x != sentinel:
record['count'] += 1
return record['count']
return grouper
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 18d6a1a04e3f9..7ae273d08fa87 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -2371,16 +2371,16 @@ def format(self, space=2, sparsify=None, adjoin=True, names=False,
sparsify = get_option("display.multi_sparse")
if sparsify:
- sentinal = ''
+ sentinel = ''
# GH3547
- # use value of sparsify as sentinal, unless it's an obvious
+ # use value of sparsify as sentinel, unless it's an obvious
# "Truthey" value
if sparsify not in [True, 1]:
- sentinal = sparsify
+ sentinel = sparsify
# little bit of a kludge job for #1217
result_levels = _sparsify(result_levels,
start=int(names),
- sentinal=sentinal)
+ sentinel=sentinel)
if adjoin:
return com.adjoin(space, *result_levels).split('\n')
@@ -3379,7 +3379,7 @@ def _wrap_joined_index(self, joined, other):
# For utility purposes
-def _sparsify(label_list, start=0, sentinal=''):
+def _sparsify(label_list, start=0, sentinel=''):
pivoted = lzip(*label_list)
k = len(label_list)
@@ -3396,7 +3396,7 @@ def _sparsify(label_list, start=0, sentinal=''):
break
if p == t:
- sparse_cur.append(sentinal)
+ sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
| Closes #5706 -- more a test that my git PR pipeline is working than because of its significance.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5707 | 2013-12-16T03:31:30Z | 2013-12-16T13:43:16Z | 2013-12-16T13:43:16Z | 2014-06-16T16:35:38Z |
BUG: Bug in fillna with Series and a passed series/dict (GH5703) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 5d40cbe82e87b..0a853938f6cad 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -822,6 +822,7 @@ Bug Fixes
- Bug in groupby returning non-consistent types when user function returns a ``None``, (:issue:`5592`)
- Work around regression in numpy 1.7.0 which erroneously raises IndexError from ``ndarray.item`` (:issue:`5666`)
- Bug in repeated indexing of object with resultant non-unique index (:issue:`5678`)
+ - Bug in fillna with Series and a passed series/dict (:issue:`5703`)
pandas 0.12.0
-------------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 253136b9a11c3..5e79d05146de3 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1905,7 +1905,16 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
if len(self._get_axis(axis)) == 0:
return self
- if isinstance(value, (dict, com.ABCSeries)):
+
+ if self.ndim == 1 and value is not None:
+ if isinstance(value, (dict, com.ABCSeries)):
+ from pandas import Series
+ value = Series(value)
+
+ new_data = self._data.fillna(value, inplace=inplace,
+ downcast=downcast)
+
+ elif isinstance(value, (dict, com.ABCSeries)):
if axis == 1:
raise NotImplementedError('Currently only can fill '
'with dict/Series column '
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index dbad353bab62c..6ec08fe501bcd 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -664,7 +664,7 @@ def putmask(self, mask, new, align=True, inplace=False):
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isnull(new):
- new = np.nan
+ new = self.fill_value
if self._can_hold_element(new):
new = self._try_cast(new)
@@ -830,7 +830,7 @@ def _interpolate(self, method=None, index=None, values=None,
data = data.astype(np.float64)
if fill_value is None:
- fill_value = np.nan
+ fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
@@ -1196,6 +1196,10 @@ class TimeDeltaBlock(IntBlock):
_can_hold_na = True
is_numeric = False
+ @property
+ def fill_value(self):
+ return tslib.iNaT
+
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or isnull(value):
@@ -1532,6 +1536,10 @@ def _try_coerce_result(self, result):
result = lib.Timestamp(result)
return result
+ @property
+ def fill_value(self):
+ return tslib.iNaT
+
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if isinstance(value, type(tslib.NaT)) or isnull(value):
@@ -3190,18 +3198,15 @@ def reindex_items(self, new_items, indexer=None, copy=True,
blk = blk.reindex_items_from(new_items)
else:
blk.ref_items = new_items
- if blk is not None:
- new_blocks.append(blk)
+ new_blocks.extend(_valid_blocks(blk))
else:
# unique
if self.axes[0].is_unique and new_items.is_unique:
for block in self.blocks:
-
- newb = block.reindex_items_from(new_items, copy=copy)
- if newb is not None and len(newb.items) > 0:
- new_blocks.append(newb)
+ blk = block.reindex_items_from(new_items, copy=copy)
+ new_blocks.extend(_valid_blocks(blk))
# non-unique
else:
@@ -3411,7 +3416,11 @@ def __init__(self, block, axis, do_integrity_check=False, fastpath=True):
if fastpath:
self.axes = [axis]
if isinstance(block, list):
- if len(block) != 1:
+
+ # empty block
+ if len(block) == 0:
+ block = [np.array([])]
+ elif len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
@@ -3875,6 +3884,13 @@ def _consolidate(blocks, items):
return new_blocks
+def _valid_blocks(newb):
+ if newb is None:
+ return []
+ if not isinstance(newb, list):
+ newb = [ newb ]
+ return [ b for b in newb if len(b.items) > 0 ]
+
def _merge_blocks(blocks, items, dtype=None, _can_consolidate=True):
if len(blocks) == 1:
return blocks[0]
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index d3104cdfad062..188d61f397f5c 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -2760,6 +2760,35 @@ def test_fillna(self):
self.assertRaises(ValueError, ts.fillna)
self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')
+ # GH 5703
+ s1 = Series([np.nan])
+ s2 = Series([1])
+ result = s1.fillna(s2)
+ expected = Series([1.])
+ assert_series_equal(result,expected)
+ result = s1.fillna({})
+ assert_series_equal(result,s1)
+ result = s1.fillna(Series(()))
+ assert_series_equal(result,s1)
+ result = s2.fillna(s1)
+ assert_series_equal(result,s2)
+ result = s1.fillna({ 0 : 1})
+ assert_series_equal(result,expected)
+ result = s1.fillna({ 1 : 1})
+ assert_series_equal(result,Series([np.nan]))
+ result = s1.fillna({ 0 : 1, 1 : 1})
+ assert_series_equal(result,expected)
+ result = s1.fillna(Series({ 0 : 1, 1 : 1}))
+ assert_series_equal(result,expected)
+ result = s1.fillna(Series({ 0 : 1, 1 : 1},index=[4,5]))
+ assert_series_equal(result,s1)
+
+ s1 = Series([0, 1, 2], list('abc'))
+ s2 = Series([0, np.nan, 2], list('bac'))
+ result = s2.fillna(s1)
+ expected = Series([0,0,2.], list('bac'))
+ assert_series_equal(result,expected)
+
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
| closes #5703
```
In [1]: s1 = Series([np.nan])
In [2]: s2 = Series([1])
In [3]: s1.fillna(s2)
Out[3]:
0 1
dtype: float64
In [4]: s1.fillna({ 1 : 1})
Out[4]:
0 1
dtype: float64
In [5]: s1.fillna({ 1 : 2, 2 : 3})
ValueError: cannot fillna on a 1-dim object with more than one value
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5705 | 2013-12-15T23:52:58Z | 2013-12-16T16:23:30Z | 2013-12-16T16:23:30Z | 2014-06-21T11:40:12Z |
BUG: loc assignment with astype buggy, (GH5702) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index f6cbbd23011a8..5d40cbe82e87b 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -636,8 +636,8 @@ Bug Fixes
(causing the original stack trace to be truncated).
- Fix selection with ``ix/loc`` and non_unique selectors (:issue:`4619`)
- Fix assignment with iloc/loc involving a dtype change in an existing column
- (:issue:`4312`) have internal setitem_with_indexer in core/indexing to use
- Block.setitem
+ (:issue:`4312`, :issue:`5702`) have internal setitem_with_indexer in core/indexing
+ to use Block.setitem
- Fixed bug where thousands operator was not handled correctly for floating
point numbers in csv_import (:issue:`4322`)
- Fix an issue with CacheableOffset not properly being used by many
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 471136dc2386b..dbad353bab62c 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -601,8 +601,12 @@ def setitem(self, indexer, value):
"different length than the value")
try:
- # set and return a block
- values[indexer] = value
+ # if we are an exact match (ex-broadcasting),
+ # then use the resultant dtype
+ if len(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape):
+ values = arr_value.reshape(values.shape)
+ else:
+ values[indexer] = value
# coerce and try to infer the dtypes of the result
if np.isscalar(value):
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 59c7bda35c544..bfd2b784490ca 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1376,21 +1376,42 @@ def gen_expected(df,mask):
expected = gen_expected(df,mask)
assert_frame_equal(result,expected)
- def test_astype_assignment_with_iloc(self):
+ def test_astype_assignment(self):
- # GH4312
+ # GH4312 (iloc)
df_orig = DataFrame([['1','2','3','.4',5,6.,'foo']],columns=list('ABCDEFG'))
df = df_orig.copy()
- df.iloc[:,0:3] = df.iloc[:,0:3].astype(int)
- result = df.get_dtype_counts().sort_index()
- expected = Series({ 'int64' : 4, 'float64' : 1, 'object' : 2 }).sort_index()
- assert_series_equal(result,expected)
+ df.iloc[:,0:2] = df.iloc[:,0:2].astype(int)
+ expected = DataFrame([[1,2,'3','.4',5,6.,'foo']],columns=list('ABCDEFG'))
+ assert_frame_equal(df,expected)
df = df_orig.copy()
- df.iloc[:,0:3] = df.iloc[:,0:3].convert_objects(convert_numeric=True)
- result = df.get_dtype_counts().sort_index()
- expected = Series({ 'int64' : 4, 'float64' : 1, 'object' : 2 }).sort_index()
+ df.iloc[:,0:2] = df.iloc[:,0:2].convert_objects(convert_numeric=True)
+ expected = DataFrame([[1,2,'3','.4',5,6.,'foo']],columns=list('ABCDEFG'))
+ assert_frame_equal(df,expected)
+
+ # GH5702 (loc)
+ df = df_orig.copy()
+ df.loc[:,'A'] = df.loc[:,'A'].astype(int)
+ expected = DataFrame([[1,'2','3','.4',5,6.,'foo']],columns=list('ABCDEFG'))
+ assert_frame_equal(df,expected)
+
+ df = df_orig.copy()
+ df.loc[:,['B','C']] = df.loc[:,['B','C']].astype(int)
+ expected = DataFrame([['1',2,3,'.4',5,6.,'foo']],columns=list('ABCDEFG'))
+ assert_frame_equal(df,expected)
+
+ # full replacements / no nans
+ df = DataFrame({'A': [1., 2., 3., 4.]})
+ df.iloc[:, 0] = df['A'].astype(np.int64)
+ expected = DataFrame({'A': [1, 2, 3, 4]})
+ assert_frame_equal(df,expected)
+
+ df = DataFrame({'A': [1., 2., 3., 4.]})
+ df.loc[:, 'A'] = df['A'].astype(np.int64)
+ expected = DataFrame({'A': [1, 2, 3, 4]})
+ assert_frame_equal(df,expected)
def test_astype_assignment_with_dups(self):
@@ -1496,7 +1517,7 @@ def f():
assert_frame_equal(df,expected)
# mixed dtype frame, overwrite
- expected = DataFrame(dict({ 'A' : [0,2,4], 'B' : Series([0.,2.,4.]) }))
+ expected = DataFrame(dict({ 'A' : [0,2,4], 'B' : Series([0,2,4]) }))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:,'B'] = df.ix[:,'A']
@@ -1504,14 +1525,14 @@ def f():
# single dtype frame, partial setting
expected = df_orig.copy()
- expected['C'] = df['A'].astype(np.float64)
+ expected['C'] = df['A']
df = df_orig.copy()
df.ix[:,'C'] = df.ix[:,'A']
assert_frame_equal(df,expected)
# mixed frame, partial setting
expected = df_orig.copy()
- expected['C'] = df['A'].astype(np.float64)
+ expected['C'] = df['A']
df = df_orig.copy()
df.ix[:,'C'] = df.ix[:,'A']
assert_frame_equal(df,expected)
| closes #5702
| https://api.github.com/repos/pandas-dev/pandas/pulls/5704 | 2013-12-15T23:18:37Z | 2013-12-15T23:56:16Z | 2013-12-15T23:56:16Z | 2014-06-21T03:16:05Z |
Smarter formatting of timedelta and datetime columns | diff --git a/doc/source/release.rst b/doc/source/release.rst
index fc9f18279087b..6d550d4f0b588 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -76,6 +76,8 @@ Improvements to existing features
- support ``dtypes`` on ``Panel``
- extend ``Panel.apply`` to allow arbitrary functions (rather than only ufuncs) (:issue:`1148`)
allow multiple axes to be used to operate on slabs of a ``Panel``
+ - The ``ArrayFormatter``s for ``datetime`` and ``timedelta64`` now intelligently
+ limit precision based on the values in the array (:issue:`3401`)
.. _release.bug_fixes-0.13.1:
@@ -99,6 +101,8 @@ Bug Fixes
- Bug in creating an empty DataFrame, copying, then assigning (:issue:`5932`)
- Bug in DataFrame.tail with empty frame (:issue:`5846`)
- Bug in propogating metadata on ``resample`` (:issue:`5862`)
+ - Fixed string-representation of ``NaT`` to be "NaT" (:issue:`5708`)
+ - Fixed string-representation for Timestamp to show nanoseconds if present (:issue:`5912`)
pandas 0.13.0
-------------
diff --git a/doc/source/v0.13.1.txt b/doc/source/v0.13.1.txt
index 76b915c519440..31004d24e56a6 100644
--- a/doc/source/v0.13.1.txt
+++ b/doc/source/v0.13.1.txt
@@ -83,6 +83,27 @@ Enhancements
result
result.loc[:,:,'ItemA']
+- The ``ArrayFormatter``s for ``datetime`` and ``timedelta64`` now intelligently
+ limit precision based on the values in the array (:issue:`3401`)
+
+ Previously output might look like:
+
+ .. code-block:: python
+
+ age today diff
+ 0 2001-01-01 00:00:00 2013-04-19 00:00:00 4491 days, 00:00:00
+ 1 2004-06-01 00:00:00 2013-04-19 00:00:00 3244 days, 00:00:00
+
+ Now the output looks like:
+
+ .. ipython:: python
+
+ df = DataFrame([ Timestamp('20010101'),
+ Timestamp('20040601') ], columns=['age'])
+ df['today'] = Timestamp('20130419')
+ df['diff'] = df['today']-df['age']
+ df
+
Experimental
~~~~~~~~~~~~
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 47745635bbc39..24b0554755ead 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -14,11 +14,13 @@
from pandas.core.config import get_option, set_option, reset_option
import pandas.core.common as com
import pandas.lib as lib
+from pandas.tslib import iNaT
import numpy as np
import itertools
import csv
+from datetime import time
from pandas.tseries.period import PeriodIndex, DatetimeIndex
@@ -1609,7 +1611,7 @@ def format_array(values, formatter, float_format=None, na_rep='NaN',
if digits is None:
digits = get_option("display.precision")
- fmt_obj = fmt_klass(values, digits, na_rep=na_rep,
+ fmt_obj = fmt_klass(values, digits=digits, na_rep=na_rep,
float_format=float_format,
formatter=formatter, space=space,
justify=justify)
@@ -1704,7 +1706,7 @@ def _val(x, threshold):
fmt_values = [_val(x, threshold) for x in self.values]
return _trim_zeros(fmt_values, self.na_rep)
- def get_result(self):
+ def _format_strings(self):
if self.formatter is not None:
fmt_values = [self.formatter(x) for x in self.values]
else:
@@ -1732,64 +1734,124 @@ def get_result(self):
fmt_str = '%% .%de' % (self.digits - 1)
fmt_values = self._format_with(fmt_str)
- return _make_fixed_width(fmt_values, self.justify)
+ return fmt_values
class IntArrayFormatter(GenericArrayFormatter):
- def get_result(self):
- if self.formatter:
- formatter = self.formatter
- else:
- formatter = lambda x: '% d' % x
+ def _format_strings(self):
+ formatter = self.formatter or (lambda x: '% d' % x)
fmt_values = [formatter(x) for x in self.values]
- return _make_fixed_width(fmt_values, self.justify)
+ return fmt_values
class Datetime64Formatter(GenericArrayFormatter):
+ def __init__(self, values, nat_rep='NaT', date_format=None, **kwargs):
+ super(Datetime64Formatter, self).__init__(values, **kwargs)
+ self.nat_rep = nat_rep
+ self.date_format = date_format
- def get_result(self):
- if self.formatter:
- formatter = self.formatter
- else:
- formatter = _format_datetime64
+ def _format_strings(self):
+ formatter = self.formatter or _get_format_datetime64_from_values(
+ self.values,
+ nat_rep=self.nat_rep,
+ date_format=self.date_format)
fmt_values = [formatter(x) for x in self.values]
- return _make_fixed_width(fmt_values, self.justify)
+ return fmt_values
-def _format_datetime64(x, tz=None):
- if isnull(x):
- return 'NaT'
- stamp = lib.Timestamp(x, tz=tz)
- return stamp._repr_base
+def _format_datetime64(x, tz=None, nat_rep='NaT'):
+ if x is None or lib.checknull(x):
+ return nat_rep
+ if tz is not None or not isinstance(x, lib.Timestamp):
+ x = lib.Timestamp(x, tz=tz)
-class Timedelta64Formatter(Datetime64Formatter):
+ return str(x)
- def get_result(self):
- if self.formatter:
- formatter = self.formatter
- else:
- formatter = _format_timedelta64
+def _format_datetime64_dateonly(x, nat_rep='NaT', date_format=None):
+ if x is None or lib.checknull(x):
+ return nat_rep
+
+ if not isinstance(x, lib.Timestamp):
+ x = lib.Timestamp(x)
+
+ if date_format:
+ return x.strftime(date_format)
+ else:
+ return x._date_repr
+
+
+def _is_dates_only(values):
+ for d in values:
+ if isinstance(d, np.datetime64):
+ d = lib.Timestamp(d)
+
+ if d is not None and not lib.checknull(d) and d._has_time_component():
+ return False
+ return True
+
+
+def _get_format_datetime64(is_dates_only, nat_rep='NaT', date_format=None):
+
+ if is_dates_only:
+ return lambda x, tz=None: _format_datetime64_dateonly(x,
+ nat_rep=nat_rep,
+ date_format=date_format)
+ else:
+ return lambda x, tz=None: _format_datetime64(x, tz=tz, nat_rep=nat_rep)
+
+
+def _get_format_datetime64_from_values(values,
+ nat_rep='NaT',
+ date_format=None):
+ is_dates_only = _is_dates_only(values)
+ return _get_format_datetime64(is_dates_only=is_dates_only,
+ nat_rep=nat_rep,
+ date_format=date_format)
+
+
+class Timedelta64Formatter(GenericArrayFormatter):
+
+ def _format_strings(self):
+ formatter = self.formatter or _get_format_timedelta64(self.values)
fmt_values = [formatter(x) for x in self.values]
- return _make_fixed_width(fmt_values, self.justify)
+ return fmt_values
+
+
+def _get_format_timedelta64(values):
+ values_int = values.astype(np.int64)
-def _format_timedelta64(x):
- if isnull(x):
- return 'NaT'
+ consider_values = values_int != iNaT
- return lib.repr_timedelta64(x)
+ one_day_in_nanos = (86400 * 1e9)
+ even_days = np.logical_and(consider_values, values_int % one_day_in_nanos != 0).sum() == 0
+ all_sub_day = np.logical_and(consider_values, np.abs(values_int) >= one_day_in_nanos).sum() == 0
+
+ format_short = even_days or all_sub_day
+ format = "short" if format_short else "long"
+
+ def impl(x):
+ if x is None or lib.checknull(x):
+ return 'NaT'
+ elif format_short and x == 0:
+ return "0 days" if even_days else "00:00:00"
+ else:
+ return lib.repr_timedelta64(x, format=format)
+
+ return impl
def _make_fixed_width(strings, justify='right', minimum=None, truncated=False):
- if len(strings) == 0:
+
+ if len(strings) == 0 or justify == 'all':
return strings
_strlen = _strlen_func()
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index f66c59fade2c1..a9855c4e73c6e 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -13,15 +13,17 @@
from numpy.random import randn
import numpy as np
-from pandas import DataFrame, Series, Index
+from pandas import DataFrame, Series, Index, _np_version_under1p7, Timestamp
import pandas.core.format as fmt
import pandas.util.testing as tm
from pandas.util.terminal import get_terminal_size
import pandas
+import pandas.tslib as tslib
import pandas as pd
from pandas.core.config import (set_option, get_option,
option_context, reset_option)
+from datetime import datetime
_frame = DataFrame(tm.getSeriesData())
@@ -55,6 +57,17 @@ def has_expanded_repr(df):
return True
return False
+def skip_if_np_version_under1p7():
+ if _np_version_under1p7:
+ import nose
+
+ raise nose.SkipTest('numpy >= 1.7 required')
+
+def _skip_if_no_pytz():
+ try:
+ import pytz
+ except ImportError:
+ raise nose.SkipTest("pytz not installed")
class TestDataFrameFormatting(tm.TestCase):
_multiprocess_can_split_ = True
@@ -770,11 +783,11 @@ def test_wide_repr(self):
with option_context('mode.sim_interactive', True):
col = lambda l, k: [tm.rands(k) for _ in range(l)]
max_cols = get_option('display.max_columns')
- df = DataFrame([col(max_cols-1, 25) for _ in range(10)])
+ df = DataFrame([col(max_cols - 1, 25) for _ in range(10)])
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
- print(rep_str)
- assert "10 rows x %d columns" % (max_cols-1) in rep_str
+
+ assert "10 rows x %d columns" % (max_cols - 1) in rep_str
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
self.assert_(rep_str != wide_repr)
@@ -1749,7 +1762,7 @@ def test_float_trim_zeros(self):
def test_datetimeindex(self):
- from pandas import date_range, NaT, Timestamp
+ from pandas import date_range, NaT
index = date_range('20130102',periods=6)
s = Series(1,index=index)
result = s.to_string()
@@ -1779,32 +1792,33 @@ def test_timedelta64(self):
# adding NaTs
y = s-s.shift(1)
result = y.to_string()
- self.assertTrue('1 days, 00:00:00' in result)
+ self.assertTrue('1 days' in result)
+ self.assertTrue('00:00:00' not in result)
self.assertTrue('NaT' in result)
# with frac seconds
o = Series([datetime(2012,1,1,microsecond=150)]*3)
y = s-o
result = y.to_string()
- self.assertTrue('-00:00:00.000150' in result)
+ self.assertTrue('-0 days, 00:00:00.000150' in result)
# rounding?
o = Series([datetime(2012,1,1,1)]*3)
y = s-o
result = y.to_string()
- self.assertTrue('-01:00:00' in result)
+ self.assertTrue('-0 days, 01:00:00' in result)
self.assertTrue('1 days, 23:00:00' in result)
o = Series([datetime(2012,1,1,1,1)]*3)
y = s-o
result = y.to_string()
- self.assertTrue('-01:01:00' in result)
+ self.assertTrue('-0 days, 01:01:00' in result)
self.assertTrue('1 days, 22:59:00' in result)
o = Series([datetime(2012,1,1,1,1,microsecond=150)]*3)
y = s-o
result = y.to_string()
- self.assertTrue('-01:01:00.000150' in result)
+ self.assertTrue('-0 days, 01:01:00.000150' in result)
self.assertTrue('1 days, 22:58:59.999850' in result)
# neg time
@@ -2039,6 +2053,212 @@ class TestFloatArrayFormatter(tm.TestCase):
def test_misc(self):
obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
result = obj.get_result()
+ self.assertTrue(len(result) == 0)
+
+ def test_format(self):
+ obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
+ result = obj.get_result()
+ self.assertEqual(result[0], " 12")
+ self.assertEqual(result[1], " 0")
+
+
+class TestRepr_timedelta64(tm.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ skip_if_np_version_under1p7()
+
+ def test_legacy(self):
+ delta_1d = pd.to_timedelta(1, unit='D')
+ delta_0d = pd.to_timedelta(0, unit='D')
+ delta_1s = pd.to_timedelta(1, unit='s')
+ delta_500ms = pd.to_timedelta(500, unit='ms')
+
+ self.assertEqual(tslib.repr_timedelta64(delta_1d), "1 days, 00:00:00")
+ self.assertEqual(tslib.repr_timedelta64(-delta_1d), "-1 days, 00:00:00")
+ self.assertEqual(tslib.repr_timedelta64(delta_0d), "00:00:00")
+ self.assertEqual(tslib.repr_timedelta64(delta_1s), "00:00:01")
+ self.assertEqual(tslib.repr_timedelta64(delta_500ms), "00:00:00.500000")
+ self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_1s), "1 days, 00:00:01")
+ self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_500ms), "1 days, 00:00:00.500000")
+
+ def test_short(self):
+ delta_1d = pd.to_timedelta(1, unit='D')
+ delta_0d = pd.to_timedelta(0, unit='D')
+ delta_1s = pd.to_timedelta(1, unit='s')
+ delta_500ms = pd.to_timedelta(500, unit='ms')
+
+ self.assertEqual(tslib.repr_timedelta64(delta_1d, format='short'), "1 days")
+ self.assertEqual(tslib.repr_timedelta64(-delta_1d, format='short'), "-1 days")
+ self.assertEqual(tslib.repr_timedelta64(delta_0d, format='short'), "00:00:00")
+ self.assertEqual(tslib.repr_timedelta64(delta_1s, format='short'), "00:00:01")
+ self.assertEqual(tslib.repr_timedelta64(delta_500ms, format='short'), "00:00:00.500000")
+ self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_1s, format='short'), "1 days, 00:00:01")
+ self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_500ms, format='short'), "1 days, 00:00:00.500000")
+
+ def test_long(self):
+ delta_1d = pd.to_timedelta(1, unit='D')
+ delta_0d = pd.to_timedelta(0, unit='D')
+ delta_1s = pd.to_timedelta(1, unit='s')
+ delta_500ms = pd.to_timedelta(500, unit='ms')
+
+ self.assertEqual(tslib.repr_timedelta64(delta_1d, format='long'), "1 days, 00:00:00")
+ self.assertEqual(tslib.repr_timedelta64(-delta_1d, format='long'), "-1 days, 00:00:00")
+ self.assertEqual(tslib.repr_timedelta64(delta_0d, format='long'), "0 days, 00:00:00")
+ self.assertEqual(tslib.repr_timedelta64(delta_1s, format='long'), "0 days, 00:00:01")
+ self.assertEqual(tslib.repr_timedelta64(delta_500ms, format='long'), "0 days, 00:00:00.500000")
+ self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_1s, format='long'), "1 days, 00:00:01")
+ self.assertEqual(tslib.repr_timedelta64(delta_1d + delta_500ms, format='long'), "1 days, 00:00:00.500000")
+
+
+class TestTimedelta64Formatter(tm.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ skip_if_np_version_under1p7()
+
+ def test_mixed(self):
+ x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
+ y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
+ result = fmt.Timedelta64Formatter(x + y).get_result()
+ self.assertEqual(result[0].strip(), "0 days, 00:00:00")
+ self.assertEqual(result[1].strip(), "1 days, 00:00:01")
+
+ def test_mixed_neg(self):
+ x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
+ y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
+ result = fmt.Timedelta64Formatter(-(x + y)).get_result()
+ self.assertEqual(result[0].strip(), "0 days, 00:00:00")
+ self.assertEqual(result[1].strip(), "-1 days, 00:00:01")
+
+ def test_days(self):
+ x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
+ result = fmt.Timedelta64Formatter(x).get_result()
+ self.assertEqual(result[0].strip(), "0 days")
+ self.assertEqual(result[1].strip(), "1 days")
+
+ result = fmt.Timedelta64Formatter(x[1:2]).get_result()
+ self.assertEqual(result[0].strip(), "1 days")
+
+ def test_days_neg(self):
+ x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
+ result = fmt.Timedelta64Formatter(-x).get_result()
+ self.assertEqual(result[0].strip(), "0 days")
+ self.assertEqual(result[1].strip(), "-1 days")
+
+ def test_subdays(self):
+ y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
+ result = fmt.Timedelta64Formatter(y).get_result()
+ self.assertEqual(result[0].strip(), "00:00:00")
+ self.assertEqual(result[1].strip(), "00:00:01")
+
+ def test_subdays_neg(self):
+ y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
+ result = fmt.Timedelta64Formatter(-y).get_result()
+ self.assertEqual(result[0].strip(), "00:00:00")
+ self.assertEqual(result[1].strip(), "-00:00:01")
+
+ def test_zero(self):
+ x = pd.to_timedelta(list(range(1)) + [pd.NaT], unit='D')
+ result = fmt.Timedelta64Formatter(x).get_result()
+ self.assertEqual(result[0].strip(), "0 days")
+
+ x = pd.to_timedelta(list(range(1)), unit='D')
+ result = fmt.Timedelta64Formatter(x).get_result()
+ self.assertEqual(result[0].strip(), "0 days")
+
+
+class TestDatetime64Formatter(tm.TestCase):
+ def test_mixed(self):
+ x = pd.Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), pd.NaT])
+ result = fmt.Datetime64Formatter(x).get_result()
+ self.assertEqual(result[0].strip(), "2013-01-01 00:00:00")
+ self.assertEqual(result[1].strip(), "2013-01-01 12:00:00")
+
+ def test_dates(self):
+ x = pd.Series([datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT])
+ result = fmt.Datetime64Formatter(x).get_result()
+ self.assertEqual(result[0].strip(), "2013-01-01")
+ self.assertEqual(result[1].strip(), "2013-01-02")
+
+ def test_date_nanos(self):
+ x = pd.Series([Timestamp(200)])
+ result = fmt.Datetime64Formatter(x).get_result()
+ self.assertEqual(result[0].strip(), "1970-01-01 00:00:00.000000200")
+
+
+class TestNaTFormatting(tm.TestCase):
+ def test_repr(self):
+ self.assertEqual(repr(pd.NaT), "NaT")
+
+ def test_str(self):
+ self.assertEqual(str(pd.NaT), "NaT")
+
+
+class TestDatetimeIndexFormat(tm.TestCase):
+ def test_datetime(self):
+ formatted = pd.to_datetime([datetime(2003, 1, 1, 12), pd.NaT]).format()
+ self.assertEqual(formatted[0], "2003-01-01 12:00:00")
+ self.assertEqual(formatted[1], "NaT")
+
+ def test_date(self):
+ formatted = pd.to_datetime([datetime(2003, 1, 1), pd.NaT]).format()
+ self.assertEqual(formatted[0], "2003-01-01")
+ self.assertEqual(formatted[1], "NaT")
+
+ def test_date_tz(self):
+ formatted = pd.to_datetime([datetime(2013,1,1)], utc=True).format()
+ self.assertEqual(formatted[0], "2013-01-01 00:00:00+00:00")
+
+ formatted = pd.to_datetime([datetime(2013,1,1), pd.NaT], utc=True).format()
+ self.assertEqual(formatted[0], "2013-01-01 00:00:00+00:00")
+
+ def test_date_explict_date_format(self):
+ formatted = pd.to_datetime([datetime(2003, 2, 1), pd.NaT]).format(date_format="%m-%d-%Y", na_rep="UT")
+ self.assertEqual(formatted[0], "02-01-2003")
+ self.assertEqual(formatted[1], "UT")
+
+
+class TestDatetimeIndexUnicode(tm.TestCase):
+ def test_dates(self):
+ text = str(pd.to_datetime([datetime(2013,1,1), datetime(2014,1,1)]))
+ self.assertTrue("[2013-01-01," in text)
+ self.assertTrue(", 2014-01-01]" in text)
+
+ def test_mixed(self):
+ text = str(pd.to_datetime([datetime(2013,1,1), datetime(2014,1,1,12), datetime(2014,1,1)]))
+ self.assertTrue("[2013-01-01 00:00:00," in text)
+ self.assertTrue(", 2014-01-01 00:00:00]" in text)
+
+
+class TestStringRepTimestamp(tm.TestCase):
+ def test_no_tz(self):
+ dt_date = datetime(2013, 1, 2)
+ self.assertEqual(str(dt_date), str(Timestamp(dt_date)))
+
+ dt_datetime = datetime(2013, 1, 2, 12, 1, 3)
+ self.assertEqual(str(dt_datetime), str(Timestamp(dt_datetime)))
+
+ dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45)
+ self.assertEqual(str(dt_datetime_us), str(Timestamp(dt_datetime_us)))
+
+ ts_nanos_only = Timestamp(200)
+ self.assertEqual(str(ts_nanos_only), "1970-01-01 00:00:00.000000200")
+
+ ts_nanos_micros = Timestamp(1200)
+ self.assertEqual(str(ts_nanos_micros), "1970-01-01 00:00:00.000001200")
+
+ def test_tz(self):
+ _skip_if_no_pytz()
+
+ import pytz
+
+ dt_date = datetime(2013, 1, 2, tzinfo=pytz.utc)
+ self.assertEqual(str(dt_date), str(Timestamp(dt_date)))
+
+ dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=pytz.utc)
+ self.assertEqual(str(dt_datetime), str(Timestamp(dt_datetime)))
+
+ dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=pytz.utc)
+ self.assertEqual(str(dt_datetime_us), str(Timestamp(dt_datetime_us)))
if __name__ == '__main__':
import nose
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 6779e1a61c081..e6115a7c0e95d 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -529,8 +529,16 @@ def _mpl_repr(self):
_na_value = tslib.NaT
"""The expected NA value to use with this index."""
+ @cache_readonly
+ def _is_dates_only(self):
+ from pandas.core.format import _is_dates_only
+ return _is_dates_only(self.values)
+
def __unicode__(self):
- from pandas.core.format import _format_datetime64
+ from pandas.core.format import _get_format_datetime64
+
+ formatter = _get_format_datetime64(is_dates_only=self._is_dates_only)
+
values = self.values
freq = None
@@ -539,15 +547,15 @@ def __unicode__(self):
summary = str(self.__class__)
if len(self) == 1:
- first = _format_datetime64(values[0], tz=self.tz)
+ first = formatter(values[0], tz=self.tz)
summary += '\n[%s]' % first
elif len(self) == 2:
- first = _format_datetime64(values[0], tz=self.tz)
- last = _format_datetime64(values[-1], tz=self.tz)
+ first = formatter(values[0], tz=self.tz)
+ last = formatter(values[-1], tz=self.tz)
summary += '\n[%s, %s]' % (first, last)
elif len(self) > 2:
- first = _format_datetime64(values[0], tz=self.tz)
- last = _format_datetime64(values[-1], tz=self.tz)
+ first = formatter(values[0], tz=self.tz)
+ last = formatter(values[-1], tz=self.tz)
summary += '\n[%s, ..., %s]' % (first, last)
tagline = '\nLength: %d, Freq: %s, Timezone: %s'
@@ -630,30 +638,14 @@ def __contains__(self, key):
def _format_with_header(self, header, **kwargs):
return header + self._format_native_types(**kwargs)
- def _format_native_types(self, na_rep=u('NaT'), date_format=None, **kwargs):
- data = list(self)
-
- # tz formatter or time formatter
- zero_time = time(0, 0)
- if date_format is None:
- for d in data:
- if d.time() != zero_time or d.tzinfo is not None:
- return [u('%s') % x for x in data]
-
- values = np.array(data, dtype=object)
- mask = isnull(self.values)
- values[mask] = na_rep
-
- imask = -mask
-
- if date_format is None:
- date_formatter = lambda x: u('%d-%.2d-%.2d' % (x.year, x.month, x.day))
- else:
- date_formatter = lambda x: u(x.strftime(date_format))
-
- values[imask] = np.array([date_formatter(dt) for dt in values[imask]])
-
- return values.tolist()
+ def _format_native_types(self, na_rep=u('NaT'),
+ date_format=None, **kwargs):
+ data = self._get_object_index()
+ from pandas.core.format import Datetime64Formatter
+ return Datetime64Formatter(values=data,
+ nat_rep=na_rep,
+ date_format=date_format,
+ justify='all').get_result()
def isin(self, values):
"""
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index d82f91767d413..8f0c817d33a2b 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -427,7 +427,7 @@ def test_index_with_timezone_repr(self):
rng_eastern = rng.tz_localize('US/Eastern')
- rng_repr = repr(rng)
+ rng_repr = repr(rng_eastern)
self.assert_('2010-04-13 00:00:00' in rng_repr)
def test_index_astype_asobject_tzinfos(self):
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index bda6625f3c3ad..0bac159404e34 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -125,6 +125,8 @@ def _is_fixed_offset(tz):
except AttributeError:
return True
+_zero_time = datetime_time(0, 0)
+
# Python front end to C extension type _Timestamp
# This serves as the box for datetime64
class Timestamp(_Timestamp):
@@ -203,13 +205,17 @@ class Timestamp(_Timestamp):
pass
zone = "'%s'" % zone if zone else 'None'
- return "Timestamp('%s', tz=%s)" % (result,zone)
+ return "Timestamp('%s', tz=%s)" % (result, zone)
@property
- def _repr_base(self):
- result = '%d-%.2d-%.2d %.2d:%.2d:%.2d' % (self.year, self.month,
- self.day, self.hour,
- self.minute, self.second)
+ def _date_repr(self):
+ # Ideal here would be self.strftime("%Y-%m-%d"), but
+ # the datetime strftime() methods require year >= 1900
+ return '%d-%.2d-%.2d' % (self.year, self.month, self.day)
+
+ @property
+ def _time_repr(self):
+ result = '%.2d:%.2d:%.2d' % (self.hour, self.minute, self.second)
if self.nanosecond != 0:
nanos = self.nanosecond + 1000 * self.microsecond
@@ -219,6 +225,10 @@ class Timestamp(_Timestamp):
return result
+ @property
+ def _repr_base(self):
+ return '%s %s' % (self._date_repr, self._time_repr)
+
@property
def tz(self):
"""
@@ -338,6 +348,32 @@ class Timestamp(_Timestamp):
ts.dts.hour, ts.dts.min, ts.dts.sec,
ts.dts.us, ts.tzinfo)
+ def isoformat(self, sep='T'):
+ base = super(_Timestamp, self).isoformat(sep=sep)
+ if self.nanosecond == 0:
+ return base
+
+ if self.tzinfo is not None:
+ base1, base2 = base[:-6], base[-6:]
+ else:
+ base1, base2 = base, ""
+
+ if self.microsecond != 0:
+ base1 += "%.3d" % self.nanosecond
+ else:
+ base1 += ".%.9d" % self.nanosecond
+
+ return base1 + base2
+
+ def _has_time_component(self):
+ """
+ Returns if the Timestamp has a time component
+ in addition to the date part
+ """
+ return (self.time() != _zero_time
+ or self.tzinfo is not None
+ or self.nanosecond != 0)
+
_nat_strings = set(['NaT','nat','NAT','nan','NaN','NAN'])
class NaTType(_NaT):
@@ -355,6 +391,9 @@ class NaTType(_NaT):
def __repr__(self):
return 'NaT'
+ def __str__(self):
+ return 'NaT'
+
def __hash__(self):
return iNaT
@@ -1140,8 +1179,21 @@ def array_to_timedelta64(ndarray[object] values, coerce=True):
return result
-def repr_timedelta64(object value):
- """ provide repr for timedelta64 """
+
+def repr_timedelta64(object value, format=None):
+ """
+ provide repr for timedelta64
+
+ Parameters
+ ----------
+ value : timedelta64
+ format : None|"short"|"long"
+
+ Returns
+ -------
+ converted : Timestamp
+
+ """
ivalue = value.view('i8')
@@ -1178,19 +1230,24 @@ def repr_timedelta64(object value):
seconds_pretty = "%02d" % seconds
else:
sp = abs(round(1e6*frac))
- seconds_pretty = "%02d.%06d" % (seconds,sp)
+ seconds_pretty = "%02d.%06d" % (seconds, sp)
if sign < 0:
sign_pretty = "-"
else:
sign_pretty = ""
- if days:
- return "%s%d days, %02d:%02d:%s" % (sign_pretty, days, hours, minutes,
+ if days or format == 'long':
+ if (hours or minutes or seconds or frac) or format != 'short':
+ return "%s%d days, %02d:%02d:%s" % (sign_pretty, days, hours, minutes,
seconds_pretty)
+ else:
+ return "%s%d days" % (sign_pretty, days)
+
return "%s%02d:%02d:%s" % (sign_pretty, hours, minutes, seconds_pretty)
+
def array_strptime(ndarray[object] values, object fmt, coerce=False):
cdef:
Py_ssize_t i, n = len(values)
| Closes #3401
Closes #5708
Closes #5912
| https://api.github.com/repos/pandas-dev/pandas/pulls/5701 | 2013-12-15T20:15:09Z | 2014-01-15T02:57:34Z | 2014-01-15T02:57:34Z | 2014-06-24T01:58:21Z |
Add cdfplot | diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 1fee318059b7f..85811c7bbcf46 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -28,6 +28,13 @@ def _skip_if_no_scipy():
raise nose.SkipTest("no scipy")
+def _skip_if_no_sm():
+ try:
+ import statsmodels.api as sm
+ except ImportError:
+ raise nose.SkipTest("no statsmodels")
+
+
@tm.mplskip
class TestSeriesPlots(tm.TestCase):
def setUp(self):
@@ -60,6 +67,7 @@ def test_plot(self):
_check_plot_works(self.series[:5].plot, kind='barh')
_check_plot_works(self.series[:10].plot, kind='barh')
_check_plot_works(Series(randn(10)).plot, kind='bar', color='black')
+ _check_plot_works(Series(randn(10)).plot, kind='cdf')
@slow
def test_plot_figsize_and_title(self):
@@ -453,6 +461,26 @@ def test_plot_xy(self):
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
+ def test_get_plot_kind(self):
+ from pandas.tools.plotting import (LinePlot, BarPlot, DistributionPlot,
+ ScatterPlot, _get_plot_kind)
+ kinds = ['line', 'bar', 'barh', 'scatter']
+ klasses = [LinePlot, BarPlot, BarPlot, ScatterPlot]
+ for kind, kls in zip(kinds, klasses):
+ result = _get_plot_kind(kind)
+ self.assertEqual(result, kls)
+
+ for kind in ['kde', 'cdf']:
+ result = _get_plot_kind(kind)
+ self.assertEqual(result.func, DistributionPlot)
+ self.assertEqual(result.keywords, {'kind': kind})
+
+ with tm.assertRaises(ValueError):
+ _get_plot_kind('scatter', series=True)
+
+ with tm.assertRaises(ValueError):
+ _get_plot_kind('NOT A PLOT KIND')
+
@slow
def test_xcompat(self):
import pandas as pd
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 9984c3fd76f81..32a623d9f9148 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -94,6 +94,25 @@
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
+
+def _get_plot_kind(kind, series=False):
+ from functools import partial
+ plot_kinds = {'kde': partial(DistributionPlot, kind='kde'),
+ 'cdf': partial(DistributionPlot, kind='cdf'),
+ 'bar': BarPlot,
+ 'barh': BarPlot,
+ 'line': LinePlot,
+ 'scatter': ScatterPlot}
+
+ if kind == 'scatter' and series:
+ raise ValueError('Invalid chart type (%s) given for series plot' % kind)
+ try:
+ klass = plot_kinds[kind]
+ return klass
+ except KeyError:
+ raise ValueError('Invalid chart type given %s' % kind)
+
+
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
color=None):
import matplotlib.pyplot as plt
@@ -1174,53 +1193,69 @@ def _get_marked_label(self, label, col_num):
return label
-class KdePlot(MPLPlot):
- def __init__(self, data, bw_method=None, ind=None, **kwargs):
+class DistributionPlot(MPLPlot):
+ def __init__(self, data, kind, **kwargs):
+ """
+ data : NDFrame
+ kind : str
+ `kde` or `cdf`
+ """
MPLPlot.__init__(self, data, **kwargs)
- self.bw_method=bw_method
- self.ind=ind
+ self.kind = kind
+ self.kde_kwds = {'ind': self.kwds.pop('ind', None),
+ 'bw_method': self.kwds.pop('bw_method', None)}
+ self.plotf = self._get_plot_function()
+ self.colors = self._get_colors()
def _make_plot(self):
- from scipy.stats import gaussian_kde
- from scipy import __version__ as spv
- from distutils.version import LooseVersion
- plotf = self._get_plot_function()
- colors = self._get_colors()
+ if self.kind == 'kde':
+ from scipy.stats import gaussian_kde
+ from scipy import __version__ as spv
+ from distutils.version import LooseVersion
+ else:
+ import statsmodels.api as sm
+
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
style = self._get_style(i, label)
label = com.pprint_thing(label)
+ ind = self.kde_kwds.get('ind')
+ bw_method = self.kde_kwds.get('bw_method')
- if LooseVersion(spv) >= '0.11.0':
- gkde = gaussian_kde(y, bw_method=self.bw_method)
- else:
- gkde = gaussian_kde(y)
- if self.bw_method is not None:
- msg = ('bw_method was added in Scipy 0.11.0.' +
- ' Scipy version in use is %s.' % spv)
- warnings.warn(msg)
-
- sample_range = max(y) - min(y)
-
- if self.ind is None:
- ind = np.linspace(min(y) - 0.5 * sample_range,
- max(y) + 0.5 * sample_range, 1000)
+ # calculation
+ if self.kind == 'kde':
+ if LooseVersion(spv) >= '0.11.0':
+ gkde = gaussian_kde(y, bw_method=bw_method)
+ else:
+ gkde = gaussian_kde(y)
+ if bw_method is not None: # Is bw_method always a str?
+ msg = ('bw_method was added in Scipy 0.11.0.' +
+ ' Scipy version in use is %s.' % spv)
+ warnings.warn(msg)
+
+ sample_range = max(y) - min(y)
+ if ind is None:
+ ind = np.linspace(min(y) - 0.5 * sample_range,
+ max(y) + 0.5 * sample_range, 1000)
+ y = gkde.evaluate(ind)
+ ax.set_ylabel("Density")
else:
- ind = self.ind
-
- ax.set_ylabel("Density")
+ k = sm.nonparametric.KDEUnivariate(y)
+ k.fit()
+ if ind is None:
+ ind = k.support
+ y = k.cdf
- y = gkde.evaluate(ind)
kwds = self.kwds.copy()
kwds['label'] = label
- self._maybe_add_color(colors, kwds, style, i)
+ self._maybe_add_color(self.colors, kwds, style, i)
if style is None:
args = (ax, ind, y)
else:
args = (ax, ind, y, style)
- plotf(*args, **kwds)
+ self.plotf(*args, **kwds)
ax.grid(self.grid)
def _post_plot_logic(self):
@@ -1228,6 +1263,7 @@ def _post_plot_logic(self):
for ax in self.axes:
ax.legend(loc='best')
+
class ScatterPlot(MPLPlot):
def __init__(self, data, x, y, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
@@ -1679,16 +1715,7 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,
ax_or_axes : matplotlib.AxesSubplot or list of them
"""
kind = _get_standard_kind(kind.lower().strip())
- if kind == 'line':
- klass = LinePlot
- elif kind in ('bar', 'barh'):
- klass = BarPlot
- elif kind == 'kde':
- klass = KdePlot
- elif kind == 'scatter':
- klass = ScatterPlot
- else:
- raise ValueError('Invalid chart type given %s' % kind)
+ klass = _get_plot_kind(kind)
if kind == 'scatter':
plot_obj = klass(frame, x=x, y=y, kind=kind, subplots=subplots,
@@ -1782,15 +1809,7 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None,
See matplotlib documentation online for more on this subject
"""
kind = _get_standard_kind(kind.lower().strip())
- if kind == 'line':
- klass = LinePlot
- elif kind in ('bar', 'barh'):
- klass = BarPlot
- elif kind == 'kde':
- klass = KdePlot
- else:
- raise ValueError('Invalid chart type given %s' % kind)
-
+ klass = _get_plot_kind(kind, series=True)
"""
If no axis is specified, we check whether there are existing figures.
If so, we get the current axis and check whether yaxis ticks are on the
| WIP for now. Closes #2669
I'm using statsmodels' KDE implementation right now; it has a `cdf` method, but scipy's gaussian_kde doesn't. I need to check the math, but I think something like doing a cumsum on the density and normalizing by the sum should be the same.
I'm also going to add kwargs for things like the inverse cdf.

| https://api.github.com/repos/pandas-dev/pandas/pulls/5700 | 2013-12-14T21:16:42Z | 2014-01-29T04:19:09Z | null | 2016-11-03T12:37:45Z |
DISP: show column dtype in DataFrame.info() output | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 90641a833f2a7..4aad541e3fcd1 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1426,10 +1426,12 @@ def info(self, verbose=True, buf=None, max_cols=None):
if len(cols) != len(counts): # pragma: no cover
raise AssertionError('Columns must equal counts (%d != %d)' %
(len(cols), len(counts)))
+ dtypes = self.dtypes
for col, count in compat.iteritems(counts):
+ dtype = dtypes[col]
col = com.pprint_thing(col)
lines.append(_put_str(col, space) +
- '%d non-null values' % count)
+ '%d non-null %s' % (count, dtype))
else:
lines.append(self.columns.summary(name='Columns'))
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 5b501de026c57..8103812a633df 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -6018,6 +6018,21 @@ def test_info_duplicate_columns(self):
columns=['a', 'a', 'b', 'b'])
frame.info(buf=io)
+ def test_info_shows_column_dtypes(self):
+ dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',
+ 'complex128', 'object', 'bool']
+ data = {}
+ n = 10
+ for i, dtype in enumerate(dtypes):
+ data[i] = np.random.randint(2, size=n).astype(dtype)
+ df = DataFrame(data)
+ buf = StringIO()
+ df.info(buf=buf)
+ res = buf.getvalue()
+ for i, dtype in enumerate(dtypes):
+ name = '%d %d non-null %s' % (i, n, dtype)
+ assert name in res
+
def test_dtypes(self):
self.mixed_frame['bool'] = self.mixed_frame['A'] > 0
result = self.mixed_frame.dtypes
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index d3104cdfad062..3e0a719bd93bd 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -2741,6 +2741,12 @@ def test_fillna_raise(self):
self.assertRaises(TypeError, s.fillna, [1, 2])
self.assertRaises(TypeError, s.fillna, (1, 2))
+ def test_raise_on_info(self):
+ s = Series(np.random.randn(10))
+ with tm.assertRaises(AttributeError):
+ s.info()
+
+
# TimeSeries-specific
def test_fillna(self):
| closes #3429.
Old:
```
In [39]: DataFrame(randn(10,2)).info()
<class 'pandas.core.frame.DataFrame'>
Int64Index: 10 entries, 0 to 9
Data columns (total 2 columns):
0 10 non-null values
1 10 non-null values
dtypes: float64(2)
```
New:
```
In [39]: DataFrame(randn(10,2)).info()
<class 'pandas.core.frame.DataFrame'>
Int64Index: 10 entries, 0 to 9
Data columns (total 2 columns):
0 10 non-null float64
1 10 non-null object
dtypes: float64(1), object(1)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5682 | 2013-12-12T03:13:32Z | 2014-01-01T02:54:25Z | 2014-01-01T02:54:25Z | 2014-06-12T20:01:14Z |
BUG: Bug in repeated indexing of object with resultant non-unique index (GH5678) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 8a911a6e41d0b..f6cbbd23011a8 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -821,6 +821,7 @@ Bug Fixes
- Bug in selecting from a non-unique index with ``loc`` (:issue:`5553`)
- Bug in groupby returning non-consistent types when user function returns a ``None``, (:issue:`5592`)
- Work around regression in numpy 1.7.0 which erroneously raises IndexError from ``ndarray.item`` (:issue:`5666`)
+ - Bug in repeated indexing of object with resultant non-unique index (:issue:`5678`)
pandas 0.12.0
-------------
diff --git a/pandas/core/series.py b/pandas/core/series.py
index bc9f6af61b9ec..ecfd99e61a090 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -481,7 +481,10 @@ def _slice(self, slobj, axis=0, raise_on_error=False, typ=None):
def __getitem__(self, key):
try:
- return self.index.get_value(self, key)
+ result = self.index.get_value(self, key)
+ if isinstance(result, np.ndarray):
+ return self._constructor(result,index=[key]*len(result)).__finalize__(self)
+ return result
except InvalidIndexError:
pass
except (KeyError, ValueError):
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index b6e7b10232bf5..d941224fec52e 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -316,6 +316,14 @@ def test_at_timestamp(self):
def test_iat_invalid_args(self):
pass
+ def test_repeated_getitem_dups(self):
+ # GH 5678
+ # repeated gettitems on a dup index returing a ndarray
+ df = DataFrame(np.random.random_sample((20,5)), index=['ABCDE'[x%5] for x in range(20)])
+ expected = df.loc['A',0]
+ result = df.loc[:,0].loc['A']
+ assert_series_equal(result,expected)
+
def test_iloc_getitem_int(self):
# integer
| closes #5678
| https://api.github.com/repos/pandas-dev/pandas/pulls/5680 | 2013-12-11T14:33:26Z | 2013-12-11T15:26:47Z | 2013-12-11T15:26:47Z | 2014-07-09T16:15:48Z |
API/ENH: Detect trying to set inplace on copies in a nicer way, related (GH5597) | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 93587cd11b597..90641a833f2a7 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1566,11 +1566,14 @@ def _ixs(self, i, axis=0, copy=False):
# a location index by definition
i = _maybe_convert_indices(i, len(self._get_axis(axis)))
- return self.reindex(i, takeable=True)._setitem_copy(True)
+ result = self.reindex(i, takeable=True)
+ copy=True
else:
new_values, copy = self._data.fast_2d_xs(i, copy=copy)
- return Series(new_values, index=self.columns,
- name=self.index[i])._setitem_copy(copy)
+ result = Series(new_values, index=self.columns,
+ name=self.index[i])
+ result.is_copy=copy
+ return result
# icol
else:
@@ -1680,7 +1683,7 @@ def _getitem_multilevel(self, key):
else:
new_values = self.values[:, loc]
result = DataFrame(new_values, index=self.index,
- columns=result_columns)
+ columns=result_columns).__finalize__(self)
if len(result.columns) == 1:
top = result.columns[0]
if ((type(top) == str and top == '') or
@@ -1689,6 +1692,7 @@ def _getitem_multilevel(self, key):
if isinstance(result, Series):
result = Series(result, index=self.index, name=key)
+ result.is_copy=True
return result
else:
return self._get_item_cache(key)
@@ -2136,7 +2140,8 @@ def xs(self, key, axis=0, level=None, copy=True, drop_level=True):
new_values, copy = self._data.fast_2d_xs(loc, copy=copy)
result = Series(new_values, index=self.columns,
- name=self.index[loc])._setitem_copy(copy)
+ name=self.index[loc])
+ result.is_copy=True
else:
result = self[loc]
@@ -2307,7 +2312,6 @@ def set_index(self, keys, drop=True, append=False, inplace=False,
if inplace:
frame = self
-
else:
frame = self.copy()
@@ -2552,8 +2556,8 @@ def drop_duplicates(self, cols=None, take_last=False, inplace=False):
if inplace:
inds, = (-duplicated).nonzero()
- self._data = self._data.take(inds)
- self._clear_item_cache()
+ new_data = self._data.take(inds)
+ self._update_inplace(new_data)
else:
return self[-duplicated]
@@ -2717,13 +2721,12 @@ def trans(v):
if inplace:
if axis == 1:
- self._data = self._data.reindex_items(
+ new_data = self._data.reindex_items(
self._data.items[indexer],
copy=False)
elif axis == 0:
- self._data = self._data.take(indexer)
-
- self._clear_item_cache()
+ new_data = self._data.take(indexer)
+ self._update_inplace(new_data)
else:
return self.take(indexer, axis=axis, convert=False, is_copy=False)
@@ -2763,13 +2766,12 @@ def sortlevel(self, level=0, axis=0, ascending=True, inplace=False):
if inplace:
if axis == 1:
- self._data = self._data.reindex_items(
+ new_data = self._data.reindex_items(
self._data.items[indexer],
copy=False)
elif axis == 0:
- self._data = self._data.take(indexer)
-
- self._clear_item_cache()
+ new_data = self._data.take(indexer)
+ self._update_inplace(new_data)
else:
return self.take(indexer, axis=axis, convert=False, is_copy=False)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 624384e484dc0..253136b9a11c3 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -59,7 +59,7 @@ def _single_replace(self, to_replace, method, inplace, limit):
dtype=self.dtype).__finalize__(self)
if inplace:
- self._data = result._data
+ self._update_inplace(result._data)
return
return result
@@ -562,9 +562,7 @@ def f(x):
result._clear_item_cache()
if inplace:
- self._data = result._data
- self._clear_item_cache()
-
+ self._update_inplace(result._data)
else:
return result.__finalize__(self)
@@ -994,12 +992,22 @@ def _maybe_update_cacher(self, clear=False):
if clear, then clear our cache """
cacher = getattr(self, '_cacher', None)
if cacher is not None:
- try:
- cacher[1]()._maybe_cache_changed(cacher[0], self)
- except:
+ ref = cacher[1]()
- # our referant is dead
+ # we are trying to reference a dead referant, hence
+ # a copy
+ if ref is None:
del self._cacher
+ self.is_copy = True
+ self._check_setitem_copy(stacklevel=5, t='referant')
+ else:
+ try:
+ ref._maybe_cache_changed(cacher[0], self)
+ except:
+ pass
+ if ref.is_copy:
+ self.is_copy = True
+ self._check_setitem_copy(stacklevel=5, t='referant')
if clear:
self._clear_item_cache()
@@ -1014,12 +1022,7 @@ def _set_item(self, key, value):
self._data.set(key, value)
self._clear_item_cache()
- def _setitem_copy(self, copy):
- """ set the _is_copy of the iiem """
- self.is_copy = copy
- return self
-
- def _check_setitem_copy(self, stacklevel=4):
+ def _check_setitem_copy(self, stacklevel=4, t='setting'):
""" validate if we are doing a settitem on a chained copy.
If you call this function, be sure to set the stacklevel such that the
@@ -1027,9 +1030,13 @@ def _check_setitem_copy(self, stacklevel=4):
if self.is_copy:
value = config.get_option('mode.chained_assignment')
- t = ("A value is trying to be set on a copy of a slice from a "
- "DataFrame.\nTry using .loc[row_index,col_indexer] = value "
- "instead")
+ if t == 'referant':
+ t = ("A value is trying to be set on a copy of a slice from a "
+ "DataFrame")
+ else:
+ t = ("A value is trying to be set on a copy of a slice from a "
+ "DataFrame.\nTry using .loc[row_index,col_indexer] = value "
+ "instead")
if value == 'raise':
raise SettingWithCopyError(t)
elif value == 'warn':
@@ -1103,7 +1110,7 @@ def take(self, indices, axis=0, convert=True, is_copy=True):
# maybe set copy if we didn't actually change the index
if is_copy and not result._get_axis(axis).equals(self._get_axis(axis)):
- result = result._setitem_copy(is_copy)
+ result.is_copy=is_copy
return result
@@ -1218,7 +1225,7 @@ def _update_inplace(self, result):
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
- self._data = result._data
+ self._data = getattr(result,'_data',result)
self._maybe_update_cacher()
def add_prefix(self, prefix):
@@ -1910,14 +1917,13 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
continue
obj = result[k]
obj.fillna(v, inplace=True)
- obj._maybe_update_cacher()
return result
else:
new_data = self._data.fillna(value, inplace=inplace,
downcast=downcast)
if inplace:
- self._data = new_data
+ self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
@@ -2165,7 +2171,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
new_data = new_data.convert(copy=not inplace, convert_numeric=False)
if inplace:
- self._data = new_data
+ self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
@@ -2272,10 +2278,10 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
if inplace:
if axis == 1:
- self._data = new_data
+ self._update_inplace(new_data)
self = self.T
else:
- self._data = new_data
+ self._update_inplace(new_data)
else:
res = self._constructor(new_data).__finalize__(self)
if axis == 1:
@@ -2856,8 +2862,9 @@ def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
if inplace:
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
- self._data = self._data.putmask(cond, other, align=axis is None,
- inplace=True)
+ new_data = self._data.putmask(cond, other, align=axis is None,
+ inplace=True)
+ self._update_inplace(new_data)
else:
new_data = self._data.where(other, cond, align=axis is None,
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index a4e273c43e483..8444c7a9b2a00 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -209,7 +209,7 @@ def _setitem_with_indexer(self, indexer, value):
labels = _safe_append_to_index(index, key)
self.obj._data = self.obj.reindex_axis(labels, i)._data
self.obj._maybe_update_cacher(clear=True)
- self.obj._setitem_copy(False)
+ self.obj.is_copy=False
if isinstance(labels, MultiIndex):
self.obj.sortlevel(inplace=True)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index ffc30c81ededd..5b501de026c57 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -11607,6 +11607,7 @@ def _check_f(base, f):
_check_f(data.copy(), f)
# -----Series-----
+ d = data.copy()['c']
# reset_index
f = lambda x: x.reset_index(inplace=True, drop=True)
@@ -11614,15 +11615,15 @@ def _check_f(base, f):
# fillna
f = lambda x: x.fillna(0, inplace=True)
- _check_f(data.copy()['c'], f)
+ _check_f(d.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
- _check_f(data.copy()['c'], f)
+ _check_f(d.copy(), f)
# rename
f = lambda x: x.rename({1: 'foo'}, inplace=True)
- _check_f(data.copy()['c'], f)
+ _check_f(d.copy(), f)
def test_isin(self):
# GH #4211
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 22c72e1e5d82e..f834094475c1b 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -365,7 +365,7 @@ def f(grp):
return None
return grp.iloc[0].loc['C']
result = df.groupby('A').apply(f)
- e = df.groupby('A').first()['C']
+ e = df.groupby('A').first()['C'].copy()
e.loc['Pony'] = np.nan
e.name = None
assert_series_equal(result,e)
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index d941224fec52e..59c7bda35c544 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1733,9 +1733,9 @@ def test_cache_updating(self):
df.index = index
# setting via chained assignment
- df.loc[0]['z'].iloc[0] = 1.
- result = df.loc[(0,0),'z']
- self.assert_(result == 1)
+ def f():
+ df.loc[0]['z'].iloc[0] = 1.
+ self.assertRaises(com.SettingWithCopyError, f)
# correct setting
df.loc[(0,0),'z'] = 2
@@ -1891,6 +1891,20 @@ def random_text(nobs=100):
self.assert_(df.is_copy is False)
df['a'] += 1
+ # inplace ops
+ # original from: http://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug
+ a = [12, 23]
+ b = [123, None]
+ c = [1234, 2345]
+ d = [12345, 23456]
+ tuples = [('eyes', 'left'), ('eyes', 'right'), ('ears', 'left'), ('ears', 'right')]
+ events = {('eyes', 'left'): a, ('eyes', 'right'): b, ('ears', 'left'): c, ('ears', 'right'): d}
+ multiind = MultiIndex.from_tuples(tuples, names=['part', 'side'])
+ zed = DataFrame(events, index=['a', 'b'], columns=multiind)
+ def f():
+ zed['eyes']['right'].fillna(value=555, inplace=True)
+ self.assertRaises(com.SettingWithCopyError, f)
+
pd.set_option('chained_assignment','warn')
def test_float64index_slicing_bug(self):
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 151f222d7357a..7dd9dbd51d730 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -1141,14 +1141,27 @@ def test_is_lexsorted(self):
self.assert_(index.lexsort_depth == 0)
def test_frame_getitem_view(self):
- df = self.frame.T
+ df = self.frame.T.copy()
+
+ # this works because we are modifying the underlying array
+ # really a no-no
df['foo'].values[:] = 0
self.assert_((df['foo'].values == 0).all())
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sortlevel(0, axis=1)
- df['foo']['one'] = 2
+
+ # this will work, but will raise/warn as its chained assignment
+ def f():
+ df['foo']['one'] = 2
+ return df
+ self.assertRaises(com.SettingWithCopyError, f)
+
+ try:
+ df = f()
+ except:
+ pass
self.assert_((df['foo', 'one'] == 0).all())
def test_frame_getitem_not_sorted(self):
| related #5597
pretty common error that I have seen, originally from:
http://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug
```
In [1]: a = [12, 23]
In [2]: b = [123, None]
In [3]: c = [1234, 2345]
In [4]: d = [12345, 23456]
In [5]: tuples = [('eyes', 'left'), ('eyes', 'right'), ('ears', 'left'), ('ears', 'right')]
In [6]: events = {('eyes', 'left'): a, ('eyes', 'right'): b, ('ears', 'left'): c, ('ears', 'right'): d}
In [7]: multiind = pandas.MultiIndex.from_tuples(tuples, names=['part', 'side'])
In [8]: zed = pandas.DataFrame(events, index=['a', 'b'], columns=multiind)
```
Shows a warning (the message is comewhat generic)
```
In [9]: zed['eyes']['right'].fillna(value=555, inplace=True)
/usr/local/bin/ipython:1: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame
#!/usr/local/bin/python
In [10]: zed
Out[10]:
part eyes ears
side left right left right
a 12 123 1234 12345
b 23 NaN 2345 23456
[2 rows x 4 columns]
```
Correct method
```
In [11]: zed.loc[:,('eyes','right')].fillna(value=555, inplace=True)
In [12]: zed
Out[12]:
part eyes ears
side left right left right
a 12 123 1234 12345
b 23 555 2345 23456
[2 rows x 4 columns]
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5679 | 2013-12-11T14:01:37Z | 2013-12-11T21:10:14Z | 2013-12-11T21:10:14Z | 2014-06-13T22:14:03Z |
BUG: properly handle a user function ingroupby that returns all scalars (GH5592) | diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 960baa503036c..558843f55777c 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -18,7 +18,8 @@
from pandas.util.decorators import cache_readonly, Appender
import pandas.core.algorithms as algos
import pandas.core.common as com
-from pandas.core.common import _possibly_downcast_to_dtype, isnull, notnull
+from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
+ notnull, _DATELIKE_DTYPES)
import pandas.lib as lib
import pandas.algos as _algos
@@ -2169,11 +2170,12 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
break
if v is None:
return DataFrame()
- values = [
- x if x is not None else
- v._constructor(**v._construct_axes_dict())
- for x in values
- ]
+ elif isinstance(v, NDFrame):
+ values = [
+ x if x is not None else
+ v._constructor(**v._construct_axes_dict())
+ for x in values
+ ]
v = values[0]
@@ -2235,11 +2237,17 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
# through to the outer else caluse
return Series(values, index=key_index)
+ # if we have date/time like in the original, then coerce dates
+ # as we are stacking can easily have object dtypes here
+ cd = True
+ if self.obj.ndim == 2 and self.obj.dtypes.isin(_DATELIKE_DTYPES).any():
+ cd = 'coerce'
return DataFrame(stacked_values, index=index,
- columns=columns).convert_objects()
+ columns=columns).convert_objects(convert_dates=cd, convert_numeric=True)
else:
- return Series(values, index=key_index)
+ return Series(values, index=key_index).convert_objects(
+ convert_dates='coerce',convert_numeric=True)
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values,
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 6802b57bc39d1..22c72e1e5d82e 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -322,10 +322,12 @@ def func(dataf):
# GH5592
# inconcistent return type
df = DataFrame(dict(A = [ 'Tiger', 'Tiger', 'Tiger', 'Lamb', 'Lamb', 'Pony', 'Pony' ],
- B = Series(np.arange(7),dtype='int64')))
+ B = Series(np.arange(7),dtype='int64'),
+ C = date_range('20130101',periods=7)))
+
def f(grp):
return grp.iloc[0]
- expected = df.groupby('A').first()
+ expected = df.groupby('A').first()[['B']]
result = df.groupby('A').apply(f)[['B']]
assert_frame_equal(result,expected)
@@ -347,6 +349,27 @@ def f(grp):
e.loc['Pony'] = np.nan
assert_frame_equal(result,e)
+ # 5592 revisited, with datetimes
+ def f(grp):
+ if grp.name == 'Pony':
+ return None
+ return grp.iloc[0]
+ result = df.groupby('A').apply(f)[['C']]
+ e = df.groupby('A').first()[['C']]
+ e.loc['Pony'] = np.nan
+ assert_frame_equal(result,e)
+
+ # scalar outputs
+ def f(grp):
+ if grp.name == 'Pony':
+ return None
+ return grp.iloc[0].loc['C']
+ result = df.groupby('A').apply(f)
+ e = df.groupby('A').first()['C']
+ e.loc['Pony'] = np.nan
+ e.name = None
+ assert_series_equal(result,e)
+
def test_agg_regression1(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.mean)
| related #5592
BUG: handle coercion of None for date/time like dtypes in a groupby result
| https://api.github.com/repos/pandas-dev/pandas/pulls/5675 | 2013-12-10T22:07:47Z | 2013-12-10T22:42:06Z | 2013-12-10T22:42:06Z | 2014-06-21T12:51:25Z |
BUG: HDFStore improperly inferring a freq on datetimeindexes | diff --git a/doc/source/io.rst b/doc/source/io.rst
index d2ad38f1a2893..3e3ced2b8aba3 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2089,6 +2089,7 @@ dict:
.. ipython:: python
+ np.random.seed(1234)
index = date_range('1/1/2000', periods=8)
s = Series(randn(5), index=['a', 'b', 'c', 'd', 'e'])
df = DataFrame(randn(8, 3), index=index,
@@ -2513,6 +2514,7 @@ be data_columns
df_dc.ix[4:6,'string'] = np.nan
df_dc.ix[7:9,'string'] = 'bar'
df_dc['string2'] = 'cool'
+ df_dc.ix[1:3,['B','C']] = 1.0
df_dc
# on-disk operations
@@ -2520,7 +2522,7 @@ be data_columns
store.select('df_dc', [ Term('B>0') ])
# getting creative
- store.select('df_dc', ['B > 0', 'C > 0', 'string == foo'])
+ store.select('df_dc', 'B > 0 & C > 0 & string == foo')
# this is in-memory version of this type of selection
df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == 'foo')]
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 6ebc33afdd43d..09618b77a2968 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1434,12 +1434,11 @@ def convert(self, values, nan_rep, encoding):
self.values = Index(values, **kwargs)
except:
- # if the output freq is different that what we recorded, then infer
- # it
+ # if the output freq is different that what we recorded,
+ # it should be None (see also 'doc example part 2')
if 'freq' in kwargs:
- kwargs['freq'] = 'infer'
- self.values = Index(
- _maybe_convert(values, self.kind, encoding), **kwargs)
+ kwargs['freq'] = None
+ self.values = Index(values, **kwargs)
# set the timezone if indicated
# we stored in utc, so reverse to local timezone
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 1953f79482a22..e9c04932aba40 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -1327,6 +1327,29 @@ def check_col(key,name,size):
df_dc.string == 'foo')]
tm.assert_frame_equal(result, expected)
+ with ensure_clean_store(self.path) as store:
+ # doc example part 2
+ np.random.seed(1234)
+ index = date_range('1/1/2000', periods=8)
+ df_dc = DataFrame(np.random.randn(8, 3), index=index,
+ columns=['A', 'B', 'C'])
+ df_dc['string'] = 'foo'
+ df_dc.ix[4:6,'string'] = np.nan
+ df_dc.ix[7:9,'string'] = 'bar'
+ df_dc.ix[:,['B','C']] = df_dc.ix[:,['B','C']].abs()
+ df_dc['string2'] = 'cool'
+
+ # on-disk operations
+ store.append('df_dc', df_dc, data_columns = ['B', 'C', 'string', 'string2'])
+
+ result = store.select('df_dc', [ Term('B>0') ])
+ expected = df_dc[df_dc.B>0]
+ tm.assert_frame_equal(result,expected)
+
+ result = store.select('df_dc', ['B > 0', 'C > 0', 'string == "foo"'])
+ expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == 'foo')]
+ tm.assert_frame_equal(result,expected)
+
def test_create_table_index(self):
with ensure_clean_store(self.path) as store:
| surfaced in a doc example
http://stackoverflow.com/questions/20489519/how-come-pandas-select-results-in-the-examples-dont-match
| https://api.github.com/repos/pandas-dev/pandas/pulls/5674 | 2013-12-10T14:28:19Z | 2013-12-10T14:48:29Z | 2013-12-10T14:48:29Z | 2014-07-16T08:42:49Z |
DOC: add pickle compat warning to docs | diff --git a/doc/source/conf.py b/doc/source/conf.py
index 695a954f78cfb..8e8ac4a61acf5 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -248,7 +248,7 @@
'wiki ')}
# remove the docstring of the flags attribute (inherited from numpy ndarray)
-# because these give doc build errors (see GH issue 5331)
+# because these give doc build errors (see GH issue 5331)
def remove_flags_docstring(app, what, name, obj, options, lines):
if what == "attribute" and name.endswith(".flags"):
del lines[:]
diff --git a/doc/source/io.rst b/doc/source/io.rst
index a6f022d85272e..d2ad38f1a2893 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1967,9 +1967,16 @@ any pickled pandas object (or any other pickled object) from file:
See: http://docs.python.org/2.7/library/pickle.html
+.. warning::
+
+ In 0.13, pickle preserves compatibility with pickles created prior to 0.13. These must
+ be read with ``pd.read_pickle``, rather than the default python ``pickle.load``.
+ See `this question <http://stackoverflow.com/questions/20444593/pandas-compiled-from-source-default-pickle-behavior-changed>`__
+ for a detailed explanation.
+
.. note::
- These methods were previously ``save`` and ``load``, prior to 0.12.0, and are now deprecated.
+ These methods were previously ``pd.save`` and ``pd.load``, prior to 0.12.0, and are now deprecated.
.. _io.msgpack:
diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt
index 8763a5aafb39a..720150015909e 100644
--- a/doc/source/v0.13.0.txt
+++ b/doc/source/v0.13.0.txt
@@ -1,7 +1,7 @@
.. _whatsnew_0130:
-v0.13.0 (October ??, 2013)
---------------------------
+v0.13.0 (December ??, 2013)
+---------------------------
This is a major release from 0.12.0 and includes a number of API changes, several new features and
enhancements along with a large number of bug fixes.
@@ -817,6 +817,7 @@ Experimental
As of 10/10/13, there is a bug in Google's API preventing result sets
from being larger than 100,000 rows. A patch is scheduled for the week of
10/14/13.
+
.. _whatsnew_0130.refactoring:
Internal Refactoring
@@ -860,6 +861,8 @@ to unify methods and behaviors. Series formerly subclassed directly from
- ``Series(0.5)`` would previously return the scalar ``0.5``, instead this will return a 1-element ``Series``
+- Pickle compatibility is preserved for pickles created prior to 0.13. These must be unpickled with ``pd.read_pickle``, see :ref:`Pickling<io.pickle>`.
+
- Refactor of series.py/frame.py/panel.py to move common code to generic.py
- added ``_setup_axes`` to created generic NDFrame structures
| https://api.github.com/repos/pandas-dev/pandas/pulls/5667 | 2013-12-09T13:56:52Z | 2013-12-09T13:57:05Z | 2013-12-09T13:57:05Z | 2014-06-30T04:29:05Z | |
BUG: Work-around a numpy regression affecting pandas.eval() with numexpr | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 97b86703e73b8..8a911a6e41d0b 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -820,6 +820,7 @@ Bug Fixes
- Bug fix in apply when using custom function and objects are not mutated (:issue:`5545`)
- Bug in selecting from a non-unique index with ``loc`` (:issue:`5553`)
- Bug in groupby returning non-consistent types when user function returns a ``None``, (:issue:`5592`)
+ - Work around regression in numpy 1.7.0 which erroneously raises IndexError from ``ndarray.item`` (:issue:`5666`)
pandas 0.12.0
-------------
diff --git a/pandas/computation/align.py b/pandas/computation/align.py
index b61169e1f55e0..71adb74492425 100644
--- a/pandas/computation/align.py
+++ b/pandas/computation/align.py
@@ -249,6 +249,9 @@ def _reconstruct_object(typ, obj, axes, dtype):
try:
ret = ret_value.item()
- except ValueError:
+ except (ValueError, IndexError):
+ # XXX: we catch IndexError to absorb a
+ # regression in numpy 1.7.0
+ # fixed by numpy/numpy@04b89c63
ret = ret_value
return ret
| numpy 1.7.0 erroneously raises IndexError instead of ValueError
from ndarray.item() when the array is not of length 1. This can be
seen as a failure of
```
computation.tests.test_eval.TestScope.test_global_scope
```
for the cases that engine='numexpr'.
Absorb the splatter from this regression by explicitly catching the
erroneous IndexError.
closes #5535.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5666 | 2013-12-09T04:31:29Z | 2013-12-09T20:19:46Z | 2013-12-09T20:19:46Z | 2014-06-13T06:54:13Z |
BUG: scatter_matrix problem, ranges of diagonal and off-diagonal plots are different | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 6e764e39b4db8..94652eb489f4a 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -126,6 +126,8 @@ Bug Fixes
of pandas in QTConsole, now fixed. If you're using an older version and
need to supress the warnings, see (:issue:`5922`).
- Bug in merging ``timedelta`` dtypes (:issue:`5695`)
+ - Bug in plotting.scatter_matrix function. Wrong alignment among diagonal
+ and off-diagonal plots, see (:issue:`5497`).
pandas 0.13.0
-------------
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index c3a19bb5714c7..1fee318059b7f 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -755,6 +755,7 @@ def scat(**kwds):
_check_plot_works(scat, diagonal='kde')
_check_plot_works(scat, diagonal='density')
_check_plot_works(scat, diagonal='hist')
+ _check_plot_works(scat, range_padding=.1)
def scat2(x, y, by=None, ax=None, figsize=None):
return plt.scatter_plot(df, x, y, by, ax, figsize=None)
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index aa5a5a017146b..9984c3fd76f81 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -202,25 +202,34 @@ def use(self, key, value):
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
- hist_kwds=None, **kwds):
+ hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
- alpha : amount of transparency applied
- figsize : a tuple (width, height) in inches
- ax : Matplotlib axis object
- grid : setting this to True will show the grid
- diagonal : pick between 'kde' and 'hist' for
+ alpha : float, optional
+ amount of transparency applied
+ figsize : (float,float), optional
+ a tuple (width, height) in inches
+ ax : Matplotlib axis object, optional
+ grid : bool, optional
+ setting this to True will show the grid
+ diagonal : {'hist', 'kde'}
+ pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
- marker : Matplotlib marker type, default '.'
+ marker : str, optional
+ Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
+ range_padding : float, optional
+ relative extension of axis range in x and y
+ with respect to (x_max - x_min) or (y_max - y_min),
+ default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
@@ -250,6 +259,13 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
# workaround because `c='b'` is hardcoded in matplotlibs scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
+ boundaries_list = []
+ for a in df.columns:
+ values = df[a].values[mask[a].values]
+ rmin_, rmax_ = np.min(values), np.max(values)
+ rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
+ boundaries_list.append((rmin_ - rdelta_ext, rmax_+ rdelta_ext))
+
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
@@ -260,18 +276,25 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
+
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
+
+ ax.set_xlim(boundaries_list[i])
+
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
+ ax.set_xlim(boundaries_list[j])
+ ax.set_ylim(boundaries_list[i])
+
ax.set_xlabel('')
ax.set_ylabel('')
| close #5497
**Short story:**
In [this](https://github.com/pydata/pandas/issues/5497) issue I signaled that the x_lim and y_lim in scatter_matrix plots are different. In particular x/y_lim of diagonal hists and x/y_lim of scatter plots are different.
For instance, this causes peaks nonalignments in the 1-margin and 2-margin of the plotted multivariate pdf. See pictures in the issue.
This is a possible fix.
**Long Story:**
In a `scatter_matrix` plot we have a diagonal constituted by histograms and `scatter_plots` to show correlation among variables in the subplots away from the diagonal.
Let's consider this scatter matrix in which we have two variables `A` and `misfit`

The ranges of `A` and of `misfit` are shown by the axis thicks along the boundary subplots (i.e. plots in the first row and column (starting form the bottom left corner)).
As we can see there is a clear misalignment of the peak among plot 1,2 and plot 2,2.
This is due to the fact that the plot range for each plot (`xlim`) is determined automatically by matplotlib, hence there is a possible disagreement among different plots.
In this pull request, the `xlim`s and `ylim`s are forced to be consistent and are calculated as the real ranges of the variables (max - min) plus a given correction (here 5%).
i.e., for each plot
```
rdelta_ext = (rmax_ - rmin_)*xy_range_extension # xy_range_extension defaults 5%
ax.set_xlim( (rmin_ - rdelta_ext , rmax_+ rdelta_ext) )
```
This is an example of a `scatter_matrix` obtained with the correction (please compare e.g subplots 1,2 and 2,2 )

| https://api.github.com/repos/pandas-dev/pandas/pulls/5665 | 2013-12-08T13:45:21Z | 2014-01-21T12:48:06Z | null | 2014-06-24T04:31:39Z |
CLN: PEP8 cleanup of the io module | diff --git a/pandas/computation/align.py b/pandas/computation/align.py
index b61169e1f55e0..b2b7fcc3e1158 100644
--- a/pandas/computation/align.py
+++ b/pandas/computation/align.py
@@ -152,7 +152,9 @@ def _align_core(terms):
copy=False)
# need to fill if we have a bool dtype/array
- if isinstance(ti, (np.ndarray, pd.Series)) and ti.dtype == object and pd.lib.is_bool_array(ti.values):
+ if (isinstance(ti, (np.ndarray, pd.Series))
+ and ti.dtype == object
+ and pd.lib.is_bool_array(ti.values)):
r = f(fill_value=True)
else:
r = f()
diff --git a/pandas/computation/expr.py b/pandas/computation/expr.py
index 0baa596778996..c16205ff34b1f 100644
--- a/pandas/computation/expr.py
+++ b/pandas/computation/expr.py
@@ -512,18 +512,21 @@ def _possibly_evaluate_binop(self, op, op_class, lhs, rhs,
res = op(lhs, rhs)
if self.engine != 'pytables':
- if (res.op in _cmp_ops_syms and getattr(lhs,'is_datetime',False) or getattr(rhs,'is_datetime',False)):
- # all date ops must be done in python bc numexpr doesn't work well
- # with NaT
+ if (res.op in _cmp_ops_syms
+ and getattr(lhs, 'is_datetime', False)
+ or getattr(rhs, 'is_datetime', False)):
+ # all date ops must be done in python bc numexpr doesn't work
+ # well with NaT
return self._possibly_eval(res, self.binary_ops)
if res.op in eval_in_python:
# "in"/"not in" ops are always evaluated in python
return self._possibly_eval(res, eval_in_python)
elif self.engine != 'pytables':
- if (getattr(lhs,'return_type',None) == object or getattr(rhs,'return_type',None) == object):
- # evaluate "==" and "!=" in python if either of our operands has an
- # object return type
+ if (getattr(lhs, 'return_type', None) == object
+ or getattr(rhs, 'return_type', None) == object):
+ # evaluate "==" and "!=" in python if either of our operands
+ # has an object return type
return self._possibly_eval(res, eval_in_python +
maybe_eval_in_python)
return res
diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py
index cfbd9335ef9a0..073526f526abe 100644
--- a/pandas/computation/tests/test_eval.py
+++ b/pandas/computation/tests/test_eval.py
@@ -1022,7 +1022,8 @@ def check_performance_warning_for_poor_alignment(self, engine, parser):
def test_performance_warning_for_poor_alignment(self):
for engine, parser in ENGINES_PARSERS:
- yield self.check_performance_warning_for_poor_alignment, engine, parser
+ yield (self.check_performance_warning_for_poor_alignment, engine,
+ parser)
#------------------------------------
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 7135573d48644..45bf07b49eead 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -264,8 +264,8 @@ class DataFrameFormatter(TableFormatter):
def __init__(self, frame, buf=None, columns=None, col_space=None,
header=True, index=True, na_rep='NaN', formatters=None,
justify=None, float_format=None, sparsify=None,
- index_names=True, line_width=None, max_rows=None, max_cols=None,
- show_dimensions=False, **kwds):
+ index_names=True, line_width=None, max_rows=None,
+ max_cols=None, show_dimensions=False, **kwds):
self.frame = frame
self.buf = buf if buf is not None else StringIO()
self.show_index_names = index_names
@@ -284,7 +284,8 @@ def __init__(self, frame, buf=None, columns=None, col_space=None,
self.line_width = line_width
self.max_rows = max_rows
self.max_cols = max_cols
- self.max_rows_displayed = min(max_rows or len(self.frame),len(self.frame))
+ self.max_rows_displayed = min(max_rows or len(self.frame),
+ len(self.frame))
self.show_dimensions = show_dimensions
if justify is None:
@@ -330,7 +331,8 @@ def _to_str_columns(self):
*(_strlen(x) for x in cheader))
fmt_values = _make_fixed_width(fmt_values, self.justify,
- minimum=max_colwidth, truncated=truncate_v)
+ minimum=max_colwidth,
+ truncated=truncate_v)
max_len = max(np.max([_strlen(x) for x in fmt_values]),
max_colwidth)
@@ -349,8 +351,8 @@ def _to_str_columns(self):
if self.index:
strcols.insert(0, str_index)
if truncate_h:
- strcols.append(([''] * len(str_columns[-1])) \
- + (['...'] * min(len(self.frame), self.max_rows)) )
+ strcols.append(([''] * len(str_columns[-1]))
+ + (['...'] * min(len(self.frame), self.max_rows)))
return strcols
@@ -382,8 +384,8 @@ def to_string(self, force_unicode=None):
self.buf.writelines(text)
if self.show_dimensions:
- self.buf.write("\n\n[%d rows x %d columns]" \
- % (len(frame), len(frame.columns)) )
+ self.buf.write("\n\n[%d rows x %d columns]"
+ % (len(frame), len(frame.columns)))
def _join_multiline(self, *strcols):
lwidth = self.line_width
@@ -484,10 +486,11 @@ def write(buf, frame, column_format, strcols):
def _format_col(self, i):
formatter = self._get_formatter(i)
- return format_array((self.frame.iloc[:self.max_rows_displayed,i]).get_values(),
- formatter, float_format=self.float_format,
- na_rep=self.na_rep,
- space=self.col_space)
+ return format_array(
+ (self.frame.iloc[:self.max_rows_displayed, i]).get_values(),
+ formatter, float_format=self.float_format, na_rep=self.na_rep,
+ space=self.col_space
+ )
def to_html(self, classes=None):
"""
@@ -679,8 +682,6 @@ def write_result(self, buf):
'not %s') % type(self.classes))
_classes.extend(self.classes)
-
-
self.write('<table border="1" class="%s">' % ' '.join(_classes),
indent)
@@ -698,9 +699,9 @@ def write_result(self, buf):
self.write('</table>', indent)
if self.fmt.show_dimensions:
- by = chr(215) if compat.PY3 else unichr(215) # ×
+ by = chr(215) if compat.PY3 else unichr(215) # ×
self.write(u('<p>%d rows %s %d columns</p>') %
- (len(frame), by, len(frame.columns)) )
+ (len(frame), by, len(frame.columns)))
_put_lines(buf, self.elements)
def _write_header(self, indent):
@@ -783,8 +784,9 @@ def _column_header():
align=align)
if self.fmt.has_index_names:
- row = [x if x is not None else '' for x in self.frame.index.names] \
- + [''] * min(len(self.columns), self.max_cols)
+ row = [
+ x if x is not None else '' for x in self.frame.index.names
+ ] + [''] * min(len(self.columns), self.max_cols)
self.write_tr(row, indent, self.indent_delta, header=True)
indent -= self.indent_delta
@@ -851,7 +853,7 @@ def _write_hierarchical_rows(self, fmt_values, indent):
truncate = (len(frame) > self.max_rows)
idx_values = frame.index[:nrows].format(sparsify=False, adjoin=False,
- names=False)
+ names=False)
idx_values = lzip(*idx_values)
if self.fmt.sparsify:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index d0a1511ec1cca..93587cd11b597 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -432,8 +432,9 @@ def _repr_fits_horizontal_(self, ignore_width=False):
def _info_repr(self):
"""True if the repr should show the info view."""
info_repr_option = (get_option("display.large_repr") == "info")
- return info_repr_option and not \
- (self._repr_fits_horizontal_() and self._repr_fits_vertical_())
+ return info_repr_option and not (
+ self._repr_fits_horizontal_() and self._repr_fits_vertical_()
+ )
def __unicode__(self):
"""
@@ -486,8 +487,7 @@ def _repr_html_(self):
return ('<div style="max-height:1000px;'
'max-width:1500px;overflow:auto;">\n' +
self.to_html(max_rows=max_rows, max_cols=max_cols,
- show_dimensions=True) \
- + '\n</div>')
+ show_dimensions=True) + '\n</div>')
else:
return None
@@ -1283,7 +1283,8 @@ def to_string(self, buf=None, columns=None, col_space=None, colSpace=None,
index_names=index_names,
header=header, index=index,
line_width=line_width,
- max_rows=max_rows, max_cols=max_cols,
+ max_rows=max_rows,
+ max_cols=max_cols,
show_dimensions=show_dimensions)
formatter.to_string()
@@ -1310,7 +1311,8 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
escape : boolean, default True
Convert the characters <, >, and & to HTML-safe sequences.=
max_rows : int, optional
- Maximum number of rows to show before truncating. If None, show all.
+ Maximum number of rows to show before truncating. If None, show
+ all.
max_cols : int, optional
Maximum number of columns to show before truncating. If None, show
all.
@@ -1336,7 +1338,8 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
header=header, index=index,
bold_rows=bold_rows,
escape=escape,
- max_rows=max_rows, max_cols=max_cols,
+ max_rows=max_rows,
+ max_cols=max_cols,
show_dimensions=show_dimensions)
formatter.to_html(classes=classes)
@@ -1904,7 +1907,8 @@ def _ensure_valid_index(self, value):
if not isinstance(value, Series):
raise ValueError('Cannot set a frame with no defined index '
- 'and a value that cannot be converted to a Series')
+ 'and a value that cannot be converted to a '
+ 'Series')
self._data.set_axis(1, value.index.copy(), check_axis=False)
def _set_item(self, key, value):
@@ -4597,7 +4601,7 @@ def extract_index(data):
def _prep_ndarray(values, copy=True):
- if not isinstance(values, (np.ndarray,Series)):
+ if not isinstance(values, (np.ndarray, Series)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 4089b13fca5c7..624384e484dc0 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -42,8 +42,8 @@ def is_dictlike(x):
def _single_replace(self, to_replace, method, inplace, limit):
if self.ndim != 1:
- raise TypeError('cannot replace {0} with method {1} on a {2}'.format(to_replace,
- method,type(self).__name__))
+ raise TypeError('cannot replace {0} with method {1} on a {2}'
+ .format(to_replace, method, type(self).__name__))
orig_dtype = self.dtype
result = self if inplace else self.copy()
@@ -2047,7 +2047,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
# passing a single value that is scalar like
# when value is None (GH5319), for compat
if not is_dictlike(to_replace) and not is_dictlike(regex):
- to_replace = [ to_replace ]
+ to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
return _single_replace(self, to_replace, method, inplace,
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 7a7fe32963457..960baa503036c 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -649,9 +649,9 @@ def _index_with_as_index(self, b):
original = self.obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
- for i in range(len(gp.groupings))),
- (original.get_level_values(i)[b]
- for i in range(original.nlevels)))
+ for i in range(len(gp.groupings))),
+ (original.get_level_values(i)[b]
+ for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
@@ -2161,7 +2161,6 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
else:
key_index = Index(keys, name=key_names[0])
-
# make Nones an empty object
if com._count_not_none(*values) != len(values):
v = None
@@ -2170,14 +2169,20 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
break
if v is None:
return DataFrame()
- values = [ x if x is not None else v._constructor(**v._construct_axes_dict()) for x in values ]
+ values = [
+ x if x is not None else
+ v._constructor(**v._construct_axes_dict())
+ for x in values
+ ]
v = values[0]
if isinstance(v, (np.ndarray, Series)):
if isinstance(v, Series):
applied_index = self.obj._get_axis(self.axis)
- all_indexed_same = _all_indexes_same([x.index for x in values ])
+ all_indexed_same = _all_indexes_same([
+ x.index for x in values
+ ])
singular_series = (len(values) == 1 and
applied_index.nlevels == 1)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 08f935539ecfc..a4e273c43e483 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -830,7 +830,9 @@ def _reindex(keys, level=None):
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer))
- new_indexer[cur_indexer] = np.arange(len(result._get_axis(axis)))
+ new_indexer[cur_indexer] = np.arange(
+ len(result._get_axis(axis))
+ )
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index e8b18ae93b287..471136dc2386b 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -3480,7 +3480,10 @@ def _delete_from_block(self, i, item):
super(SingleBlockManager, self)._delete_from_block(i, item)
# reset our state
- self._block = self.blocks[0] if len(self.blocks) else make_block(np.array([],dtype=self._block.dtype),[],[])
+ self._block = (
+ self.blocks[0] if len(self.blocks) else
+ make_block(np.array([], dtype=self._block.dtype), [], [])
+ )
self._values = self._block.values
def get_slice(self, slobj, raise_on_error=False):
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index d421fa36326aa..1244d0140a01b 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -786,6 +786,7 @@ def lreshape(data, groups, dropna=True, label=None):
return DataFrame(mdata, columns=id_cols + pivot_cols)
+
def wide_to_long(df, stubnames, i, j):
"""
Wide panel to long format. Less flexible but more user-friendly than melt.
@@ -848,8 +849,8 @@ def get_var_names(df, regex):
def melt_stub(df, stub, i, j):
varnames = get_var_names(df, "^"+stub)
- newdf = melt(df, id_vars=i, value_vars=varnames,
- value_name=stub, var_name=j)
+ newdf = melt(df, id_vars=i, value_vars=varnames, value_name=stub,
+ var_name=j)
newdf_j = newdf[j].str.replace(stub, "")
try:
newdf_j = newdf_j.astype(int)
@@ -870,6 +871,7 @@ def melt_stub(df, stub, i, j):
newdf = newdf.merge(new, how="outer", on=id_vars + [j], copy=False)
return newdf.set_index([i, j])
+
def convert_dummies(data, cat_variables, prefix_sep='_'):
"""
Compute DataFrame with specified columns converted to dummy variables (0 /
diff --git a/pandas/io/auth.py b/pandas/io/auth.py
index 15e3eb70d91b2..74b6b13000108 100644
--- a/pandas/io/auth.py
+++ b/pandas/io/auth.py
@@ -117,6 +117,7 @@ def init_service(http):
"""
return gapi.build('analytics', 'v3', http=http)
+
def reset_default_token_store():
import os
os.remove(DEFAULT_TOKEN_FILE)
diff --git a/pandas/io/clipboard.py b/pandas/io/clipboard.py
index 13135d255d9e2..143b507c41c3f 100644
--- a/pandas/io/clipboard.py
+++ b/pandas/io/clipboard.py
@@ -2,6 +2,7 @@
from pandas import compat, get_option, DataFrame
from pandas.compat import StringIO
+
def read_clipboard(**kwargs): # pragma: no cover
"""
Read text from clipboard and pass to read_table. See read_table for the
@@ -20,7 +21,10 @@ def read_clipboard(**kwargs): # pragma: no cover
# try to decode (if needed on PY3)
if compat.PY3:
try:
- text = compat.bytes_to_str(text,encoding=kwargs.get('encoding') or get_option('display.encoding'))
+ text = compat.bytes_to_str(
+ text, encoding=(kwargs.get('encoding') or
+ get_option('display.encoding'))
+ )
except:
pass
return read_table(StringIO(text), **kwargs)
@@ -58,7 +62,7 @@ def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover
if sep is None:
sep = '\t'
buf = StringIO()
- obj.to_csv(buf,sep=sep, **kwargs)
+ obj.to_csv(buf, sep=sep, **kwargs)
clipboard_set(buf.getvalue())
return
except:
@@ -70,4 +74,3 @@ def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover
else:
objstr = str(obj)
clipboard_set(objstr)
-
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 6b8186e253199..d6b2827f94d36 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -13,7 +13,8 @@
_urlopen = urlopen
from urllib.parse import urlparse as parse_url
import urllib.parse as compat_parse
- from urllib.parse import uses_relative, uses_netloc, uses_params, urlencode, urljoin
+ from urllib.parse import (uses_relative, uses_netloc, uses_params,
+ urlencode, urljoin)
from urllib.error import URLError
from http.client import HTTPException
else:
@@ -72,8 +73,8 @@ def _is_s3_url(url):
def maybe_read_encoded_stream(reader, encoding=None):
- """ read an encoded stream from the reader and transform the bytes to unicode
- if required based on the encoding
+ """read an encoded stream from the reader and transform the bytes to
+ unicode if required based on the encoding
Parameters
----------
@@ -84,7 +85,7 @@ def maybe_read_encoded_stream(reader, encoding=None):
-------
a tuple of (a stream of decoded bytes, the encoding which was used)
- """
+ """
if compat.PY3 or encoding is not None: # pragma: no cover
if encoding:
@@ -97,6 +98,7 @@ def maybe_read_encoded_stream(reader, encoding=None):
encoding = None
return reader, encoding
+
def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
"""
If the filepath_or_buffer is a url, translate and return the buffer
@@ -114,7 +116,7 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
if _is_url(filepath_or_buffer):
req = _urlopen(str(filepath_or_buffer))
- return maybe_read_encoded_stream(req,encoding)
+ return maybe_read_encoded_stream(req, encoding)
if _is_s3_url(filepath_or_buffer):
try:
diff --git a/pandas/io/data.py b/pandas/io/data.py
index cf49515cac576..a3968446930e8 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -469,6 +469,7 @@ def fetch_data(url, name):
axis=1, join='outer')
return df
+
def get_data_famafrench(name):
# path of zip files
zip_file_url = ('http://mba.tuck.dartmouth.edu/pages/faculty/'
diff --git a/pandas/io/date_converters.py b/pandas/io/date_converters.py
index ef92b8692c07f..3ffcef4b21552 100644
--- a/pandas/io/date_converters.py
+++ b/pandas/io/date_converters.py
@@ -26,7 +26,7 @@ def parse_all_fields(year_col, month_col, day_col, hour_col, minute_col,
minute_col = _maybe_cast(minute_col)
second_col = _maybe_cast(second_col)
return lib.try_parse_datetime_components(year_col, month_col, day_col,
- hour_col, minute_col, second_col)
+ hour_col, minute_col, second_col)
def generic_parser(parse_func, *cols):
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index b97c9da0b0d18..ad7c37fba4c2f 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -547,8 +547,8 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
colletter = get_column_letter(col)
xcell = wks.cell("%s%s" % (colletter, row))
for field in style.__fields__:
- xcell.style.__setattr__(field, \
- style.__getattribute__(field))
+ xcell.style.__setattr__(
+ field, style.__getattribute__(field))
@classmethod
def _convert_to_style(cls, style_dict):
@@ -778,10 +778,10 @@ def _convert_to_style(self, style_dict, num_format_str=None):
alignment = style_dict.get('alignment')
if alignment:
if (alignment.get('horizontal')
- and alignment['horizontal'] == 'center'):
+ and alignment['horizontal'] == 'center'):
xl_format.set_align('center')
if (alignment.get('vertical')
- and alignment['vertical'] == 'top'):
+ and alignment['vertical'] == 'top'):
xl_format.set_align('top')
# Map the cell borders to XlsxWriter border properties.
diff --git a/pandas/io/ga.py b/pandas/io/ga.py
index 4391b2637a837..f002994888932 100644
--- a/pandas/io/ga.py
+++ b/pandas/io/ga.py
@@ -48,8 +48,8 @@
%s
""" % _QUERY_PARAMS
-_GA_READER_DOC = """Given query parameters, return a DataFrame with all the data
-or an iterator that returns DataFrames containing chunks of the data
+_GA_READER_DOC = """Given query parameters, return a DataFrame with all the
+data or an iterator that returns DataFrames containing chunks of the data
Parameters
----------
@@ -89,12 +89,14 @@
Local host redirect if unspecified
"""
+
def reset_token_store():
"""
Deletes the default token store
"""
auth.reset_default_token_store()
+
@Substitution(extras=_AUTH_PARAMS)
@Appender(_GA_READER_DOC)
def read_ga(metrics, dimensions, start_date, **kwargs):
@@ -185,9 +187,8 @@ def _init_service(self, secrets):
return auth.init_service(http)
def get_account(self, name=None, id=None, **kwargs):
- """
- Retrieve an account that matches the name, id, or some account attribute
- specified in **kwargs
+ """ Retrieve an account that matches the name, id, or some account
+ attribute specified in **kwargs
Parameters
----------
@@ -385,6 +386,7 @@ def _maybe_add_arg(query, field, data, prefix='ga'):
data = ','.join(['%s:%s' % (prefix, x) for x in data])
query[field] = data
+
def _get_match(obj_store, name, id, **kwargs):
key, val = None, None
if len(kwargs) > 0:
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index 2d490ec071b4e..010277533589c 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -38,7 +38,8 @@
# These are some custom exceptions that the
# to_gbq() method can throw
-class SchemaMissing(PandasError,IOError):
+
+class SchemaMissing(PandasError, IOError):
"""
Raised when attempting to write a DataFrame to
a new table in Google BigQuery without specifying
@@ -46,14 +47,16 @@ class SchemaMissing(PandasError,IOError):
"""
pass
-class InvalidSchema(PandasError,IOError):
+
+class InvalidSchema(PandasError, IOError):
"""
Raised when attempting to write a DataFrame to
Google BigQuery with an invalid table schema.
"""
pass
-class TableExistsFail(PandasError,IOError):
+
+class TableExistsFail(PandasError, IOError):
"""
Raised when attempting to write a DataFrame to
an existing Google BigQuery table without specifying
@@ -61,7 +64,8 @@ class TableExistsFail(PandasError,IOError):
"""
pass
-class InvalidColumnOrder(PandasError,IOError):
+
+class InvalidColumnOrder(PandasError, IOError):
"""
Raised when the provided column order for output
results DataFrame does not match the schema
@@ -83,6 +87,7 @@ def _authenticate():
"""
return bq.Client.Get()
+
def _parse_entry(field_value, field_type):
"""
Given a value and the corresponding BigQuery data type,
@@ -147,10 +152,7 @@ def _parse_page(raw_page, col_names, col_types, col_dtypes):
page_row_count = len(raw_page)
# Place to hold the results for a page of data
- page_array = np.zeros(
- (page_row_count,),
- dtype=zip(col_names,col_dtypes)
- )
+ page_array = np.zeros((page_row_count,), dtype=zip(col_names, col_dtypes))
for row_num, raw_row in enumerate(raw_page):
entries = raw_row.get('f', [])
# Iterate over each entry - setting proper field types
@@ -163,6 +165,7 @@ def _parse_page(raw_page, col_names, col_types, col_dtypes):
return page_array
+
def _parse_data(client, job, index_col=None, col_order=None):
"""
Iterate through the query results and piece together the
@@ -196,9 +199,9 @@ def _parse_data(client, job, index_col=None, col_order=None):
Notes:
-----
- This script relies on Google being consistent with their
+ This script relies on Google being consistent with their
pagination API. We are using the most flexible iteration method
- that we could find in the bq.py/bigquery_client.py API's, but
+ that we could find in the bq.py/bigquery_client.py API's, but
these have undergone large amounts of change recently.
We have encountered bugs with this functionality, see:
@@ -209,10 +212,11 @@ def _parse_data(client, job, index_col=None, col_order=None):
# see: http://pandas.pydata.org/pandas-docs/dev/missing_data.html#missing-data-casting-rules-and-indexing
dtype_map = {'INTEGER': np.dtype(float),
'FLOAT': np.dtype(float),
- 'TIMESTAMP': 'M8[ns]'} # This seems to be buggy without nanosecond indicator
+ 'TIMESTAMP': 'M8[ns]'} # This seems to be buggy without
+ # nanosecond indicator
# We first need the schema to get information about the columns of
- # our dataframe.
+ # our dataframe.
table_dict = job['configuration']['query']['destinationTable']
fields = client.GetTableSchema(table_dict)['fields']
@@ -226,23 +230,23 @@ def _parse_data(client, job, index_col=None, col_order=None):
# TODO: Do this in one clean step
for field in fields:
col_types.append(field['type'])
- # Note the encoding... numpy doesn't like titles that are UTF8, which is the return
- # type from the API
+ # Note the encoding... numpy doesn't like titles that are UTF8, which
+ # is the return type from the API
col_names.append(field['name'].encode('ascii', 'ignore'))
- # Note, it would be nice to use 'str' types, but BigQuery doesn't have a fixed length
- # in mind - just maxes out at 64k
- col_dtypes.append(dtype_map.get(field['type'],object))
+ # Note, it would be nice to use 'str' types, but BigQuery doesn't have
+ # a fixed length in mind - just maxes out at 64k
+ col_dtypes.append(dtype_map.get(field['type'], object))
-
# How many columns are there
num_columns = len(col_names)
-
+
# Iterate over the result rows.
# Since Google's API now requires pagination of results,
- # we do that here. The following is repurposed from
+ # we do that here. The following is repurposed from
# bigquery_client.py :: Client._JobTableReader._ReadOnePage
- # TODO: Enable Reading From Table, see Client._TableTableReader._ReadOnePage
+ # TODO: Enable Reading From Table,
+ # see Client._TableTableReader._ReadOnePage
# Initially, no page token is set
page_token = None
@@ -254,13 +258,12 @@ def _parse_data(client, job, index_col=None, col_order=None):
total_rows = max_rows
# This is the starting row for a particular page...
- # is ignored if page_token is present, though
+ # is ignored if page_token is present, though
# it may be useful if we wish to implement SQL like LIMITs
# with minimums
start_row = 0
- # Keep our page DataFrames until the end when we
- # concatentate them
+ # Keep our page DataFrames until the end when we concatenate them
dataframe_list = list()
current_job = job['jobReference']
@@ -298,7 +301,8 @@ def _parse_data(client, job, index_col=None, col_order=None):
start_row += len(raw_page)
if total_rows > 0:
completed = (100 * start_row) / total_rows
- logger.info('Remaining Rows: ' + str(total_rows - start_row) + '(' + str(completed) + '% Complete)')
+ logger.info('Remaining Rows: ' + str(total_rows - start_row) + '('
+ + str(completed) + '% Complete)')
else:
logger.info('No Rows')
@@ -308,8 +312,9 @@ def _parse_data(client, job, index_col=None, col_order=None):
# but we felt it was still a good idea.
if not page_token and not raw_page and start_row != total_rows:
raise bigquery_client.BigqueryInterfaceError(
- ("Not enough rows returned by server. Expected: {0}" + \
- " Rows, But Recieved {1}").format(total_rows, start_row))
+ 'Not enough rows returned by server. Expected: {0} Rows, But '
+ 'Received {1}'.format(total_rows, start_row)
+ )
# Build final dataframe
final_df = concat(dataframe_list, ignore_index=True)
@@ -320,14 +325,19 @@ def _parse_data(client, job, index_col=None, col_order=None):
final_df.set_index(index_col, inplace=True)
col_names.remove(index_col)
else:
- raise InvalidColumnOrder('Index column "{0}" does not exist in DataFrame.'.format(index_col))
+ raise InvalidColumnOrder(
+ 'Index column "{0}" does not exist in DataFrame.'
+ .format(index_col)
+ )
# Change the order of columns in the DataFrame based on provided list
if col_order is not None:
if sorted(col_order) == sorted(col_names):
final_df = final_df[col_order]
else:
- raise InvalidColumnOrder('Column order does not match this DataFrame.')
+ raise InvalidColumnOrder(
+ 'Column order does not match this DataFrame.'
+ )
# Downcast floats to integers and objects to booleans
# if there are no NaN's. This is presently due to a
@@ -335,13 +345,15 @@ def _parse_data(client, job, index_col=None, col_order=None):
final_df._data = final_df._data.downcast(dtypes='infer')
return final_df
-def to_gbq(dataframe, destination_table, schema=None, col_order=None, if_exists='fail', **kwargs):
- """Write a DataFrame to a Google BigQuery table.
-
- If the table exists, the DataFrame will be appended. If not, a new table
- will be created, in which case the schema will have to be specified. By default,
- rows will be written in the order they appear in the DataFrame, though
- the user may specify an alternative order.
+
+def to_gbq(dataframe, destination_table, schema=None, col_order=None,
+ if_exists='fail', **kwargs):
+ """Write a DataFrame to a Google BigQuery table.
+
+ If the table exists, the DataFrame will be appended. If not, a new table
+ will be created, in which case the schema will have to be specified. By
+ default, rows will be written in the order they appear in the DataFrame,
+ though the user may specify an alternative order.
Parameters
----------
@@ -350,9 +362,11 @@ def to_gbq(dataframe, destination_table, schema=None, col_order=None, if_exists=
destination_table : string
name of table to be written, in the form 'dataset.tablename'
schema : sequence (optional)
- list of column types in order for data to be inserted, e.g. ['INTEGER', 'TIMESTAMP', 'BOOLEAN']
+ list of column types in order for data to be inserted,
+ e.g. ['INTEGER', 'TIMESTAMP', 'BOOLEAN']
col_order : sequence (optional)
- order which columns are to be inserted, e.g. ['primary_key', 'birthday', 'username']
+ order which columns are to be inserted,
+ e.g. ['primary_key', 'birthday', 'username']
if_exists : {'fail', 'replace', 'append'} (optional)
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
@@ -362,42 +376,50 @@ def to_gbq(dataframe, destination_table, schema=None, col_order=None, if_exists=
Raises
------
SchemaMissing :
- Raised if the 'if_exists' parameter is set to 'replace', but no schema is specified
+ Raised if the 'if_exists' parameter is set to 'replace', but no schema
+ is specified
TableExists :
- Raised if the specified 'destination_table' exists but the 'if_exists' parameter is set to 'fail' (the default)
+ Raised if the specified 'destination_table' exists but the 'if_exists'
+ parameter is set to 'fail' (the default)
InvalidSchema :
Raised if the 'schema' parameter does not match the provided DataFrame
"""
if not _BQ_INSTALLED:
if sys.version_info >= (3, 0):
- raise NotImplementedError('gbq module does not support Python 3 yet')
+ raise NotImplementedError('gbq module does not support Python 3 '
+ 'yet')
else:
raise ImportError('Could not import Google BigQuery Client.')
if not _BQ_VALID_VERSION:
- raise ImportError("pandas requires bigquery >= 2.0.17 for Google BigQuery "
- "support, current version " + _BQ_VERSION)
+ raise ImportError("pandas requires bigquery >= 2.0.17 for Google "
+ "BigQuery support, current version " + _BQ_VERSION)
- ALLOWED_TYPES = ['STRING', 'INTEGER', 'FLOAT', 'BOOLEAN', 'TIMESTAMP', 'RECORD']
+ ALLOWED_TYPES = ['STRING', 'INTEGER', 'FLOAT', 'BOOLEAN', 'TIMESTAMP',
+ 'RECORD']
if if_exists == 'replace' and schema is None:
- raise SchemaMissing('Cannot replace a table without specifying the data schema')
+ raise SchemaMissing('Cannot replace a table without specifying the '
+ 'data schema')
else:
client = _authenticate()
table_reference = client.GetTableReference(destination_table)
if client.TableExists(table_reference):
if if_exists == 'fail':
- raise TableExistsFail('Cannot overwrite existing tables if \'if_exists="fail"\'')
+ raise TableExistsFail('Cannot overwrite existing tables if '
+ '\'if_exists="fail"\'')
else:
- # Build up a string representation of the
+ # Build up a string representation of the
# table's schema. Since the table already
# exists, we ask ask the API for it, which
# is returned in a list of dictionaries
# describing column data. Iterate over these
# and build up a string of form:
# "col_name1 : col_type1, col_name2 : col_type2..."
- schema_full = client.GetTableSchema(dict(table_reference))['fields']
+ schema_full = client.GetTableSchema(
+ dict(table_reference)
+ )['fields']
schema = ''
for count, row in enumerate(schema_full):
if count > 0:
@@ -406,11 +428,13 @@ def to_gbq(dataframe, destination_table, schema=None, col_order=None, if_exists=
else:
logger.info('Creating New Table')
if schema is None:
- raise SchemaMissing('Cannot create a new table without specifying the data schema')
+ raise SchemaMissing('Cannot create a new table without '
+ 'specifying the data schema')
else:
columns = dataframe.columns
if len(schema) != len(columns):
- raise InvalidSchema('Incorrect number of columns in schema')
+ raise InvalidSchema('Incorrect number of columns in '
+ 'schema')
else:
schema_string = ''
for count, name in enumerate(columns):
@@ -420,7 +444,9 @@ def to_gbq(dataframe, destination_table, schema=None, col_order=None, if_exists=
if column_type in ALLOWED_TYPES:
schema_string += name + ':' + schema[count].lower()
else:
- raise InvalidSchema('Invalid Type: ' + column_type + ". Must be one of: " + str(ALLOWED_TYPES))
+ raise InvalidSchema('Invalid Type: ' + column_type
+ + ". Must be one of: " +
+ str(ALLOWED_TYPES))
schema = schema_string
opts = kwargs
@@ -437,18 +463,22 @@ def to_gbq(dataframe, destination_table, schema=None, col_order=None, if_exists=
with tempfile.NamedTemporaryFile() as csv_file:
dataframe.to_csv(csv_file.name, index=False, encoding='utf-8')
- job = client.Load(table_reference, csv_file.name, schema=schema, **opts)
+ job = client.Load(table_reference, csv_file.name, schema=schema,
+ **opts)
-def read_gbq(query, project_id = None, destination_table = None, index_col=None, col_order=None, **kwargs):
+
+def read_gbq(query, project_id=None, destination_table=None, index_col=None,
+ col_order=None, **kwargs):
"""Load data from Google BigQuery.
-
- The main method a user calls to load data from Google BigQuery into a pandas DataFrame.
- This is a simple wrapper for Google's bq.py and bigquery_client.py, which we use
- to get the source data. Because of this, this script respects the user's bq settings
- file, '~/.bigqueryrc', if it exists. Such a file can be generated using 'bq init'. Further,
- additional parameters for the query can be specified as either ``**kwds`` in the command,
- or using FLAGS provided in the 'gflags' module. Particular options can be found in
- bigquery_client.py.
+
+ The main method a user calls to load data from Google BigQuery into a
+ pandas DataFrame. This is a simple wrapper for Google's bq.py and
+ bigquery_client.py, which we use to get the source data. Because of this,
+ this script respects the user's bq settings file, '~/.bigqueryrc', if it
+ exists. Such a file can be generated using 'bq init'. Further, additional
+ parameters for the query can be specified as either ``**kwds`` in the
+ command, or using FLAGS provided in the 'gflags' module. Particular options
+ can be found in bigquery_client.py.
Parameters
----------
@@ -464,8 +494,8 @@ def read_gbq(query, project_id = None, destination_table = None, index_col=None,
DataFrame
destination_table : string (optional)
If provided, send the results to the given table.
- **kwargs :
- To be passed to bq.Client.Create(). Particularly: 'trace',
+ **kwargs :
+ To be passed to bq.Client.Create(). Particularly: 'trace',
'sync', 'api', 'api_version'
Returns
@@ -476,13 +506,14 @@ def read_gbq(query, project_id = None, destination_table = None, index_col=None,
"""
if not _BQ_INSTALLED:
if sys.version_info >= (3, 0):
- raise NotImplementedError('gbq module does not support Python 3 yet')
+ raise NotImplementedError('gbq module does not support Python 3 '
+ 'yet')
else:
raise ImportError('Could not import Google BigQuery Client.')
if not _BQ_VALID_VERSION:
- raise ImportError("pandas requires bigquery >= 2.0.17 for Google BigQuery "
- "support, current version " + _BQ_VERSION)
+ raise ImportError('pandas requires bigquery >= 2.0.17 for Google '
+ 'BigQuery support, current version ' + _BQ_VERSION)
query_args = kwargs
query_args['project_id'] = project_id
@@ -493,5 +524,5 @@ def read_gbq(query, project_id = None, destination_table = None, index_col=None,
client = _authenticate()
job = client.Query(**query_args)
-
+
return _parse_data(client, job, index_col=index_col, col_order=col_order)
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
index 08299738f31a2..5d392e94106e9 100644
--- a/pandas/io/packers.py
+++ b/pandas/io/packers.py
@@ -49,7 +49,8 @@
from pandas.compat import u, PY3
from pandas import (
Timestamp, Period, Series, DataFrame, Panel, Panel4D,
- Index, MultiIndex, Int64Index, PeriodIndex, DatetimeIndex, Float64Index, NaT
+ Index, MultiIndex, Int64Index, PeriodIndex, DatetimeIndex, Float64Index,
+ NaT
)
from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel
from pandas.sparse.array import BlockIndex, IntIndex
@@ -87,7 +88,8 @@ def to_msgpack(path_or_buf, *args, **kwargs):
args : an object or objects to serialize
append : boolean whether to append to an existing msgpack
(default is False)
- compress : type of compressor (zlib or blosc), default to None (no compression)
+ compress : type of compressor (zlib or blosc), default to None (no
+ compression)
"""
global compressor
compressor = kwargs.pop('compress', None)
@@ -111,6 +113,7 @@ def writer(fh):
else:
writer(path_or_buf)
+
def read_msgpack(path_or_buf, iterator=False, **kwargs):
"""
Load msgpack pandas object from the specified
@@ -153,7 +156,7 @@ def read(fh):
return read(fh)
# treat as a string-like
- if not hasattr(path_or_buf,'read'):
+ if not hasattr(path_or_buf, 'read'):
try:
fh = compat.BytesIO(path_or_buf)
@@ -230,6 +233,7 @@ def convert(values):
# ndarray (on original dtype)
return v.tostring()
+
def unconvert(values, dtype, compress=None):
if dtype == np.object_:
@@ -251,7 +255,8 @@ def unconvert(values, dtype, compress=None):
return np.frombuffer(values, dtype=dtype)
# from a string
- return np.fromstring(values.encode('latin1'),dtype=dtype)
+ return np.fromstring(values.encode('latin1'), dtype=dtype)
+
def encode(obj):
"""
@@ -264,11 +269,11 @@ def encode(obj):
return {'typ': 'period_index',
'klass': obj.__class__.__name__,
'name': getattr(obj, 'name', None),
- 'freq': getattr(obj,'freqstr',None),
+ 'freq': getattr(obj, 'freqstr', None),
'dtype': obj.dtype.num,
'data': convert(obj.asi8)}
elif isinstance(obj, DatetimeIndex):
- tz = getattr(obj,'tz',None)
+ tz = getattr(obj, 'tz', None)
# store tz info and data as UTC
if tz is not None:
@@ -279,8 +284,8 @@ def encode(obj):
'name': getattr(obj, 'name', None),
'dtype': obj.dtype.num,
'data': convert(obj.asi8),
- 'freq': getattr(obj,'freqstr',None),
- 'tz': tz }
+ 'freq': getattr(obj, 'freqstr', None),
+ 'tz': tz}
elif isinstance(obj, MultiIndex):
return {'typ': 'multi_index',
'klass': obj.__class__.__name__,
@@ -295,7 +300,9 @@ def encode(obj):
'data': convert(obj.values)}
elif isinstance(obj, Series):
if isinstance(obj, SparseSeries):
- raise NotImplementedError("msgpack sparse series is not implemented")
+ raise NotImplementedError(
+ 'msgpack sparse series is not implemented'
+ )
#d = {'typ': 'sparse_series',
# 'klass': obj.__class__.__name__,
# 'dtype': obj.dtype.num,
@@ -316,7 +323,9 @@ def encode(obj):
'compress': compressor}
elif issubclass(tobj, NDFrame):
if isinstance(obj, SparseDataFrame):
- raise NotImplementedError("msgpack sparse frame is not implemented")
+ raise NotImplementedError(
+ 'msgpack sparse frame is not implemented'
+ )
#d = {'typ': 'sparse_dataframe',
# 'klass': obj.__class__.__name__,
# 'columns': obj.columns}
@@ -326,7 +335,9 @@ def encode(obj):
# for name, ss in compat.iteritems(obj)])
#return d
elif isinstance(obj, SparsePanel):
- raise NotImplementedError("msgpack sparse frame is not implemented")
+ raise NotImplementedError(
+ 'msgpack sparse frame is not implemented'
+ )
#d = {'typ': 'sparse_panel',
# 'klass': obj.__class__.__name__,
# 'items': obj.items}
@@ -353,7 +364,8 @@ def encode(obj):
'compress': compressor
} for b in data.blocks]}
- elif isinstance(obj, (datetime, date, np.datetime64, timedelta, np.timedelta64)):
+ elif isinstance(obj, (datetime, date, np.datetime64, timedelta,
+ np.timedelta64)):
if isinstance(obj, Timestamp):
tz = obj.tzinfo
if tz is not None:
@@ -436,18 +448,22 @@ def decode(obj):
return Period(ordinal=obj['ordinal'], freq=obj['freq'])
elif typ == 'index':
dtype = dtype_for(obj['dtype'])
- data = unconvert(obj['data'], np.typeDict[obj['dtype']], obj.get('compress'))
+ data = unconvert(obj['data'], np.typeDict[obj['dtype']],
+ obj.get('compress'))
return globals()[obj['klass']](data, dtype=dtype, name=obj['name'])
elif typ == 'multi_index':
- data = unconvert(obj['data'], np.typeDict[obj['dtype']], obj.get('compress'))
- data = [ tuple(x) for x in data ]
+ data = unconvert(obj['data'], np.typeDict[obj['dtype']],
+ obj.get('compress'))
+ data = [tuple(x) for x in data]
return globals()[obj['klass']].from_tuples(data, names=obj['names'])
elif typ == 'period_index':
data = unconvert(obj['data'], np.int64, obj.get('compress'))
- return globals()[obj['klass']](data, name=obj['name'], freq=obj['freq'])
+ return globals()[obj['klass']](data, name=obj['name'],
+ freq=obj['freq'])
elif typ == 'datetime_index':
data = unconvert(obj['data'], np.int64, obj.get('compress'))
- result = globals()[obj['klass']](data, freq=obj['freq'], name=obj['name'])
+ result = globals()[obj['klass']](data, freq=obj['freq'],
+ name=obj['name'])
tz = obj['tz']
# reverse tz conversion
@@ -457,13 +473,17 @@ def decode(obj):
elif typ == 'series':
dtype = dtype_for(obj['dtype'])
index = obj['index']
- return globals()[obj['klass']](unconvert(obj['data'], dtype, obj['compress']), index=index, name=obj['name'])
+ return globals()[obj['klass']](unconvert(obj['data'], dtype,
+ obj['compress']),
+ index=index, name=obj['name'])
elif typ == 'block_manager':
axes = obj['axes']
def create_block(b):
dtype = dtype_for(b['dtype'])
- return make_block(unconvert(b['values'], dtype, b['compress']).reshape(b['shape']), b['items'], axes[0], klass=getattr(internals, b['klass']))
+ return make_block(unconvert(b['values'], dtype, b['compress'])
+ .reshape(b['shape']), b['items'], axes[0],
+ klass=getattr(internals, b['klass']))
blocks = [create_block(b) for b in obj['blocks']]
return globals()[obj['klass']](BlockManager(blocks, axes))
@@ -479,21 +499,29 @@ def create_block(b):
return np.timedelta64(int(obj['data']))
#elif typ == 'sparse_series':
# dtype = dtype_for(obj['dtype'])
- # return globals(
- # )[obj['klass']](unconvert(obj['sp_values'], dtype, obj['compress']), sparse_index=obj['sp_index'],
- # index=obj['index'], fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])
+ # return globals()[obj['klass']](
+ # unconvert(obj['sp_values'], dtype, obj['compress']),
+ # sparse_index=obj['sp_index'], index=obj['index'],
+ # fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])
#elif typ == 'sparse_dataframe':
- # return globals()[obj['klass']](obj['data'],
- # columns=obj['columns'], default_fill_value=obj['default_fill_value'], default_kind=obj['default_kind'])
+ # return globals()[obj['klass']](
+ # obj['data'], columns=obj['columns'],
+ # default_fill_value=obj['default_fill_value'],
+ # default_kind=obj['default_kind']
+ # )
#elif typ == 'sparse_panel':
- # return globals()[obj['klass']](obj['data'],
- # items=obj['items'], default_fill_value=obj['default_fill_value'], default_kind=obj['default_kind'])
+ # return globals()[obj['klass']](
+ # obj['data'], items=obj['items'],
+ # default_fill_value=obj['default_fill_value'],
+ # default_kind=obj['default_kind'])
elif typ == 'block_index':
- return globals()[obj['klass']](obj['length'], obj['blocs'], obj['blengths'])
+ return globals()[obj['klass']](obj['length'], obj['blocs'],
+ obj['blengths'])
elif typ == 'int_index':
return globals()[obj['klass']](obj['length'], obj['indices'])
elif typ == 'ndarray':
- return unconvert(obj['data'], np.typeDict[obj['dtype']], obj.get('compress')).reshape(obj['shape'])
+ return unconvert(obj['data'], np.typeDict[obj['dtype']],
+ obj.get('compress')).reshape(obj['shape'])
elif typ == 'np_scalar':
if obj.get('sub_typ') == 'np_complex':
return c2f(obj['real'], obj['imag'], obj['dtype'])
@@ -585,7 +613,7 @@ def __iter__(self):
try:
path_exists = os.path.exists(self.path)
- except (TypeError):
+ except TypeError:
path_exists = False
if path_exists:
@@ -595,7 +623,7 @@ def __iter__(self):
else:
- if not hasattr(self.path,'read'):
+ if not hasattr(self.path, 'read'):
fh = compat.BytesIO(self.path)
else:
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index e62ecd5a541df..bd0649a7a85f3 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -30,14 +30,15 @@
Parameters
----------
filepath_or_buffer : string or file handle / StringIO. The string could be
- a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host
- is expected. For instance, a local file could be
+ a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a
+ host is expected. For instance, a local file could be
file ://localhost/path/to/table.csv
%s
lineterminator : string (length 1), default None
Character to break file into lines. Only valid with C parser
quotechar : string
- The character to used to denote the start and end of a quoted item. Quoted items can include the delimiter and it will be ignored.
+ The character to used to denote the start and end of a quoted item. Quoted
+ items can include the delimiter and it will be ignored.
quoting : int
Controls whether quotes should be recognized. Values are taken from
`csv.QUOTE_*` values. Acceptable values are 0, 1, 2, and 3 for
@@ -55,9 +56,9 @@
header : int row number(s) to use as the column names, and the start of the
data. Defaults to 0 if no ``names`` passed, otherwise ``None``. Explicitly
pass ``header=0`` to be able to replace existing names. The header can be
- a list of integers that specify row locations for a multi-index on the columns
- E.g. [0,1,3]. Intervening rows that are not specified will be skipped.
- (E.g. 2 in this example are skipped)
+ a list of integers that specify row locations for a multi-index on the
+ columns E.g. [0,1,3]. Intervening rows that are not specified will be
+ skipped. (E.g. 2 in this example are skipped)
skiprows : list-like or integer
Row numbers to skip (0-indexed) or number of rows to skip (int)
at the start of the file
@@ -251,7 +252,7 @@ def _read(filepath_or_buffer, kwds):
'squeeze': False,
'compression': None,
'mangle_dupe_cols': True,
- 'tupleize_cols':False,
+ 'tupleize_cols': False,
}
@@ -437,9 +438,10 @@ def read_fwf(filepath_or_buffer, colspecs='infer', widths=None, **kwds):
# common NA values
# no longer excluding inf representations
# '1.#INF','-1.#INF', '1.#INF000000',
-_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
- '#N/A','N/A', 'NA', '#NA', 'NULL', 'NaN',
- 'nan', ''])
+_NA_VALUES = set([
+ '-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A', 'N/A', 'NA', '#NA',
+ 'NULL', 'NaN', 'nan', ''
+])
class TextFileReader(object):
@@ -653,14 +655,14 @@ def __init__(self, kwds):
self.na_fvalues = kwds.get('na_fvalues')
self.true_values = kwds.get('true_values')
self.false_values = kwds.get('false_values')
- self.tupleize_cols = kwds.get('tupleize_cols',False)
+ self.tupleize_cols = kwds.get('tupleize_cols', False)
self._date_conv = _make_date_converter(date_parser=self.date_parser,
dayfirst=self.dayfirst)
# validate header options for mi
self.header = kwds.get('header')
- if isinstance(self.header,(list,tuple,np.ndarray)):
+ if isinstance(self.header, (list, tuple, np.ndarray)):
if kwds.get('as_recarray'):
raise ValueError("cannot specify as_recarray when "
"specifying a multi-index header")
@@ -702,7 +704,8 @@ def _should_parse_dates(self, i):
else:
return (j in self.parse_dates) or (name in self.parse_dates)
- def _extract_multi_indexer_columns(self, header, index_names, col_names, passed_names=False):
+ def _extract_multi_indexer_columns(self, header, index_names, col_names,
+ passed_names=False):
""" extract and return the names, index_names, col_names
header is a list-of-lists returned from the parsers """
if len(header) < 2:
@@ -715,8 +718,8 @@ def _extract_multi_indexer_columns(self, header, index_names, col_names, passed_
if ic is None:
ic = []
- if not isinstance(ic, (list,tuple,np.ndarray)):
- ic = [ ic ]
+ if not isinstance(ic, (list, tuple, np.ndarray)):
+ ic = [ic]
sic = set(ic)
# clean the index_names
@@ -726,22 +729,29 @@ def _extract_multi_indexer_columns(self, header, index_names, col_names, passed_
# extract the columns
field_count = len(header[0])
+
def extract(r):
- return tuple([ r[i] for i in range(field_count) if i not in sic ])
- columns = lzip(*[ extract(r) for r in header ])
+ return tuple([r[i] for i in range(field_count) if i not in sic])
+
+ columns = lzip(*[extract(r) for r in header])
names = ic + columns
- # if we find 'Unnamed' all of a single level, then our header was too long
+ # if we find 'Unnamed' all of a single level, then our header was too
+ # long
for n in range(len(columns[0])):
- if all([ 'Unnamed' in c[n] for c in columns ]):
- raise _parser.CParserError("Passed header=[%s] are too many rows for this "
- "multi_index of columns" % ','.join([ str(x) for x in self.header ]))
+ if all(['Unnamed' in c[n] for c in columns]):
+ raise _parser.CParserError(
+ "Passed header=[%s] are too many rows for this "
+ "multi_index of columns"
+ % ','.join([str(x) for x in self.header])
+ )
# clean the column names (if we have an index_col)
if len(ic):
- col_names = [ r[0] if len(r[0]) and 'Unnamed' not in r[0] else None for r in header ]
+ col_names = [r[0] if len(r[0]) and 'Unnamed' not in r[0] else None
+ for r in header]
else:
- col_names = [ None ] * len(header)
+ col_names = [None] * len(header)
passed_names = True
@@ -749,9 +759,10 @@ def extract(r):
def _maybe_make_multi_index_columns(self, columns, col_names=None):
# possibly create a column mi here
- if not self.tupleize_cols and len(columns) and not isinstance(
- columns, MultiIndex) and all([ isinstance(c,tuple) for c in columns]):
- columns = MultiIndex.from_tuples(columns,names=col_names)
+ if (not self.tupleize_cols and len(columns) and
+ not isinstance(columns, MultiIndex) and
+ all([isinstance(c, tuple) for c in columns])):
+ columns = MultiIndex.from_tuples(columns, names=col_names)
return columns
def _make_index(self, data, alldata, columns, indexnamerow=False):
@@ -849,9 +860,8 @@ def _agg_index(self, index, try_parse_dates=True):
if isinstance(self.na_values, dict):
col_name = self.index_names[i]
if col_name is not None:
- col_na_values, col_na_fvalues = _get_na_values(col_name,
- self.na_values,
- self.na_fvalues)
+ col_na_values, col_na_fvalues = _get_na_values(
+ col_name, self.na_values, self.na_fvalues)
arr, _ = self._convert_types(arr, col_na_values | col_na_fvalues)
arrays.append(arr)
@@ -865,14 +875,14 @@ def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
result = {}
for c, values in compat.iteritems(dct):
conv_f = None if converters is None else converters.get(c, None)
- col_na_values, col_na_fvalues = _get_na_values(c, na_values, na_fvalues)
+ col_na_values, col_na_fvalues = _get_na_values(c, na_values,
+ na_fvalues)
coerce_type = True
if conv_f is not None:
values = lib.map_infer(values, conv_f)
coerce_type = False
- cvals, na_count = self._convert_types(values,
- set(col_na_values) | col_na_fvalues,
- coerce_type)
+ cvals, na_count = self._convert_types(
+ values, set(col_na_values) | col_na_fvalues, coerce_type)
result[c] = cvals
if verbose and na_count:
print('Filled %d NA values in column %s' % (na_count, str(c)))
@@ -951,8 +961,12 @@ def __init__(self, src, **kwds):
else:
if len(self._reader.header) > 1:
# we have a multi index in the columns
- self.names, self.index_names, self.col_names, passed_names = self._extract_multi_indexer_columns(
- self._reader.header, self.index_names, self.col_names, passed_names)
+ self.names, self.index_names, self.col_names, passed_names = (
+ self._extract_multi_indexer_columns(
+ self._reader.header, self.index_names, self.col_names,
+ passed_names
+ )
+ )
else:
self.names = list(self._reader.header[0])
@@ -963,8 +977,9 @@ def __init__(self, src, **kwds):
else:
self.names = lrange(self._reader.table_width)
- # If the names were inferred (not passed by user) and usedcols is defined,
- # then ensure names refers to the used columns, not the document's columns.
+ # If the names were inferred (not passed by user) and usedcols is
+ # defined, then ensure names refers to the used columns, not the
+ # document's columns.
if self.usecols and passed_names:
col_indices = []
for u in self.usecols:
@@ -972,7 +987,8 @@ def __init__(self, src, **kwds):
col_indices.append(self.names.index(u))
else:
col_indices.append(u)
- self.names = [n for i, n in enumerate(self.names) if i in col_indices]
+ self.names = [n for i, n in enumerate(self.names)
+ if i in col_indices]
if len(self.names) < len(self.usecols):
raise ValueError("Usecols do not match names.")
@@ -982,11 +998,12 @@ def __init__(self, src, **kwds):
if not self._has_complex_date_col:
if (self._reader.leading_cols == 0 and
- _is_index_col(self.index_col)):
+ _is_index_col(self.index_col)):
self._name_processed = True
(index_names, self.names,
- self.index_col) = _clean_index_names(self.names, self.index_col)
+ self.index_col) = _clean_index_names(self.names,
+ self.index_col)
if self.index_names is None:
self.index_names = index_names
@@ -1265,8 +1282,11 @@ def __init__(self, f, **kwds):
# The original set is stored in self.original_columns.
if len(self.columns) > 1:
# we are processing a multi index column
- self.columns, self.index_names, self.col_names, _ = self._extract_multi_indexer_columns(
- self.columns, self.index_names, self.col_names)
+ self.columns, self.index_names, self.col_names, _ = (
+ self._extract_multi_indexer_columns(
+ self.columns, self.index_names, self.col_names
+ )
+ )
# Update list of original names to include all indices.
self.num_original_columns = len(self.columns)
else:
@@ -1291,7 +1311,8 @@ def __init__(self, f, **kwds):
self._no_thousands_columns = None
def _set_no_thousands_columns(self):
- # Create a set of column ids that are not to be stripped of thousands operators.
+ # Create a set of column ids that are not to be stripped of thousands
+ # operators.
noconvert_columns = set()
def _set(x):
@@ -1478,7 +1499,8 @@ def _infer_columns(self):
for i, c in enumerate(line):
if c == '':
if have_mi_columns:
- this_columns.append('Unnamed: %d_level_%d' % (i, level))
+ this_columns.append('Unnamed: %d_level_%d'
+ % (i, level))
else:
this_columns.append('Unnamed: %d' % i)
unnamed_count += 1
@@ -1494,16 +1516,17 @@ def _infer_columns(self):
counts[col] = cur_count + 1
elif have_mi_columns:
- # if we have grabbed an extra line, but its not in our format
- # so save in the buffer, and create an blank extra line for the rest of the
- # parsing code
+ # if we have grabbed an extra line, but its not in our
+ # format so save in the buffer, and create an blank extra
+ # line for the rest of the parsing code
if hr == header[-1]:
lc = len(this_columns)
- ic = len(self.index_col) if self.index_col is not None else 0
+ ic = (len(self.index_col)
+ if self.index_col is not None else 0)
if lc != unnamed_count and lc-ic > unnamed_count:
clear_buffer = False
- this_columns = [ None ] * lc
- self.buf = [ self.buf[-1] ]
+ this_columns = [None] * lc
+ self.buf = [self.buf[-1]]
columns.append(this_columns)
if len(columns) == 1:
@@ -1513,17 +1536,19 @@ def _infer_columns(self):
self._clear_buffer()
if names is not None:
- if (self.usecols is not None and len(names) != len(self.usecols)) \
- or (self.usecols is None and len(names) != len(columns[0])):
-
+ if ((self.usecols is not None
+ and len(names) != len(self.usecols))
+ or (self.usecols is None
+ and len(names) != len(columns[0]))):
raise ValueError('Number of passed names did not match '
- 'number of header fields in the file')
+ 'number of header fields in the file')
if len(columns) > 1:
raise TypeError('Cannot pass names with multi-index '
'columns')
if self.usecols is not None:
- # Set _use_cols. We don't store columns because they are overwritten.
+ # Set _use_cols. We don't store columns because they are
+ # overwritten.
self._handle_usecols(columns, names)
else:
self._col_indices = None
@@ -1538,9 +1563,9 @@ def _infer_columns(self):
num_original_columns = ncols
if not names:
if self.prefix:
- columns = [ ['X%d' % i for i in range(ncols)] ]
+ columns = [['X%d' % i for i in range(ncols)]]
else:
- columns = [ lrange(ncols) ]
+ columns = [lrange(ncols)]
columns = self._handle_usecols(columns, columns[0])
else:
if self.usecols is None or len(names) == num_original_columns:
@@ -1548,8 +1573,10 @@ def _infer_columns(self):
num_original_columns = len(names)
else:
if self.usecols and len(names) != len(self.usecols):
- raise ValueError('Number of passed names did not match '
- 'number of header fields in the file')
+ raise ValueError(
+ 'Number of passed names did not match number of '
+ 'header fields in the file'
+ )
# Ignore output but set used columns.
self._handle_usecols([names], names)
columns = [names]
@@ -1566,7 +1593,8 @@ def _handle_usecols(self, columns, usecols_key):
if self.usecols is not None:
if any([isinstance(u, string_types) for u in self.usecols]):
if len(columns) > 1:
- raise ValueError("If using multiple headers, usecols must be integers.")
+ raise ValueError("If using multiple headers, usecols must "
+ "be integers.")
col_indices = []
for u in self.usecols:
if isinstance(u, string_types):
@@ -1576,7 +1604,8 @@ def _handle_usecols(self, columns, usecols_key):
else:
col_indices = self.usecols
- columns = [[n for i, n in enumerate(column) if i in col_indices] for column in columns]
+ columns = [[n for i, n in enumerate(column) if i in col_indices]
+ for column in columns]
self._col_indices = col_indices
return columns
@@ -1640,8 +1669,9 @@ def _check_thousands(self, lines):
for i, x in enumerate(l):
if (not isinstance(x, compat.string_types) or
self.thousands not in x or
- (self._no_thousands_columns and i in self._no_thousands_columns) or
- nonnum.search(x.strip())):
+ (self._no_thousands_columns
+ and i in self._no_thousands_columns)
+ or nonnum.search(x.strip())):
rl.append(x)
else:
rl.append(x.replace(self.thousands, ''))
@@ -1746,9 +1776,14 @@ def _rows_to_cols(self, content):
if self.usecols:
if self._implicit_index:
- zipped_content = [a for i, a in enumerate(zipped_content) if i < len(self.index_col) or i - len(self.index_col) in self._col_indices]
+ zipped_content = [
+ a for i, a in enumerate(zipped_content)
+ if (i < len(self.index_col)
+ or i - len(self.index_col) in self._col_indices)
+ ]
else:
- zipped_content = [a for i, a in enumerate(zipped_content) if i in self._col_indices]
+ zipped_content = [a for i, a in enumerate(zipped_content)
+ if i in self._col_indices]
return zipped_content
def _get_lines(self, rows=None):
@@ -1802,8 +1837,8 @@ def _get_lines(self, rows=None):
except csv.Error as inst:
if 'newline inside string' in str(inst):
row_num = str(self.pos + rows)
- msg = ('EOF inside string starting with line '
- + row_num)
+ msg = ('EOF inside string starting with '
+ 'line ' + row_num)
raise Exception(msg)
raise
except StopIteration:
@@ -1948,7 +1983,9 @@ def _clean_na_values(na_values, keep_default_na=True):
for k, v in compat.iteritems(na_values):
v = set(list(v)) | _NA_VALUES
na_values[k] = v
- na_fvalues = dict([ (k, _floatify_na_values(v)) for k, v in na_values.items() ])
+ na_fvalues = dict([
+ (k, _floatify_na_values(v)) for k, v in na_values.items()
+ ])
else:
if not com.is_list_like(na_values):
na_values = [na_values]
@@ -1987,7 +2024,8 @@ def _clean_index_names(columns, index_col):
index_names.append(name)
# hack
- if isinstance(index_names[0], compat.string_types) and 'Unnamed' in index_names[0]:
+ if isinstance(index_names[0], compat.string_types)\
+ and 'Unnamed' in index_names[0]:
index_names[0] = None
return index_names, columns, index_col
@@ -2071,10 +2109,13 @@ def _get_col_names(colspec, columns):
def _concat_date_cols(date_cols):
if len(date_cols) == 1:
if compat.PY3:
- return np.array([compat.text_type(x) for x in date_cols[0]], dtype=object)
+ return np.array([compat.text_type(x) for x in date_cols[0]],
+ dtype=object)
else:
- return np.array([str(x) if not isinstance(x, compat.string_types) else x
- for x in date_cols[0]], dtype=object)
+ return np.array([
+ str(x) if not isinstance(x, compat.string_types) else x
+ for x in date_cols[0]
+ ], dtype=object)
rs = np.array([' '.join([compat.text_type(y) for y in x])
for x in zip(*date_cols)], dtype=object)
@@ -2101,9 +2142,9 @@ def __init__(self, f, colspecs, delimiter, comment):
for colspec in self.colspecs:
if not (isinstance(colspec, (tuple, list)) and
- len(colspec) == 2 and
- isinstance(colspec[0], (int, np.integer)) and
- isinstance(colspec[1], (int, np.integer))):
+ len(colspec) == 2 and
+ isinstance(colspec[0], (int, np.integer)) and
+ isinstance(colspec[1], (int, np.integer))):
raise TypeError('Each column specification must be '
'2 element tuple or list of integers')
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index 97633873e7b40..915c1e9ae1574 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -1,5 +1,6 @@
from pandas.compat import cPickle as pkl, pickle_compat as pc, PY3
+
def to_pickle(obj, path):
"""
Pickle (serialize) object to input file path
@@ -19,8 +20,8 @@ def read_pickle(path):
Load pickled pandas object (or any other pickled object) from the specified
file path
- Warning: Loading pickled data received from untrusted sources can be unsafe.
- See: http://docs.python.org/2.7/library/pickle.html
+ Warning: Loading pickled data received from untrusted sources can be
+ unsafe. See: http://docs.python.org/2.7/library/pickle.html
Parameters
----------
@@ -38,10 +39,10 @@ def try_read(path, encoding=None):
# pass encoding only if its not None as py2 doesn't handle
# the param
try:
- with open(path,'rb') as fh:
+ with open(path, 'rb') as fh:
return pc.load(fh, encoding=encoding, compat=False)
except:
- with open(path,'rb') as fh:
+ with open(path, 'rb') as fh:
return pc.load(fh, encoding=encoding, compat=True)
try:
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index db2028c70dc20..6ebc33afdd43d 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -74,10 +74,11 @@ def _ensure_term(where):
create the terms here with a frame_level=2 (we are 2 levels down)
"""
- # only consider list/tuple here as an ndarray is automaticaly a coordinate list
- if isinstance(where, (list,tuple)):
+ # only consider list/tuple here as an ndarray is automatically a coordinate
+ # list
+ if isinstance(where, (list, tuple)):
where = [w if not maybe_expression(w) else Term(w, scope_level=2)
- for w in where if w is not None ]
+ for w in where if w is not None]
elif maybe_expression(where):
where = Term(where, scope_level=2)
return where
@@ -124,11 +125,11 @@ class DuplicateWarning(Warning):
# formats
_FORMAT_MAP = {
- u('f') : 'fixed',
- u('fixed') : 'fixed',
- u('t') : 'table',
- u('table') : 'table',
- }
+ u('f'): 'fixed',
+ u('fixed'): 'fixed',
+ u('t'): 'table',
+ u('table'): 'table',
+}
format_deprecate_doc = """
the table keyword has been deprecated
@@ -169,7 +170,7 @@ class DuplicateWarning(Warning):
# table class map
_TABLE_MAP = {
u('generic_table'): 'GenericTable',
- u('appendable_series') : 'AppendableSeriesTable',
+ u('appendable_series'): 'AppendableSeriesTable',
u('appendable_multiseries'): 'AppendableMultiSeriesTable',
u('appendable_frame'): 'AppendableFrameTable',
u('appendable_multiframe'): 'AppendableMultiFrameTable',
@@ -202,8 +203,10 @@ class DuplicateWarning(Warning):
with config.config_prefix('io.hdf'):
config.register_option('dropna_table', True, dropna_doc,
validator=config.is_bool)
- config.register_option('default_format', None, format_doc,
- validator=config.is_one_of_factory(['fixed','table',None]))
+ config.register_option(
+ 'default_format', None, format_doc,
+ validator=config.is_one_of_factory(['fixed', 'table', None])
+ )
# oh the troubles to reduce import time
_table_mod = None
@@ -271,7 +274,7 @@ def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None,
def read_hdf(path_or_buf, key, **kwargs):
- """ read from the store, closeit if we opened it
+ """ read from the store, close it if we opened it
Retrieve pandas object stored in file, optionally based on where
criteria
@@ -281,12 +284,16 @@ def read_hdf(path_or_buf, key, **kwargs):
path_or_buf : path (string), or buffer to read from
key : group identifier in the store
where : list of Term (or convertable) objects, optional
- start : optional, integer (defaults to None), row number to start selection
- stop : optional, integer (defaults to None), row number to stop selection
- columns : optional, a list of columns that if not None, will limit the return columns
+ start : optional, integer (defaults to None), row number to start
+ selection
+ stop : optional, integer (defaults to None), row number to stop
+ selection
+ columns : optional, a list of columns that if not None, will limit the
+ return columns
iterator : optional, boolean, return an iterator, default False
chunksize : optional, nrows to include in iteration, return an iterator
- auto_close : optional, boolean, should automatically close the store when finished, default is False
+ auto_close : optional, boolean, should automatically close the store
+ when finished, default is False
Returns
-------
@@ -442,8 +449,8 @@ def __unicode__(self):
pprint_thing(s or 'invalid_HDFStore node'))
except Exception as detail:
keys.append(k)
- values.append(
- "[invalid_HDFStore node: %s]" % pprint_thing(detail))
+ values.append("[invalid_HDFStore node: %s]"
+ % pprint_thing(detail))
output += adjoin(12, keys, values)
else:
@@ -456,7 +463,8 @@ def __unicode__(self):
def keys(self):
"""
Return a (potentially unordered) list of the keys corresponding to the
- objects stored in the HDFStore. These are ABSOLUTE path-names (e.g. have the leading '/'
+ objects stored in the HDFStore. These are ABSOLUTE path-names (e.g.
+ have the leading '/'
"""
return [n._v_pathname for n in self.groups()]
@@ -482,15 +490,18 @@ def open(self, mode='a', **kwargs):
if self._mode != mode:
- # if we are chaning a write mode to read, ok
+ # if we are changing a write mode to read, ok
if self._mode in ['a', 'w'] and mode in ['r', 'r+']:
pass
elif mode in ['w']:
# this would truncate, raise here
if self.is_open:
- raise PossibleDataLossError("Re-opening the file [{0}] with mode [{1}] "
- "will delete the current file!".format(self._path, self._mode))
+ raise PossibleDataLossError(
+ "Re-opening the file [{0}] with mode [{1}] "
+ "will delete the current file!"
+ .format(self._path, self._mode)
+ )
self._mode = mode
@@ -588,10 +599,12 @@ def select(self, key, where=None, start=None, stop=None, columns=None,
where : list of Term (or convertable) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
- columns : a list of columns that if not None, will limit the return columns
+ columns : a list of columns that if not None, will limit the return
+ columns
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
- auto_close : boolean, should automatically close the store when finished, default is False
+ auto_close : boolean, should automatically close the store when
+ finished, default is False
Returns
-------
@@ -636,16 +649,20 @@ def select_as_coordinates(
stop : integer (defaults to None), row number to stop selection
"""
where = _ensure_term(where)
- return self.get_storer(key).read_coordinates(where=where, start=start, stop=stop, **kwargs)
+ return self.get_storer(key).read_coordinates(where=where, start=start,
+ stop=stop, **kwargs)
def unique(self, key, column, **kwargs):
warnings.warn("unique(key,column) is deprecated\n"
- "use select_column(key,column).unique() instead",FutureWarning)
- return self.get_storer(key).read_column(column=column, **kwargs).unique()
+ "use select_column(key,column).unique() instead",
+ FutureWarning)
+ return self.get_storer(key).read_column(column=column,
+ **kwargs).unique()
def select_column(self, key, column, **kwargs):
"""
- return a single column from the table. This is generally only useful to select an indexable
+ return a single column from the table. This is generally only useful to
+ select an indexable
Parameters
----------
@@ -654,8 +671,10 @@ def select_column(self, key, column, **kwargs):
Exceptions
----------
- raises KeyError if the column is not found (or key is not a valid store)
- raises ValueError if the column can not be extracted indivually (it is part of a data block)
+ raises KeyError if the column is not found (or key is not a valid
+ store)
+ raises ValueError if the column can not be extracted individually (it
+ is part of a data block)
"""
return self.get_storer(key).read_column(column=column, **kwargs)
@@ -668,7 +687,8 @@ def select_as_multiple(self, keys, where=None, selector=None, columns=None,
Parameters
----------
keys : a list of the tables
- selector : the table to apply the where criteria (defaults to keys[0] if not supplied)
+ selector : the table to apply the where criteria (defaults to keys[0]
+ if not supplied)
columns : the columns I want back
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
@@ -677,7 +697,8 @@ def select_as_multiple(self, keys, where=None, selector=None, columns=None,
Exceptions
----------
- raise if any of the keys don't refer to tables or if they are not ALL THE SAME DIMENSIONS
+ raise if any of the keys don't refer to tables or if they are not ALL
+ THE SAME DIMENSIONS
"""
# default to single select
@@ -708,8 +729,9 @@ def select_as_multiple(self, keys, where=None, selector=None, columns=None,
raise TypeError("Invalid table [%s]" % k)
if not t.is_table:
raise TypeError(
- "object [%s] is not a table, and cannot be used in all select as multiple" %
- t.pathname)
+ "object [%s] is not a table, and cannot be used in all "
+ "select as multiple" % t.pathname
+ )
if nrows is None:
nrows = t.nrows
@@ -735,12 +757,16 @@ def func(_start, _stop):
axis = list(set([t.non_index_axes[0][0] for t in tbls]))[0]
# concat and return
- return concat(objs, axis=axis, verify_integrity=False).consolidate()
+ return concat(objs, axis=axis,
+ verify_integrity=False).consolidate()
if iterator or chunksize is not None:
- return TableIterator(self, func, nrows=nrows, start=start, stop=stop, chunksize=chunksize, auto_close=auto_close)
+ return TableIterator(self, func, nrows=nrows, start=start,
+ stop=stop, chunksize=chunksize,
+ auto_close=auto_close)
- return TableIterator(self, func, nrows=nrows, start=start, stop=stop, auto_close=auto_close).get_values()
+ return TableIterator(self, func, nrows=nrows, start=start, stop=stop,
+ auto_close=auto_close).get_values()
def put(self, key, value, format=None, append=False, **kwargs):
"""
@@ -754,11 +780,12 @@ def put(self, key, value, format=None, append=False, **kwargs):
fixed(f) : Fixed format
Fast writing/reading. Not-appendable, nor searchable
table(t) : Table format
- Write as a PyTables Table structure which may perform worse but
- allow more flexible operations like searching / selecting subsets
- of the data
+ Write as a PyTables Table structure which may perform
+ worse but allow more flexible operations like searching
+ / selecting subsets of the data
append : boolean, default False
- This will force Table format, append the input data to the existing.
+ This will force Table format, append the input data to the
+ existing.
encoding : default None, provide an encoding for strings
"""
if format is None:
@@ -816,7 +843,8 @@ def remove(self, key, where=None, start=None, stop=None):
'can only remove with where on objects written as tables')
return s.delete(where=where, start=start, stop=stop)
- def append(self, key, value, format=None, append=True, columns=None, dropna=None, **kwargs):
+ def append(self, key, value, format=None, append=True, columns=None,
+ dropna=None, **kwargs):
"""
Append to Table in file. Node must already exist and be Table
format.
@@ -827,18 +855,20 @@ def append(self, key, value, format=None, append=True, columns=None, dropna=None
value : {Series, DataFrame, Panel, Panel4D}
format: 'table' is the default
table(t) : table format
- Write as a PyTables Table structure which may perform worse but
- allow more flexible operations like searching / selecting subsets
- of the data
- append : boolean, default True, append the input data to the existing
- data_columns : list of columns to create as data columns, or True to use all columns
+ Write as a PyTables Table structure which may perform
+ worse but allow more flexible operations like searching
+ / selecting subsets of the data
+ append : boolean, default True, append the input data to the
+ existing
+ data_columns : list of columns to create as data columns, or True to
+ use all columns
min_itemsize : dict of columns that specify minimum string sizes
nan_rep : string to use as string nan represenation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for strings
- dropna : boolean, default True, do not write an ALL nan row to the store
- settable by the option 'io.hdf.dropna_table'
+ dropna : boolean, default True, do not write an ALL nan row to
+ the store settable by the option 'io.hdf.dropna_table'
Notes
-----
Does *not* check if data being appended overlaps with existing
@@ -853,21 +883,24 @@ def append(self, key, value, format=None, append=True, columns=None, dropna=None
if format is None:
format = get_option("io.hdf.default_format") or 'table'
kwargs = self._validate_format(format, kwargs)
- self._write_to_group(key, value, append=append, dropna=dropna, **kwargs)
+ self._write_to_group(key, value, append=append, dropna=dropna,
+ **kwargs)
- def append_to_multiple(self, d, value, selector, data_columns=None, axes=None, dropna=True, **kwargs):
+ def append_to_multiple(self, d, value, selector, data_columns=None,
+ axes=None, dropna=True, **kwargs):
"""
Append to multiple tables
Parameters
----------
- d : a dict of table_name to table_columns, None is acceptable as the values of
- one node (this will get all the remaining columns)
+ d : a dict of table_name to table_columns, None is acceptable as the
+ values of one node (this will get all the remaining columns)
value : a pandas object
- selector : a string that designates the indexable table; all of its columns will
- be designed as data_columns, unless data_columns is passed, in which
- case these are used
- data_columns : list of columns to create as data columns, or True to use all columns
+ selector : a string that designates the indexable table; all of its
+ columns will be designated as data_columns, unless data_columns is
+ passed, in which case these are used
+ data_columns : list of columns to create as data columns, or True to
+ use all columns
dropna : if evaluates to True, drop rows from all tables if any single
row in each table has all NaN
@@ -879,15 +912,18 @@ def append_to_multiple(self, d, value, selector, data_columns=None, axes=None, d
if axes is not None:
raise TypeError("axes is currently not accepted as a parameter to"
" append_to_multiple; you can create the "
- "tables indepdently instead")
+ "tables independently instead")
if not isinstance(d, dict):
raise ValueError(
- "append_to_multiple must have a dictionary specified as the way to split the value")
+ "append_to_multiple must have a dictionary specified as the "
+ "way to split the value"
+ )
if selector not in d:
raise ValueError(
- "append_to_multiple requires a selector that is in passed dict")
+ "append_to_multiple requires a selector that is in passed dict"
+ )
# figure out the splitting axis (the non_index_axis)
axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]
@@ -899,7 +935,9 @@ def append_to_multiple(self, d, value, selector, data_columns=None, axes=None, d
if v is None:
if remain_key is not None:
raise ValueError(
- "append_to_multiple can only have one value in d that is None")
+ "append_to_multiple can only have one value in d that "
+ "is None"
+ )
remain_key = k
else:
remain_values.extend(v)
@@ -952,15 +990,23 @@ def create_table_index(self, key, **kwargs):
return
if not s.is_table:
- raise TypeError("cannot create table index on a Fixed format store")
+ raise TypeError(
+ "cannot create table index on a Fixed format store")
s.create_index(**kwargs)
def groups(self):
- """ return a list of all the top-level nodes (that are not themselves a pandas storage object) """
+ """return a list of all the top-level nodes (that are not themselves a
+ pandas storage object)
+ """
_tables()
self._check_if_open()
- return [g for g in self._handle.walkNodes() if getattr(g._v_attrs, 'pandas_type', None) or getattr(
- g, 'table', None) or (isinstance(g, _table_mod.table.Table) and g._v_name != u('table'))]
+ return [
+ g for g in self._handle.walkNodes()
+ if (getattr(g._v_attrs, 'pandas_type', None) or
+ getattr(g, 'table', None) or
+ (isinstance(g, _table_mod.table.Table) and
+ g._v_name != u('table')))
+ ]
def get_node(self, key):
""" return the node with the key or None if it does not exist """
@@ -981,16 +1027,16 @@ def get_storer(self, key):
s.infer_axes()
return s
- def copy(
- self, file, mode='w', propindexes=True, keys=None, complib = None, complevel = None,
- fletcher32=False, overwrite=True):
+ def copy(self, file, mode='w', propindexes=True, keys=None, complib=None,
+ complevel=None, fletcher32=False, overwrite=True):
""" copy the existing store to a new file, upgrading in place
Parameters
----------
propindexes: restore indexes in copied file (defaults to True)
keys : list of keys to include in the copy (defaults to all)
- overwrite : overwrite (remove and replace) existing nodes in the new store (default is True)
+ overwrite : overwrite (remove and replace) existing nodes in the
+ new store (default is True)
mode, complib, complevel, fletcher32 same as in HDFStore.__init__
Returns
@@ -1022,8 +1068,11 @@ def copy(
index = False
if propindexes:
index = [a.name for a in s.axes if a.is_indexed]
- new_store.append(k, data, index=index, data_columns=getattr(
- s, 'data_columns', None), encoding=s.encoding)
+ new_store.append(
+ k, data, index=index,
+ data_columns=getattr(s, 'data_columns', None),
+ encoding=s.encoding
+ )
else:
new_store.put(k, data, encoding=s.encoding)
@@ -1039,10 +1088,10 @@ def _validate_format(self, format, kwargs):
kwargs = kwargs.copy()
# table arg
- table = kwargs.pop('table',None)
+ table = kwargs.pop('table', None)
if table is not None:
- warnings.warn(format_deprecate_doc,FutureWarning)
+ warnings.warn(format_deprecate_doc, FutureWarning)
if table:
format = 'table'
@@ -1053,17 +1102,21 @@ def _validate_format(self, format, kwargs):
try:
kwargs['format'] = _FORMAT_MAP[format.lower()]
except:
- raise TypeError("invalid HDFStore format specified [{0}]".format(format))
+ raise TypeError("invalid HDFStore format specified [{0}]"
+ .format(format))
return kwargs
- def _create_storer(self, group, format=None, value=None, append=False, **kwargs):
+ def _create_storer(self, group, format=None, value=None, append=False,
+ **kwargs):
""" return a suitable class to operate """
def error(t):
raise TypeError(
- "cannot properly create the storer for: [%s] [group->%s,value->%s,format->%s,append->%s,kwargs->%s]" %
- (t, group, type(value), format, append, kwargs))
+ "cannot properly create the storer for: [%s] [group->%s,"
+ "value->%s,format->%s,append->%s,kwargs->%s]"
+ % (t, group, type(value), format, append, kwargs)
+ )
pt = _ensure_decoded(getattr(group._v_attrs, 'pandas_type', None))
tt = _ensure_decoded(getattr(group._v_attrs, 'table_type', None))
@@ -1073,12 +1126,14 @@ def error(t):
if value is None:
_tables()
- if getattr(group, 'table', None) or isinstance(group, _table_mod.table.Table):
+ if (getattr(group, 'table', None) or
+ isinstance(group, _table_mod.table.Table)):
pt = u('frame_table')
tt = u('generic_table')
else:
raise TypeError(
- "cannot create a storer if the object is not existing nor a value are passed")
+ "cannot create a storer if the object is not existing "
+ "nor a value are passed")
else:
try:
@@ -1104,14 +1159,14 @@ def error(t):
if value is not None:
if pt == u('series_table'):
- index = getattr(value,'index',None)
+ index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
tt = u('appendable_series')
elif index.nlevels > 1:
tt = u('appendable_multiseries')
elif pt == u('frame_table'):
- index = getattr(value,'index',None)
+ index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
tt = u('appendable_frame')
@@ -1138,8 +1193,7 @@ def error(t):
except:
error('_TABLE_MAP')
- def _write_to_group(
- self, key, value, format, index=True, append=False,
+ def _write_to_group(self, key, value, format, index=True, append=False,
complib=None, encoding=None, **kwargs):
group = self.get_node(key)
@@ -1150,7 +1204,7 @@ def _write_to_group(
# we don't want to store a table node at all if are object is 0-len
# as there are not dtypes
- if getattr(value,'empty',None) and (format == 'table' or append):
+ if getattr(value, 'empty', None) and (format == 'table' or append):
return
if group is None:
@@ -1175,7 +1229,8 @@ def _write_to_group(
if append:
# raise if we are trying to append to a Fixed format,
# or a table that exists (and we are putting)
- if not s.is_table or (s.is_table and format == 'fixed' and s.is_exists):
+ if (not s.is_table or
+ (s.is_table and format == 'fixed' and s.is_exists)):
raise ValueError('Can only append to Tables')
if not s.is_exists:
s.set_object_info()
@@ -1183,7 +1238,9 @@ def _write_to_group(
s.set_object_info()
if not s.is_table and complib:
- raise ValueError('Compression not supported on Fixed format stores')
+ raise ValueError(
+ 'Compression not supported on Fixed format stores'
+ )
# write the object
s.write(obj=value, append=append, complib=complib, **kwargs)
@@ -1210,8 +1267,8 @@ class TableIterator(object):
start : the passed start value (default is None)
stop : the passed stop value (default is None)
chunksize : the passed chunking valeu (default is 50000)
- auto_close : boolean, automatically close the store at the end of iteration,
- default is False
+ auto_close : boolean, automatically close the store at the end of
+ iteration, default is False
kwargs : the passed kwargs
"""
@@ -1274,10 +1331,9 @@ class IndexCol(StringMixin):
is_data_indexable = True
_info_fields = ['freq', 'tz', 'index_name']
- def __init__(
- self, values=None, kind=None, typ=None, cname=None, itemsize=None,
- name=None, axis=None, kind_attr=None, pos=None, freq=None, tz=None,
- index_name=None, **kwargs):
+ def __init__(self, values=None, kind=None, typ=None, cname=None,
+ itemsize=None, name=None, axis=None, kind_attr=None, pos=None,
+ freq=None, tz=None, index_name=None, **kwargs):
self.values = values
self.kind = kind
self.typ = typ
@@ -1335,7 +1391,8 @@ def __unicode__(self):
def __eq__(self, other):
""" compare 2 col items """
- return all([getattr(self, a, None) == getattr(other, a, None) for a in ['name', 'cname', 'axis', 'pos']])
+ return all([getattr(self, a, None) == getattr(other, a, None)
+ for a in ['name', 'cname', 'axis', 'pos']])
def __ne__(self, other):
return not self.__eq__(other)
@@ -1353,7 +1410,7 @@ def copy(self):
return new_self
def infer(self, table):
- """ infer this column from the table: create and return a new object """
+ """infer this column from the table: create and return a new object"""
new_self = self.copy()
new_self.set_table(table)
new_self.get_attr()
@@ -1420,7 +1477,8 @@ def __iter__(self):
def maybe_set_size(self, min_itemsize=None, **kwargs):
""" maybe set a string col itemsize:
- min_itemsize can be an interger or a dict with this columns name with an integer size """
+ min_itemsize can be an integer or a dict with this column's name
+ with an integer size """
if _ensure_decoded(self.kind) == u('string'):
if isinstance(min_itemsize, dict):
@@ -1446,10 +1504,11 @@ def validate_col(self, itemsize=None):
if itemsize is None:
itemsize = self.itemsize
if c.itemsize < itemsize:
- raise ValueError("Trying to store a string with len [%s] in [%s] column but\n"
- "this column has a limit of [%s]!\n"
- "Consider using min_itemsize to preset the sizes on these columns"
- % (itemsize, self.cname, c.itemsize))
+ raise ValueError(
+ "Trying to store a string with len [%s] in [%s] "
+ "column but\nthis column has a limit of [%s]!\n"
+ "Consider using min_itemsize to preset the sizes on "
+ "these columns" % (itemsize, self.cname, c.itemsize))
return c.itemsize
return None
@@ -1484,9 +1543,10 @@ def update_info(self, info):
setattr(self, key, None)
else:
- raise ValueError("invalid info for [%s] for [%s]"""
- ", existing_value [%s] conflicts with new value [%s]" % (self.name,
- key, existing_value, value))
+ raise ValueError(
+ "invalid info for [%s] for [%s], existing_value [%s] "
+ "conflicts with new value [%s]"
+ % (self.name, key, existing_value, value))
else:
if value is not None or existing_value is not None:
idx[key] = value
@@ -1537,7 +1597,8 @@ class DataCol(IndexCol):
----------
data : the actual data
- cname : the column name in the table to hold the data (typeically values)
+ cname : the column name in the table to hold the data (typically
+ values)
"""
is_an_indexable = False
is_data_indexable = False
@@ -1574,11 +1635,14 @@ def __init__(self, values=None, kind=None, typ=None,
self.set_data(data)
def __unicode__(self):
- return "name->%s,cname->%s,dtype->%s,shape->%s" % (self.name, self.cname, self.dtype, self.shape)
+ return "name->%s,cname->%s,dtype->%s,shape->%s" % (
+ self.name, self.cname, self.dtype, self.shape
+ )
def __eq__(self, other):
""" compare 2 col items """
- return all([getattr(self, a, None) == getattr(other, a, None) for a in ['name', 'cname', 'dtype', 'pos']])
+ return all([getattr(self, a, None) == getattr(other, a, None)
+ for a in ['name', 'cname', 'dtype', 'pos']])
def set_data(self, data, dtype=None):
self.data = data
@@ -1644,7 +1708,9 @@ def set_atom(self, block, existing_col, min_itemsize,
# if this block has more than one timezone, raise
if len(set([r.tzinfo for r in rvalues])) != 1:
raise TypeError(
- "too many timezones in this block, create separate data columns")
+ "too many timezones in this block, create separate "
+ "data columns"
+ )
# convert this column to datetime64[ns] utc, and save the tz
index = DatetimeIndex(rvalues)
@@ -1707,9 +1773,11 @@ def set_atom_string(
col = block.get(item)
inferred_type = lib.infer_dtype(col.ravel())
if inferred_type != 'string':
- raise TypeError("Cannot serialize the column [%s] because\n"
- "its data contents are [%s] object dtype" %
- (item, inferred_type))
+ raise TypeError(
+ "Cannot serialize the column [%s] because\n"
+ "its data contents are [%s] object dtype"
+ % (item, inferred_type)
+ )
# itemsize is the maximum length of a string (along any dimension)
itemsize = lib.max_len_string_array(com._ensure_object(data.ravel()))
@@ -1781,7 +1849,7 @@ def cvalues(self):
return self.data
def validate_attr(self, append):
- """ validate that we have the same order as the existing & same dtype """
+ """validate that we have the same order as the existing & same dtype"""
if append:
existing_fields = getattr(self.attrs, self.kind_attr, None)
if (existing_fields is not None and
@@ -1792,11 +1860,13 @@ def validate_attr(self, append):
existing_dtype = getattr(self.attrs, self.dtype_attr, None)
if (existing_dtype is not None and
existing_dtype != self.dtype):
- raise ValueError("appended items dtype do not match existing items dtype"
- " in table!")
+ raise ValueError("appended items dtype do not match existing "
+ "items dtype in table!")
def convert(self, values, nan_rep, encoding):
- """ set the data from this selection (and convert to the correct dtype if we can) """
+ """set the data from this selection (and convert to the correct dtype
+ if we can)
+ """
try:
values = values[self.cname]
except:
@@ -1829,9 +1899,10 @@ def convert(self, values, nan_rep, encoding):
try:
self.data = np.array(
[date.fromordinal(v) for v in self.data], dtype=object)
- except (ValueError):
+ except ValueError:
self.data = np.array(
- [date.fromtimestamp(v) for v in self.data], dtype=object)
+ [date.fromtimestamp(v) for v in self.data],
+ dtype=object)
elif dtype == u('datetime'):
self.data = np.array(
[datetime.fromtimestamp(v) for v in self.data],
@@ -1914,7 +1985,8 @@ def __init__(self, parent, group, encoding=None, **kwargs):
@property
def is_old_version(self):
- return self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1
+ return (self.version[0] <= 0 and self.version[1] <= 10 and
+ self.version[2] < 1)
def set_version(self):
""" compute and set our version """
@@ -1929,7 +2001,8 @@ def set_version(self):
@property
def pandas_type(self):
- return _ensure_decoded(getattr(self.group._v_attrs, 'pandas_type', None))
+ return _ensure_decoded(getattr(self.group._v_attrs,
+ 'pandas_type', None))
@property
def format_type(self):
@@ -2041,7 +2114,9 @@ def write(self, **kwargs):
"cannot write on an abstract storer: sublcasses should implement")
def delete(self, where=None, **kwargs):
- """ support fully deleting the node in its entirety (only) - where specification must be None """
+ """support fully deleting the node in its entirety (only) - where
+ specification must be None
+ """
if where is None:
self._handle.removeNode(self.group, recursive=True)
return None
@@ -2052,8 +2127,7 @@ def delete(self, where=None, **kwargs):
class GenericFixed(Fixed):
""" a generified fixed version """
- _index_type_map = {DatetimeIndex: 'datetime',
- PeriodIndex: 'period'}
+ _index_type_map = {DatetimeIndex: 'datetime', PeriodIndex: 'period'}
_reverse_index_map = dict([(v, k)
for k, v in compat.iteritems(_index_type_map)])
attributes = []
@@ -2078,11 +2152,13 @@ def f(values, freq=None, tz=None):
def validate_read(self, kwargs):
if kwargs.get('columns') is not None:
- raise TypeError("cannot pass a column specification when reading a Fixed format store."
- "this store must be selected in its entirety")
+ raise TypeError("cannot pass a column specification when reading "
+ "a Fixed format store. this store must be "
+ "selected in its entirety")
if kwargs.get('where') is not None:
- raise TypeError("cannot pass a where specification when reading from a Fixed format store."
- "this store must be selected in its entirety")
+ raise TypeError("cannot pass a where specification when reading "
+ "from a Fixed format store. this store must be "
+ "selected in its entirety")
@property
def is_exists(self):
@@ -2246,9 +2322,10 @@ def read_index_node(self, node):
data = node[:]
# If the index was an empty array write_array_empty() will
# have written a sentinel. Here we relace it with the original.
- if 'shape' in node._v_attrs \
- and self._is_empty_array(getattr(node._v_attrs, 'shape')):
- data = np.empty(getattr(node._v_attrs, 'shape'), dtype=getattr(node._v_attrs, 'value_type'))
+ if ('shape' in node._v_attrs and
+ self._is_empty_array(getattr(node._v_attrs, 'shape'))):
+ data = np.empty(getattr(node._v_attrs, 'shape'),
+ dtype=getattr(node._v_attrs, 'value_type'))
kind = _ensure_decoded(node._v_attrs.kind)
name = None
@@ -2268,8 +2345,8 @@ def read_index_node(self, node):
if kind in (u('date'), u('datetime')):
index = factory(
- _unconvert_index(data, kind, encoding=self.encoding), dtype=object,
- **kwargs)
+ _unconvert_index(data, kind, encoding=self.encoding),
+ dtype=object, **kwargs)
else:
index = factory(
_unconvert_index(data, kind, encoding=self.encoding), **kwargs)
@@ -2351,10 +2428,12 @@ def write_array(self, key, value, items=None):
else:
if value.dtype.type == np.datetime64:
self._handle.createArray(self.group, key, value.view('i8'))
- getattr(self.group, key)._v_attrs.value_type = 'datetime64'
+ getattr(
+ self.group, key)._v_attrs.value_type = 'datetime64'
elif value.dtype.type == np.timedelta64:
self._handle.createArray(self.group, key, value.view('i8'))
- getattr(self.group, key)._v_attrs.value_type = 'timedelta64'
+ getattr(
+ self.group, key)._v_attrs.value_type = 'timedelta64'
else:
self._handle.createArray(self.group, key, value)
@@ -2423,7 +2502,8 @@ def read(self, **kwargs):
sp_values = self.read_array('sp_values')
sp_index = self.read_index('sp_index')
return SparseSeries(sp_values, index=index, sparse_index=sp_index,
- kind=self.kind or u('block'), fill_value=self.fill_value,
+ kind=self.kind or u('block'),
+ fill_value=self.fill_value,
name=self.name)
def write(self, obj, **kwargs):
@@ -2596,14 +2676,20 @@ class Table(Fixed):
Attrs in Table Node
-------------------
- These are attributes that are store in the main table node, they are necessary
- to recreate these tables when read back in.
-
- index_axes : a list of tuples of the (original indexing axis and index column)
- non_index_axes: a list of tuples of the (original index axis and columns on a non-indexing axis)
- values_axes : a list of the columns which comprise the data of this table
- data_columns : a list of the columns that we are allowing indexing (these become single columns in values_axes), or True to force all columns
- nan_rep : the string to use for nan representations for string objects
+    These are attributes that are stored in the main table node, they are
+ necessary to recreate these tables when read back in.
+
+ index_axes : a list of tuples of the (original indexing axis and
+ index column)
+ non_index_axes: a list of tuples of the (original index axis and
+ columns on a non-indexing axis)
+ values_axes : a list of the columns which comprise the data of this
+ table
+ data_columns : a list of the columns that we are allowing indexing
+ (these become single columns in values_axes), or True to force all
+ columns
+ nan_rep : the string to use for nan representations for string
+ objects
levels : the names of levels
"""
@@ -2641,14 +2727,10 @@ def __unicode__(self):
if self.is_old_version:
ver = "[%s]" % '.'.join([str(x) for x in self.version])
- return "%-12.12s%s (typ->%s,nrows->%s,ncols->%s,indexers->[%s]%s)" % (self.pandas_type,
- ver,
- self.table_type_short,
- self.nrows,
- self.ncols,
- ','.join(
- [a.name for a in self.index_axes]),
- dc)
+ return "%-12.12s%s (typ->%s,nrows->%s,ncols->%s,indexers->[%s]%s)" % (
+ self.pandas_type, ver, self.table_type_short, self.nrows,
+ self.ncols, ','.join([a.name for a in self.index_axes]), dc
+ )
def __getitem__(self, c):
""" return the axis for c """
@@ -2676,25 +2758,30 @@ def validate(self, other):
oax = ov[i]
if sax != oax:
raise ValueError(
- "invalid combinate of [%s] on appending data [%s] vs current table [%s]" %
- (c, sax, oax))
+ "invalid combinate of [%s] on appending data [%s] "
+ "vs current table [%s]" % (c, sax, oax))
# should never get here
raise Exception(
- "invalid combinate of [%s] on appending data [%s] vs current table [%s]" % (c, sv, ov))
+ "invalid combinate of [%s] on appending data [%s] vs "
+ "current table [%s]" % (c, sv, ov))
@property
def is_multi_index(self):
- """ the levels attribute is 1 or a list in the case of a multi-index """
- return isinstance(self.levels,list)
+ """the levels attribute is 1 or a list in the case of a multi-index"""
+ return isinstance(self.levels, list)
def validate_multiindex(self, obj):
- """ validate that we can store the multi-index; reset and return the new object """
- levels = [ l if l is not None else "level_{0}".format(i) for i, l in enumerate(obj.index.names) ]
+ """validate that we can store the multi-index; reset and return the
+ new object
+ """
+ levels = [l if l is not None else "level_{0}".format(i)
+ for i, l in enumerate(obj.index.names)]
try:
return obj.reset_index(), levels
- except (ValueError):
- raise ValueError("duplicate names/columns in the multi-index when storing as a table")
+ except ValueError:
+ raise ValueError("duplicate names/columns in the multi-index when "
+ "storing as a table")
@property
def nrows_expected(self):
@@ -2738,17 +2825,21 @@ def is_transposed(self):
@property
def data_orientation(self):
- """ return a tuple of my permutated axes, non_indexable at the front """
- return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes], [int(a.axis) for a in self.index_axes]))
+ """return a tuple of my permutated axes, non_indexable at the front"""
+ return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes],
+ [int(a.axis) for a in self.index_axes]))
def queryables(self):
""" return a dict of the kinds allowable columns for this object """
# compute the values_axes queryables
- return dict([(a.cname, a.kind) for a in self.index_axes] +
- [(self.storage_obj_type._AXIS_NAMES[axis], None) for axis, values in self.non_index_axes] +
- [(v.cname, v.kind) for v in self.values_axes if v.name in set(self.data_columns)]
- )
+ return dict(
+ [(a.cname, a.kind) for a in self.index_axes] +
+ [(self.storage_obj_type._AXIS_NAMES[axis], None)
+ for axis, values in self.non_index_axes] +
+ [(v.cname, v.kind) for v in self.values_axes
+ if v.name in set(self.data_columns)]
+ )
def index_cols(self):
""" return a list of my index cols """
@@ -2788,22 +2879,26 @@ def get_attrs(self):
self.levels = getattr(
self.attrs, 'levels', None) or []
t = self.table
- self.index_axes = [a.infer(t)
- for a in self.indexables if a.is_an_indexable]
- self.values_axes = [a.infer(t)
- for a in self.indexables if not a.is_an_indexable]
+ self.index_axes = [
+ a.infer(t) for a in self.indexables if a.is_an_indexable
+ ]
+ self.values_axes = [
+ a.infer(t) for a in self.indexables if not a.is_an_indexable
+ ]
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
if where is not None:
- if self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1:
+ if (self.version[0] <= 0 and self.version[1] <= 10 and
+ self.version[2] < 1):
ws = incompatibility_doc % '.'.join(
[str(x) for x in self.version])
warnings.warn(ws, IncompatibilityWarning)
def validate_min_itemsize(self, min_itemsize):
- """ validate the min_itemisze doesn't contain items that are not in the axes
- this needs data_columns to be defined """
+ """validate the min_itemisze doesn't contain items that are not in the
+ axes this needs data_columns to be defined
+ """
if min_itemsize is None:
return
if not isinstance(min_itemsize, dict):
@@ -2817,8 +2912,8 @@ def validate_min_itemsize(self, min_itemsize):
continue
if k not in q:
raise ValueError(
- "min_itemsize has the key [%s] which is not an axis or data_column" %
- k)
+ "min_itemsize has the key [%s] which is not an axis or "
+ "data_column" % k)
@property
def indexables(self):
@@ -2828,8 +2923,10 @@ def indexables(self):
self._indexables = []
# index columns
- self._indexables.extend([IndexCol(name=name, axis=axis, pos=i)
- for i, (axis, name) in enumerate(self.attrs.index_cols)])
+ self._indexables.extend([
+ IndexCol(name=name, axis=axis, pos=i)
+ for i, (axis, name) in enumerate(self.attrs.index_cols)
+ ])
# values columns
dc = set(self.data_columns)
@@ -2839,7 +2936,8 @@ def f(i, c):
klass = DataCol
if c in dc:
klass = DataIndexableCol
- return klass.create_for_block(i=i, name=c, pos=base_pos + i, version=self.version)
+ return klass.create_for_block(i=i, name=c, pos=base_pos + i,
+ version=self.version)
self._indexables.extend(
[f(i, c) for i, c in enumerate(self.attrs.values_cols)])
@@ -2854,7 +2952,8 @@ def create_index(self, columns=None, optlevel=None, kind=None):
Paramaters
----------
- columns : False (don't create an index), True (create all columns index), None or list_like (the indexers to index)
+ columns : False (don't create an index), True (create all columns
+ index), None or list_like (the indexers to index)
optlevel: optimization level (defaults to 6)
kind : kind of index (defaults to 'medium')
@@ -2907,7 +3006,9 @@ def create_index(self, columns=None, optlevel=None, kind=None):
v.createIndex(**kw)
def read_axes(self, where, **kwargs):
- """ create and return the axes sniffed from the table: return boolean for success """
+ """create and return the axes sniffed from the table: return boolean
+ for success
+ """
# validate the version
self.validate_version(where)
@@ -2932,15 +3033,18 @@ def get_object(self, obj):
return obj
def validate_data_columns(self, data_columns, min_itemsize):
- """ take the input data_columns and min_itemize and create a data_columns spec """
+ """take the input data_columns and min_itemize and create a data
+ columns spec
+ """
if not len(self.non_index_axes):
return []
axis, axis_labels = self.non_index_axes[0]
- info = self.info.get(axis,dict())
+ info = self.info.get(axis, dict())
if info.get('type') == 'MultiIndex' and data_columns is not None:
- raise ValueError("cannot use a multi-index on axis [{0}] with data_columns".format(axis))
+ raise ValueError("cannot use a multi-index on axis [{0}] with "
+ "data_columns".format(axis))
# evaluate the passed data_columns, True == use all columns
# take only valide axis labels
@@ -2953,8 +3057,10 @@ def validate_data_columns(self, data_columns, min_itemsize):
if isinstance(min_itemsize, dict):
existing_data_columns = set(data_columns)
- data_columns.extend(
- [k for k in min_itemsize.keys() if k != 'values' and k not in existing_data_columns])
+ data_columns.extend([
+ k for k in min_itemsize.keys()
+ if k != 'values' and k not in existing_data_columns
+ ])
# return valid columns in the order of our axis
return [c for c in data_columns if c in axis_labels]
@@ -2962,17 +3068,21 @@ def validate_data_columns(self, data_columns, min_itemsize):
def create_axes(self, axes, obj, validate=True, nan_rep=None,
data_columns=None, min_itemsize=None, **kwargs):
""" create and return the axes
- leagcy tables create an indexable column, indexable index, non-indexable fields
+        legacy tables create an indexable column, indexable index,
+ non-indexable fields
Parameters:
-----------
- axes: a list of the axes in order to create (names or numbers of the axes)
+ axes: a list of the axes in order to create (names or numbers of
+ the axes)
obj : the object to create axes on
- validate: validate the obj against an existiing object already written
+ validate: validate the obj against an existing object already
+ written
min_itemsize: a dict of the min size for a column in bytes
nan_rep : a values to use for string column nan_rep
encoding : the encoding for string values
- data_columns : a list of columns that we want to create separate to allow indexing (or True will force all colummns)
+ data_columns : a list of columns that we want to create separate to
+ allow indexing (or True will force all columns)
"""
@@ -2981,8 +3091,9 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None,
try:
axes = _AXES_MAP[type(obj)]
except:
- raise TypeError("cannot properly create the storer for: [group->%s,value->%s]" %
- (self.group._v_name, type(obj)))
+ raise TypeError("cannot properly create the storer for: "
+ "[group->%s,value->%s]"
+ % (self.group._v_name, type(obj)))
# map axes to numbers
axes = [obj._get_axis_number(a) for a in axes]
@@ -3021,7 +3132,8 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None,
if i in axes:
name = obj._AXIS_NAMES[i]
index_axes_map[i] = _convert_index(
- a, self.encoding, self.format_type).set_name(name).set_axis(i)
+ a, self.encoding, self.format_type
+ ).set_name(name).set_axis(i)
else:
# we might be able to change the axes on the appending data if
@@ -3037,16 +3149,17 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None,
append_axis = exist_axis
# the non_index_axes info
- info = _get_info(self.info,i)
+ info = _get_info(self.info, i)
info['names'] = list(a.names)
info['type'] = a.__class__.__name__
self.non_index_axes.append((i, append_axis))
# set axis positions (based on the axes)
- self.index_axes = [index_axes_map[a].set_pos(
- j).update_info(self.info) for j,
- a in enumerate(axes)]
+ self.index_axes = [
+ index_axes_map[a].set_pos(j).update_info(self.info)
+ for j, a in enumerate(axes)
+ ]
j = len(self.index_axes)
# check for column conflicts
@@ -3066,11 +3179,13 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None,
data_columns = self.validate_data_columns(
data_columns, min_itemsize)
if len(data_columns):
- blocks = block_obj.reindex_axis(Index(axis_labels) - Index(
- data_columns), axis=axis)._data.blocks
+ blocks = block_obj.reindex_axis(
+ Index(axis_labels) - Index(data_columns),
+ axis=axis
+ )._data.blocks
for c in data_columns:
- blocks.extend(block_obj.reindex_axis(
- [c], axis=axis)._data.blocks)
+ blocks.extend(
+ block_obj.reindex_axis([c], axis=axis)._data.blocks)
# reorder the blocks in the same order as the existing_table if we can
if existing_table is not None:
@@ -3097,7 +3212,8 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None,
name = None
# we have a data_column
- if data_columns and len(b.items) == 1 and b.items[0] in data_columns:
+ if (data_columns and len(b.items) == 1 and
+ b.items[0] in data_columns):
klass = DataIndexableCol
name = b.items[0]
self.data_columns.append(name)
@@ -3108,8 +3224,9 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None,
try:
existing_col = existing_table.values_axes[i]
except:
- raise ValueError("Incompatible appended table [%s] with existing table [%s]" %
- (blocks, existing_table.values_axes))
+ raise ValueError("Incompatible appended table [%s] with "
+ "existing table [%s]"
+ % (blocks, existing_table.values_axes))
else:
existing_col = None
@@ -3128,9 +3245,12 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None,
self.values_axes.append(col)
except (NotImplementedError, ValueError, TypeError) as e:
raise e
- except (Exception) as detail:
- raise Exception("cannot find the correct atom type -> [dtype->%s,items->%s] %s" % (
- b.dtype.name, b.items, str(detail)))
+ except Exception as detail:
+ raise Exception(
+ "cannot find the correct atom type -> "
+ "[dtype->%s,items->%s] %s"
+ % (b.dtype.name, b.items, str(detail))
+ )
j += 1
# validate our min_itemsize
@@ -3160,7 +3280,8 @@ def process_filter(field, filt):
# see if the field is the name of an axis
if field == axis_name:
takers = op(axis_values, filt)
- return obj.ix._getitem_axis(takers, axis=axis_number)
+ return obj.ix._getitem_axis(takers,
+ axis=axis_number)
# this might be the name of a file IN an axis
elif field in axis_values:
@@ -3173,7 +3294,8 @@ def process_filter(field, filt):
if isinstance(obj, DataFrame):
axis_number = 1 - axis_number
takers = op(values, filt)
- return obj.ix._getitem_axis(takers, axis=axis_number)
+ return obj.ix._getitem_axis(takers,
+ axis=axis_number)
raise ValueError(
"cannot find the field [%s] for filtering!" % field)
@@ -3182,8 +3304,8 @@ def process_filter(field, filt):
return obj
- def create_description(
- self, complib=None, complevel=None, fletcher32=False, expectedrows=None):
+ def create_description(self, complib=None, complevel=None,
+ fletcher32=False, expectedrows=None):
""" create the description of the table from the axes & values """
# expected rows estimate
@@ -3197,9 +3319,9 @@ def create_description(
if complib:
if complevel is None:
complevel = self._complevel or 9
- filters = _tables().Filters(complevel=complevel,
- complib=complib,
- fletcher32=fletcher32 or self._fletcher32)
+ filters = _tables().Filters(
+ complevel=complevel, complib=complib,
+ fletcher32=fletcher32 or self._fletcher32)
d['filters'] = filters
elif self._filters is not None:
d['filters'] = self._filters
@@ -3207,7 +3329,9 @@ def create_description(
return d
def read_coordinates(self, where=None, start=None, stop=None, **kwargs):
- """ select coordinates (row numbers) from a table; return the coordinates object """
+ """select coordinates (row numbers) from a table; return the
+ coordinates object
+ """
# validate the version
self.validate_version(where)
@@ -3222,7 +3346,9 @@ def read_coordinates(self, where=None, start=None, stop=None, **kwargs):
return Index(self.selection.select_coords())
def read_column(self, column, where=None, **kwargs):
- """ return a single column from the table, generally only indexables are interesting """
+ """return a single column from the table, generally only indexables
+ are interesting
+ """
# validate the version
self.validate_version()
@@ -3241,13 +3367,14 @@ def read_column(self, column, where=None, **kwargs):
if not a.is_data_indexable:
raise ValueError(
- "column [%s] can not be extracted individually; it is not data indexable" %
- column)
+ "column [%s] can not be extracted individually; it is "
+ "not data indexable" % column)
# column must be an indexable or a data column
c = getattr(self.table.cols, column)
a.set_info(self.info)
- return Series(a.convert(c[:], nan_rep=self.nan_rep, encoding=self.encoding).take_data())
+ return Series(a.convert(c[:], nan_rep=self.nan_rep,
+ encoding=self.encoding).take_data())
raise KeyError("column [%s] not found in the table" % column)
@@ -3268,7 +3395,8 @@ def read(self, **kwargs):
def write(self, **kwargs):
""" write in a format that we can search later on (but cannot append
to): write out the indicies and the values using _write_array
- (e.g. a CArray) create an indexing table so that we can search"""
+ (e.g. a CArray) create an indexing table so that we can search
+ """
raise NotImplementedError("WORKTable needs to implement write")
@@ -3279,11 +3407,12 @@ class LegacyTable(Table):
append (but doesn't require them), and stores the data in a format
that can be easily searched
- """
- _indexables = [IndexCol(name='index', axis=1, pos=0),
- IndexCol(name='column', axis=2,
- pos=1, index_kind='columns_kind'),
- DataCol(name='fields', cname='values', kind_attr='fields', pos=2)]
+ """
+ _indexables = [
+ IndexCol(name='index', axis=1, pos=0),
+ IndexCol(name='column', axis=2, pos=1, index_kind='columns_kind'),
+ DataCol(name='fields', cname='values', kind_attr='fields', pos=2)
+ ]
table_type = u('legacy')
ndim = 3
@@ -3291,7 +3420,9 @@ def write(self, **kwargs):
raise TypeError("write operations are not allowed on legacy tables!")
def read(self, where=None, columns=None, **kwargs):
- """ we have n indexable columns, with an arbitrary number of data axes """
+ """we have n indexable columns, with an arbitrary number of data
+ axes
+ """
if not self.read_axes(where=where, **kwargs):
return None
@@ -3395,8 +3526,8 @@ class AppendableTable(LegacyTable):
table_type = u('appendable')
def write(self, obj, axes=None, append=False, complib=None,
- complevel=None, fletcher32=None, min_itemsize=None, chunksize=None,
- expectedrows=None, dropna=True, **kwargs):
+ complevel=None, fletcher32=None, min_itemsize=None,
+ chunksize=None, expectedrows=None, dropna=True, **kwargs):
if not append and self.is_exists:
self._handle.removeNode(self.group, 'table')
@@ -3485,7 +3616,7 @@ def write_data(self, chunksize, dropna=True):
# reshape the values if needed
values = [a.take_data() for a in self.values_axes]
values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1))
- for v in values]
+ for v in values]
bvalues = []
for i, v in enumerate(values):
new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
@@ -3617,7 +3748,8 @@ def read(self, where=None, columns=None, **kwargs):
if not self.read_axes(where=where, **kwargs):
return None
- info = self.info.get(self.non_index_axes[0][0],dict()) if len(self.non_index_axes) else dict()
+ info = (self.info.get(self.non_index_axes[0][0], dict())
+ if len(self.non_index_axes) else dict())
index = self.index_axes[0].values
frames = []
for a in self.values_axes:
@@ -3630,7 +3762,7 @@ def read(self, where=None, columns=None, **kwargs):
cols = Index(a.values)
names = info.get('names')
if names is not None:
- cols.set_names(names,inplace=True)
+ cols.set_names(names, inplace=True)
if self.is_transposed:
values = a.cvalues
@@ -3679,9 +3811,10 @@ def write(self, obj, data_columns=None, **kwargs):
""" we are going to write this as a frame table """
if not isinstance(obj, DataFrame):
name = obj.name or 'values'
- obj = DataFrame({ name : obj }, index=obj.index)
+ obj = DataFrame({name: obj}, index=obj.index)
obj.columns = [name]
- return super(AppendableSeriesTable, self).write(obj=obj, data_columns=obj.columns, **kwargs)
+ return super(AppendableSeriesTable, self).write(
+ obj=obj, data_columns=obj.columns, **kwargs)
def read(self, columns=None, **kwargs):
@@ -3694,13 +3827,14 @@ def read(self, columns=None, **kwargs):
if is_multi_index:
s.set_index(self.levels, inplace=True)
- s = s.iloc[:,0]
+ s = s.iloc[:, 0]
# remove the default name
if s.name == 'values':
s.name = None
return s
+
class AppendableMultiSeriesTable(AppendableSeriesTable):
""" support the new appendable table formats """
pandas_kind = u('series_table')
@@ -3715,8 +3849,8 @@ def write(self, obj, **kwargs):
obj.columns = cols
return super(AppendableMultiSeriesTable, self).write(obj=obj, **kwargs)
-class GenericTable(AppendableFrameTable):
+class GenericTable(AppendableFrameTable):
""" a table that read/writes the generic pytables table format """
pandas_kind = u('frame_table')
table_type = u('generic_table')
@@ -3756,7 +3890,7 @@ def indexables(self):
for i, n in enumerate(d._v_names):
dc = GenericDataIndexableCol(
- name=n, pos=i, values=[n], version = self.version)
+ name=n, pos=i, values=[n], version=self.version)
self._indexables.append(dc)
return self._indexables
@@ -3786,7 +3920,8 @@ def write(self, obj, data_columns=None, **kwargs):
for n in self.levels:
if n not in data_columns:
data_columns.insert(0, n)
- return super(AppendableMultiFrameTable, self).write(obj=obj, data_columns=data_columns, **kwargs)
+ return super(AppendableMultiFrameTable, self).write(
+ obj=obj, data_columns=data_columns, **kwargs)
def read(self, columns=None, **kwargs):
if columns is not None:
@@ -3798,7 +3933,9 @@ def read(self, columns=None, **kwargs):
df = df.set_index(self.levels)
# remove names for 'level_%d'
- df.index = df.index.set_names([ None if self._re_levels.search(l) else l for l in df.index.names ])
+ df.index = df.index.set_names([
+ None if self._re_levels.search(l) else l for l in df.index.names
+ ])
return df
@@ -3844,11 +3981,12 @@ def _reindex_axis(obj, axis, labels, other=None):
if other is not None:
labels = labels & _ensure_index(other.unique())
if not labels.equals(ax):
- slicer = [ slice(None, None) ] * obj.ndim
+ slicer = [slice(None, None)] * obj.ndim
slicer[axis] = labels
obj = obj.loc[tuple(slicer)]
return obj
+
def _get_info(info, name):
""" get/create the info for this name """
try:
@@ -3857,19 +3995,21 @@ def _get_info(info, name):
idx = info[name] = dict()
return idx
+
def _convert_index(index, encoding=None, format_type=None):
index_name = getattr(index, 'name', None)
if isinstance(index, DatetimeIndex):
converted = index.asi8
return IndexCol(converted, 'datetime64', _tables().Int64Col(),
- freq=getattr(index, 'freq', None), tz=getattr(index, 'tz', None),
+ freq=getattr(index, 'freq', None),
+ tz=getattr(index, 'tz', None),
index_name=index_name)
elif isinstance(index, (Int64Index, PeriodIndex)):
atom = _tables().Int64Col()
return IndexCol(
index.values, 'integer', atom, freq=getattr(index, 'freq', None),
- index_name=index_name)
+ index_name=index_name)
if isinstance(index, MultiIndex):
raise TypeError('MultiIndex not supported here!')
@@ -3881,7 +4021,8 @@ def _convert_index(index, encoding=None, format_type=None):
if inferred_type == 'datetime64':
converted = values.view('i8')
return IndexCol(converted, 'datetime64', _tables().Int64Col(),
- freq=getattr(index, 'freq', None), tz=getattr(index, 'tz', None),
+ freq=getattr(index, 'freq', None),
+ tz=getattr(index, 'tz', None),
index_name=index_name)
elif inferred_type == 'datetime':
converted = np.array([(time.mktime(v.timetuple()) +
@@ -3901,15 +4042,18 @@ def _convert_index(index, encoding=None, format_type=None):
converted = _convert_string_array(values, encoding)
itemsize = converted.dtype.itemsize
return IndexCol(
- converted, 'string', _tables().StringCol(itemsize), itemsize=itemsize,
- index_name=index_name)
+ converted, 'string', _tables().StringCol(itemsize),
+ itemsize=itemsize, index_name=index_name
+ )
elif inferred_type == 'unicode':
if format_type == 'fixed':
atom = _tables().ObjectAtom()
return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
index_name=index_name)
raise TypeError(
- "[unicode] is not supported as a in index type for [{0}] formats".format(format_type))
+ "[unicode] is not supported as a in index type for [{0}] formats"
+ .format(format_type)
+ )
elif inferred_type == 'integer':
# take a guess for now, hope the values fit
@@ -4027,6 +4171,7 @@ def _need_convert(kind):
return True
return False
+
class Selection(object):
"""
@@ -4065,9 +4210,14 @@ def __init__(self, table, where=None, start=None, stop=None, **kwargs):
stop = self.table.nrows
self.coordinates = np.arange(start, stop)[where]
elif issubclass(where.dtype.type, np.integer):
- if (self.start is not None and (where < self.start).any()) or (self.stop is not None and (where >= self.stop).any()):
+ if ((self.start is not None and
+ (where < self.start).any()) or
+ (self.stop is not None and
+ (where >= self.stop).any())):
raise ValueError(
- "where must have index locations >= start and < stop")
+ "where must have index locations >= start and "
+ "< stop"
+ )
self.coordinates = where
except:
@@ -4089,21 +4239,27 @@ def generate(self, where):
q = self.table.queryables()
try:
return Expr(where, queryables=q, encoding=self.table.encoding)
- except (NameError) as detail:
-
- # raise a nice message, suggesting that the user should use data_columns
- raise ValueError("The passed where expression: {0}\n"
- " contains an invalid variable reference\n"
- " all of the variable refrences must be a reference to\n"
- " an axis (e.g. 'index' or 'columns'), or a data_column\n"
- " The currently defined references are: {1}\n".format(where,','.join(q.keys())))
+ except NameError as detail:
+ # raise a nice message, suggesting that the user should use
+ # data_columns
+ raise ValueError(
+ "The passed where expression: {0}\n"
+ " contains an invalid variable reference\n"
+ " all of the variable refrences must be a "
+ "reference to\n"
+ " an axis (e.g. 'index' or 'columns'), or a "
+ "data_column\n"
+ " The currently defined references are: {1}\n"
+ .format(where, ','.join(q.keys()))
+ )
def select(self):
"""
generate the selection
"""
if self.condition is not None:
- return self.table.table.readWhere(self.condition.format(), start=self.start, stop=self.stop)
+ return self.table.table.readWhere(self.condition.format(),
+ start=self.start, stop=self.stop)
elif self.coordinates is not None:
return self.table.table.readCoordinates(self.coordinates)
return self.table.table.read(start=self.start, stop=self.stop)
@@ -4115,7 +4271,9 @@ def select_coords(self):
if self.condition is None:
return np.arange(self.table.nrows)
- return self.table.table.getWhereList(self.condition.format(), start=self.start, stop=self.stop, sort=True)
+ return self.table.table.getWhereList(self.condition.format(),
+ start=self.start, stop=self.stop,
+ sort=True)
# utilities ###
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 1d0d1d17ec631..8c172db162cd6 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -2,9 +2,9 @@
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
-It has been extended and improved by Skipper Seabold from the Statsmodels project
-who also developed the StataWriter and was finally added to pandas in an once again
-improved version.
+It has been extended and improved by Skipper Seabold from the Statsmodels
+project who also developed the StataWriter and was finally added to pandas in
+a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
http://statsmodels.sourceforge.net/devel/
@@ -25,7 +25,8 @@
from pandas.io.common import get_filepath_or_buffer
-def read_stata(filepath_or_buffer, convert_dates=True, convert_categoricals=True, encoding=None, index=None):
+def read_stata(filepath_or_buffer, convert_dates=True,
+ convert_categoricals=True, encoding=None, index=None):
"""
Read Stata file into DataFrame
@@ -63,7 +64,8 @@ def _stata_elapsed_date_to_datetime(date, fmt):
Examples
--------
- >>> _stata_elapsed_date_to_datetime(52, "%tw") datetime.datetime(1961, 1, 1, 0, 0)
+ >>> _stata_elapsed_date_to_datetime(52, "%tw")
+ datetime.datetime(1961, 1, 1, 0, 0)
Notes
-----
@@ -199,8 +201,11 @@ def __init__(self, offset, value):
'.' or ('.' + chr(value - offset + 96))
else:
self._str = '.'
- string = property(lambda self: self._str, doc="The Stata representation of the missing value: '.', '.a'..'.z'")
- value = property(lambda self: self._value, doc='The binary representation of the missing value.')
+ string = property(lambda self: self._str,
+ doc="The Stata representation of the missing value: "
+ "'.', '.a'..'.z'")
+ value = property(lambda self: self._value,
+ doc='The binary representation of the missing value.')
def __unicode__(self):
return self.string
@@ -292,19 +297,22 @@ def _decode_bytes(self, str, errors=None):
class StataReader(StataParser):
"""
- Class for working with a Stata dataset. There are two possibilities for usage:
+ Class for working with a Stata dataset. There are two possibilities for
+ usage:
* The from_dta() method on the DataFrame class.
- This will return a DataFrame with the Stata dataset. Note that when using the
- from_dta() method, you will not have access to meta-information like variable
- labels or the data label.
-
- * Work with this object directly. Upon instantiation, the header of the Stata data
- file is read, giving you access to attributes like variable_labels(), data_label(),
- nobs(), ... A DataFrame with the data is returned by the read() method; this will
- also fill up the value_labels. Note that calling the value_labels() method will
- result in an error if the read() method has not been called yet. This is because
- the value labels are stored at the end of a Stata dataset, after the data.
+ This will return a DataFrame with the Stata dataset. Note that when
+ using the from_dta() method, you will not have access to
+ meta-information like variable labels or the data label.
+
+ * Work with this object directly. Upon instantiation, the header of the
+ Stata data file is read, giving you access to attributes like
+ variable_labels(), data_label(), nobs(), ... A DataFrame with the data
+ is returned by the read() method; this will also fill up the
+ value_labels. Note that calling the value_labels() method will result in
+ an error if the read() method has not been called yet. This is because
+ the value labels are stored at the end of a Stata dataset, after the
+ data.
Parameters
----------
@@ -323,7 +331,9 @@ def __init__(self, path_or_buf, encoding='cp1252'):
self._data_read = False
self._value_labels_read = False
if isinstance(path_or_buf, str):
- path_or_buf, encoding = get_filepath_or_buffer(path_or_buf, encoding=self._default_encoding)
+ path_or_buf, encoding = get_filepath_or_buffer(
+ path_or_buf, encoding=self._default_encoding
+ )
if isinstance(path_or_buf, (str, compat.text_type, bytes)):
self.path_or_buf = open(path_or_buf, 'rb')
@@ -334,17 +344,22 @@ def __init__(self, path_or_buf, encoding='cp1252'):
def _read_header(self):
first_char = self.path_or_buf.read(1)
- if struct.unpack('c', first_char)[0] == b'<': # format 117 or higher (XML like)
+ if struct.unpack('c', first_char)[0] == b'<':
+ # format 117 or higher (XML like)
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117]:
- raise ValueError("Version of given Stata file is not 104, 105, 108, 113 (Stata 8/9), 114 (Stata 10/11), 115 (Stata 12) or 117 (Stata 13)")
+ raise ValueError("Version of given Stata file is not 104, "
+ "105, 108, 113 (Stata 8/9), 114 (Stata "
+ "10/11), 115 (Stata 12) or 117 (Stata 13)")
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == "MSF" and '>' or '<'
self.path_or_buf.read(15) # </byteorder><K>
- self.nvar = struct.unpack(self.byteorder + 'H', self.path_or_buf.read(2))[0]
+ self.nvar = struct.unpack(self.byteorder + 'H',
+ self.path_or_buf.read(2))[0]
self.path_or_buf.read(7) # </K><N>
- self.nobs = struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0]
+ self.nobs = struct.unpack(self.byteorder + 'I',
+ self.path_or_buf.read(4))[0]
self.path_or_buf.read(11) # </N><label>
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
self.data_label = self.path_or_buf.read(strlen)
@@ -354,20 +369,31 @@ def _read_header(self):
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
- seek_vartypes = struct.unpack(self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16
- seek_varnames = struct.unpack(self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
- seek_sortlist = struct.unpack(self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
- seek_formats = struct.unpack(self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9
- seek_value_label_names = struct.unpack(self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19
- seek_variable_labels = struct.unpack(self.byteorder + 'q', self.path_or_buf.read(8))[0] + 17
+ seek_vartypes = struct.unpack(
+ self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16
+ seek_varnames = struct.unpack(
+ self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
+ seek_sortlist = struct.unpack(
+ self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
+ seek_formats = struct.unpack(
+ self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9
+ seek_value_label_names = struct.unpack(
+ self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19
+ seek_variable_labels = struct.unpack(
+ self.byteorder + 'q', self.path_or_buf.read(8))[0] + 17
self.path_or_buf.read(8) # <characteristics>
- self.data_location = struct.unpack(self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6
- self.seek_strls = struct.unpack(self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7
- self.seek_value_labels = struct.unpack(self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14
+ self.data_location = struct.unpack(
+ self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6
+ self.seek_strls = struct.unpack(
+ self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7
+ self.seek_value_labels = struct.unpack(
+ self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14
#self.path_or_buf.read(8) # </stata_dta>
#self.path_or_buf.read(8) # EOF
self.path_or_buf.seek(seek_vartypes)
- typlist = [struct.unpack(self.byteorder + 'H', self.path_or_buf.read(2))[0] for i in range(self.nvar)]
+ typlist = [struct.unpack(self.byteorder + 'H',
+ self.path_or_buf.read(2))[0]
+ for i in range(self.nvar)]
self.typlist = [None]*self.nvar
try:
i = 0
@@ -378,7 +404,8 @@ def _read_header(self):
self.typlist[i] = self.TYPE_MAP_XML[typ]
i += 1
except:
- raise ValueError("cannot convert stata types [{0}]".format(','.join(typlist)))
+ raise ValueError("cannot convert stata types [{0}]"
+ .format(','.join(typlist)))
self.dtyplist = [None]*self.nvar
try:
i = 0
@@ -389,33 +416,45 @@ def _read_header(self):
self.dtyplist[i] = self.DTYPE_MAP_XML[typ]
i += 1
except:
- raise ValueError("cannot convert stata dtypes [{0}]".format(','.join(typlist)))
+ raise ValueError("cannot convert stata dtypes [{0}]"
+ .format(','.join(typlist)))
self.path_or_buf.seek(seek_varnames)
- self.varlist = [self._null_terminate(self.path_or_buf.read(33)) for i in range(self.nvar)]
+ self.varlist = [self._null_terminate(self.path_or_buf.read(33))
+ for i in range(self.nvar)]
self.path_or_buf.seek(seek_sortlist)
- self.srtlist = struct.unpack(self.byteorder + ('h' * (self.nvar + 1)), self.path_or_buf.read(2 * (self.nvar + 1)))[:-1]
+ self.srtlist = struct.unpack(
+ self.byteorder + ('h' * (self.nvar + 1)),
+ self.path_or_buf.read(2 * (self.nvar + 1))
+ )[:-1]
self.path_or_buf.seek(seek_formats)
- self.fmtlist = [self._null_terminate(self.path_or_buf.read(49)) for i in range(self.nvar)]
+ self.fmtlist = [self._null_terminate(self.path_or_buf.read(49))
+ for i in range(self.nvar)]
self.path_or_buf.seek(seek_value_label_names)
- self.lbllist = [self._null_terminate(self.path_or_buf.read(33)) for i in range(self.nvar)]
+ self.lbllist = [self._null_terminate(self.path_or_buf.read(33))
+ for i in range(self.nvar)]
self.path_or_buf.seek(seek_variable_labels)
- self.vlblist = [self._null_terminate(self.path_or_buf.read(81)) for i in range(self.nvar)]
+ self.vlblist = [self._null_terminate(self.path_or_buf.read(81))
+ for i in range(self.nvar)]
else:
# header
self.format_version = struct.unpack('b', first_char)[0]
if self.format_version not in [104, 105, 108, 113, 114, 115]:
- raise ValueError("Version of given Stata file is not 104, 105, 108, 113 (Stata 8/9), 114 (Stata 10/11), 115 (Stata 12) or 117 (Stata 13)")
+ raise ValueError("Version of given Stata file is not 104, "
+ "105, 108, 113 (Stata 8/9), 114 (Stata "
+ "10/11), 115 (Stata 12) or 117 (Stata 13)")
self.byteorder = self.path_or_buf.read(1) == 0x1 and '>' or '<'
self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
- self.nvar = struct.unpack(self.byteorder + 'H', self.path_or_buf.read(2))[0]
- self.nobs = struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0]
+ self.nvar = struct.unpack(self.byteorder + 'H',
+ self.path_or_buf.read(2))[0]
+ self.nobs = struct.unpack(self.byteorder + 'I',
+ self.path_or_buf.read(4))[0]
if self.format_version > 105:
self.data_label = self.path_or_buf.read(81)
else:
@@ -425,51 +464,73 @@ def _read_header(self):
# descriptors
if self.format_version > 108:
- typlist = [ord(self.path_or_buf.read(1)) for i in range(self.nvar)]
+ typlist = [ord(self.path_or_buf.read(1))
+ for i in range(self.nvar)]
else:
- typlist = [self.OLD_TYPE_MAPPING[self._decode_bytes(self.path_or_buf.read(1))] for i in range(self.nvar)]
+ typlist = [
+ self.OLD_TYPE_MAPPING[
+ self._decode_bytes(self.path_or_buf.read(1))
+ ] for i in range(self.nvar)
+ ]
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except:
- raise ValueError("cannot convert stata types [{0}]".format(','.join(typlist)))
+ raise ValueError("cannot convert stata types [{0}]"
+ .format(','.join(typlist)))
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except:
- raise ValueError("cannot convert stata dtypes [{0}]".format(','.join(typlist)))
+ raise ValueError("cannot convert stata dtypes [{0}]"
+ .format(','.join(typlist)))
if self.format_version > 108:
- self.varlist = [self._null_terminate(self.path_or_buf.read(33)) for i in range(self.nvar)]
+ self.varlist = [self._null_terminate(self.path_or_buf.read(33))
+ for i in range(self.nvar)]
else:
- self.varlist = [self._null_terminate(self.path_or_buf.read(9)) for i in range(self.nvar)]
- self.srtlist = struct.unpack(self.byteorder + ('h' * (self.nvar + 1)), self.path_or_buf.read(2 * (self.nvar + 1)))[:-1]
+ self.varlist = [self._null_terminate(self.path_or_buf.read(9))
+ for i in range(self.nvar)]
+ self.srtlist = struct.unpack(
+ self.byteorder + ('h' * (self.nvar + 1)),
+ self.path_or_buf.read(2 * (self.nvar + 1))
+ )[:-1]
if self.format_version > 113:
- self.fmtlist = [self._null_terminate(self.path_or_buf.read(49)) for i in range(self.nvar)]
+ self.fmtlist = [self._null_terminate(self.path_or_buf.read(49))
+ for i in range(self.nvar)]
elif self.format_version > 104:
- self.fmtlist = [self._null_terminate(self.path_or_buf.read(12)) for i in range(self.nvar)]
+ self.fmtlist = [self._null_terminate(self.path_or_buf.read(12))
+ for i in range(self.nvar)]
else:
- self.fmtlist = [self._null_terminate(self.path_or_buf.read(7)) for i in range(self.nvar)]
+ self.fmtlist = [self._null_terminate(self.path_or_buf.read(7))
+ for i in range(self.nvar)]
if self.format_version > 108:
- self.lbllist = [self._null_terminate(self.path_or_buf.read(33)) for i in range(self.nvar)]
+ self.lbllist = [self._null_terminate(self.path_or_buf.read(33))
+ for i in range(self.nvar)]
else:
- self.lbllist = [self._null_terminate(self.path_or_buf.read(9)) for i in range(self.nvar)]
+ self.lbllist = [self._null_terminate(self.path_or_buf.read(9))
+ for i in range(self.nvar)]
if self.format_version > 105:
- self.vlblist = [self._null_terminate(self.path_or_buf.read(81)) for i in range(self.nvar)]
+ self.vlblist = [self._null_terminate(self.path_or_buf.read(81))
+ for i in range(self.nvar)]
else:
- self.vlblist = [self._null_terminate(self.path_or_buf.read(32)) for i in range(self.nvar)]
+ self.vlblist = [self._null_terminate(self.path_or_buf.read(32))
+ for i in range(self.nvar)]
# ignore expansion fields (Format 105 and later)
- # When reading, read five bytes; the last four bytes now tell you the
- # size of the next read, which you discard. You then continue like
- # this until you read 5 bytes of zeros.
+ # When reading, read five bytes; the last four bytes now tell you
+ # the size of the next read, which you discard. You then continue
+ # like this until you read 5 bytes of zeros.
if self.format_version > 104:
while True:
- data_type = struct.unpack(self.byteorder + 'b', self.path_or_buf.read(1))[0]
+ data_type = struct.unpack(self.byteorder + 'b',
+ self.path_or_buf.read(1))[0]
if self.format_version > 108:
- data_len = struct.unpack(self.byteorder + 'i', self.path_or_buf.read(4))[0]
+ data_len = struct.unpack(self.byteorder + 'i',
+ self.path_or_buf.read(4))[0]
else:
- data_len = struct.unpack(self.byteorder + 'h', self.path_or_buf.read(2))[0]
+ data_len = struct.unpack(self.byteorder + 'h',
+ self.path_or_buf.read(2))[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
@@ -477,13 +538,15 @@ def _read_header(self):
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
- self.has_string_data = len([x for x in self.typlist if type(x) is int]) > 0
+ self.has_string_data = len([x for x in self.typlist
+ if type(x) is int]) > 0
"""Calculate size of a data record."""
self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
def _calcsize(self, fmt):
- return type(fmt) is int and fmt or struct.calcsize(self.byteorder + fmt)
+ return (type(fmt) is int and fmt
+ or struct.calcsize(self.byteorder + fmt))
def _col_size(self, k=None):
if k is None:
@@ -503,7 +566,8 @@ def _unpack(self, fmt, byt):
return d
def _null_terminate(self, s):
- if compat.PY3 or self._encoding is not None: # have bytes not strings, so must decode
+ if compat.PY3 or self._encoding is not None: # have bytes not strings,
+ # so must decode
null_byte = b"\0"
try:
s = s[:s.index(null_byte)]
@@ -523,14 +587,24 @@ def _next(self):
data = [None] * self.nvar
for i in range(len(data)):
if type(typlist[i]) is int:
- data[i] = self._null_terminate(self.path_or_buf.read(typlist[i]))
+ data[i] = self._null_terminate(
+ self.path_or_buf.read(typlist[i])
+ )
else:
- data[i] = self._unpack(typlist[i], self.path_or_buf.read(self._col_size(i)))
+ data[i] = self._unpack(
+ typlist[i], self.path_or_buf.read(self._col_size(i))
+ )
return data
else:
- return list(map(lambda i: self._unpack(typlist[i],
- self.path_or_buf.read(self._col_size(i))),
- range(self.nvar)))
+ return list(
+ map(
+ lambda i: self._unpack(typlist[i],
+ self.path_or_buf.read(
+ self._col_size(i)
+ )),
+ range(self.nvar)
+ )
+ )
def _dataset(self):
"""
@@ -562,14 +636,17 @@ def _read_value_labels(self):
self.path_or_buf.seek(self.seek_value_labels)
else:
if not self._data_read:
- raise Exception("Data has not been read. Because of the layout of Stata files, this is necessary before reading value labels.")
+ raise Exception("Data has not been read. Because of the "
+ "layout of Stata files, this is necessary "
+ "before reading value labels.")
if self._value_labels_read:
raise Exception("Value labels have already been read.")
self.value_label_dict = dict()
if self.format_version <= 108:
- return # Value labels are not supported in version 108 and earlier.
+ # Value labels are not supported in version 108 and earlier.
+ return
while True:
if self.format_version >= 117:
@@ -582,18 +659,24 @@ def _read_value_labels(self):
labname = self._null_terminate(self.path_or_buf.read(33))
self.path_or_buf.read(3) # padding
- n = struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0]
- txtlen = struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0]
+ n = struct.unpack(self.byteorder + 'I',
+ self.path_or_buf.read(4))[0]
+ txtlen = struct.unpack(self.byteorder + 'I',
+ self.path_or_buf.read(4))[0]
off = []
for i in range(n):
- off.append(struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0])
+ off.append(struct.unpack(self.byteorder + 'I',
+ self.path_or_buf.read(4))[0])
val = []
for i in range(n):
- val.append(struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0])
+ val.append(struct.unpack(self.byteorder + 'I',
+ self.path_or_buf.read(4))[0])
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = dict()
for i in range(n):
- self.value_label_dict[labname][val[i]] = self._null_terminate(txt[off[i]:])
+ self.value_label_dict[labname][val[i]] = (
+ self._null_terminate(txt[off[i]:])
+ )
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
@@ -606,9 +689,11 @@ def _read_strls(self):
if self.path_or_buf.read(3) != b'GSO':
break
- v_o = struct.unpack(self.byteorder + 'L', self.path_or_buf.read(8))[0]
+ v_o = struct.unpack(self.byteorder + 'L',
+ self.path_or_buf.read(8))[0]
typ = self.path_or_buf.read(1)
- length = struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0]
+ length = struct.unpack(self.byteorder + 'I',
+ self.path_or_buf.read(4))[0]
self.GSO[v_o] = self.path_or_buf.read(length-1)
self.path_or_buf.read(1) # zero-termination
@@ -621,7 +706,8 @@ def data(self, convert_dates=True, convert_categoricals=True, index=None):
convert_dates : boolean, defaults to True
Convert date variables to DataFrame time values
convert_categoricals : boolean, defaults to True
- Read value labels and convert columns to Categorical/Factor variables
+ Read value labels and convert columns to Categorical/Factor
+ variables
index : identifier of index column
identifier of column that should be used as index of the DataFrame
@@ -659,21 +745,28 @@ def data(self, convert_dates=True, convert_categoricals=True, index=None):
if self.dtyplist[i] is not None:
col = data.columns[i]
if data[col].dtype is not np.dtype(object):
- data[col] = Series(data[col], data[col].index, self.dtyplist[i])
+ data[col] = Series(data[col], data[col].index,
+ self.dtyplist[i])
if convert_dates:
- cols = np.where(lmap(lambda x: x in _date_formats, self.fmtlist))[0]
+ cols = np.where(lmap(lambda x: x in _date_formats,
+ self.fmtlist))[0]
for i in cols:
col = data.columns[i]
- data[col] = data[col].apply(_stata_elapsed_date_to_datetime, args=(self.fmtlist[i],))
+ data[col] = data[col].apply(_stata_elapsed_date_to_datetime,
+ args=(self.fmtlist[i],))
if convert_categoricals:
- cols = np.where(lmap(lambda x: x in compat.iterkeys(self.value_label_dict), self.lbllist))[0]
+ cols = np.where(
+ lmap(lambda x: x in compat.iterkeys(self.value_label_dict),
+ self.lbllist)
+ )[0]
for i in cols:
col = data.columns[i]
labeled_data = np.copy(data[col])
labeled_data = labeled_data.astype(object)
- for k, v in compat.iteritems(self.value_label_dict[self.lbllist[i]]):
+ for k, v in compat.iteritems(
+ self.value_label_dict[self.lbllist[i]]):
labeled_data[(data[col] == k).values] = v
data[col] = Categorical.from_array(labeled_data)
@@ -684,11 +777,15 @@ def data_label(self):
return self.data_label
def variable_labels(self):
- """Returns variable labels as a dict, associating each variable name with corresponding label"""
+ """Returns variable labels as a dict, associating each variable name
+ with corresponding label
+ """
return dict(zip(self.varlist, self.vlblist))
def value_labels(self):
- """Returns a dict, associating each variable name a dict, associating each value its corresponding label"""
+ """Returns a dict, associating each variable name a dict, associating
+ each value its corresponding label
+ """
if not self._value_labels_read:
self._read_value_labels()
@@ -745,7 +842,9 @@ def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
- raise ValueError("convery_dates key is not in varlist and is not an int")
+ raise ValueError(
+ "convery_dates key is not in varlist and is not an int"
+ )
new_dict.update({key: convert_dates[key]})
return new_dict
@@ -769,7 +868,8 @@ def _dtype_to_stata_type(dtype):
if dtype.type == np.string_:
return chr(dtype.itemsize)
elif dtype.type == np.object_: # try to coerce it to the biggest string
- # not memory efficient, what else could we do?
+ # not memory efficient, what else could we
+ # do?
return chr(244)
elif dtype == np.float64:
return chr(255)
@@ -856,8 +956,8 @@ class StataWriter(StataParser):
>>> writer = StataWriter('./date_data_file.dta', date, {2 : 'tw'})
>>> writer.write_file()
"""
- def __init__(self, fname, data, convert_dates=None, write_index=True, encoding="latin-1",
- byteorder=None):
+ def __init__(self, fname, data, convert_dates=None, write_index=True,
+ encoding="latin-1", byteorder=None):
super(StataWriter, self).__init__(encoding)
self._convert_dates = convert_dates
self._write_index = write_index
@@ -867,7 +967,9 @@ def __init__(self, fname, data, convert_dates=None, write_index=True, encoding="
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
- self._file = _open_file_binary_write(fname, self._encoding or self._default_encoding)
+ self._file = _open_file_binary_write(
+ fname, self._encoding or self._default_encoding
+ )
self.type_converters = {253: np.long, 252: int}
def _write(self, to_write):
@@ -875,7 +977,8 @@ def _write(self, to_write):
Helper to call encode before writing to file for Python 3 compat.
"""
if compat.PY3:
- self._file.write(to_write.encode(self._encoding or self._default_encoding))
+ self._file.write(to_write.encode(self._encoding or
+ self._default_encoding))
else:
self._file.write(to_write)
@@ -898,9 +1001,13 @@ def __iter__(self):
self.varlist = data.columns.tolist()
dtypes = data.dtypes
if self._convert_dates is not None:
- self._convert_dates = _maybe_convert_to_int_keys(self._convert_dates, self.varlist)
+ self._convert_dates = _maybe_convert_to_int_keys(
+ self._convert_dates, self.varlist
+ )
for key in self._convert_dates:
- new_type = _convert_datetime_to_stata_type(self._convert_dates[key])
+ new_type = _convert_datetime_to_stata_type(
+ self._convert_dates[key]
+ )
dtypes[key] = np.dtype(new_type)
self.typlist = [_dtype_to_stata_type(dt) for dt in dtypes]
self.fmtlist = [_dtype_to_default_stata_fmt(dt) for dt in dtypes]
@@ -940,14 +1047,18 @@ def _write_header(self, data_label=None, time_stamp=None):
if data_label is None:
self._file.write(self._null_terminate(_pad_bytes("", 80)))
else:
- self._file.write(self._null_terminate(_pad_bytes(data_label[:80], 80)))
+ self._file.write(
+ self._null_terminate(_pad_bytes(data_label[:80], 80))
+ )
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime):
raise ValueError("time_stamp should be datetime type")
- self._file.write(self._null_terminate(time_stamp.strftime("%d %b %Y %H:%M")))
+ self._file.write(
+ self._null_terminate(time_stamp.strftime("%d %b %Y %H:%M"))
+ )
def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
fmtlist=None, lbllist=None):
@@ -996,7 +1107,8 @@ def _write_data_nodates(self):
self._write(var)
else:
try:
- self._file.write(struct.pack(byteorder + TYPE_MAP[typ], var))
+ self._file.write(struct.pack(byteorder + TYPE_MAP[typ],
+ var))
except struct.error:
# have to be strict about type pack won't do any
# kind of casting
diff --git a/pandas/io/wb.py b/pandas/io/wb.py
index a585cb9adccbb..362b7b192f746 100644
--- a/pandas/io/wb.py
+++ b/pandas/io/wb.py
@@ -32,21 +32,22 @@ def download(country=['MX', 'CA', 'US'], indicator=['GDPPCKD', 'GDPPCKN'],
"""
# Are ISO-2 country codes valid?
- valid_countries = ["AG", "AL", "AM", "AO", "AR", "AT", "AU", "AZ", "BB",
- "BD", "BE", "BF", "BG", "BH", "BI", "BJ", "BO", "BR", "BS", "BW",
- "BY", "BZ", "CA", "CD", "CF", "CG", "CH", "CI", "CL", "CM", "CN",
- "CO", "CR", "CV", "CY", "CZ", "DE", "DK", "DM", "DO", "DZ", "EC",
- "EE", "EG", "ER", "ES", "ET", "FI", "FJ", "FR", "GA", "GB", "GE",
- "GH", "GM", "GN", "GQ", "GR", "GT", "GW", "GY", "HK", "HN", "HR",
- "HT", "HU", "ID", "IE", "IL", "IN", "IR", "IS", "IT", "JM", "JO",
- "JP", "KE", "KG", "KH", "KM", "KR", "KW", "KZ", "LA", "LB", "LC",
- "LK", "LS", "LT", "LU", "LV", "MA", "MD", "MG", "MK", "ML", "MN",
- "MR", "MU", "MW", "MX", "MY", "MZ", "NA", "NE", "NG", "NI", "NL",
- "NO", "NP", "NZ", "OM", "PA", "PE", "PG", "PH", "PK", "PL", "PT",
- "PY", "RO", "RU", "RW", "SA", "SB", "SC", "SD", "SE", "SG", "SI",
- "SK", "SL", "SN", "SR", "SV", "SY", "SZ", "TD", "TG", "TH", "TN",
- "TR", "TT", "TW", "TZ", "UA", "UG", "US", "UY", "UZ", "VC", "VE",
- "VN", "VU", "YE", "ZA", "ZM", "ZW", "all"]
+ valid_countries = [
+ "AG", "AL", "AM", "AO", "AR", "AT", "AU", "AZ", "BB", "BD", "BE", "BF",
+ "BG", "BH", "BI", "BJ", "BO", "BR", "BS", "BW", "BY", "BZ", "CA", "CD",
+ "CF", "CG", "CH", "CI", "CL", "CM", "CN", "CO", "CR", "CV", "CY", "CZ",
+ "DE", "DK", "DM", "DO", "DZ", "EC", "EE", "EG", "ER", "ES", "ET", "FI",
+ "FJ", "FR", "GA", "GB", "GE", "GH", "GM", "GN", "GQ", "GR", "GT", "GW",
+ "GY", "HK", "HN", "HR", "HT", "HU", "ID", "IE", "IL", "IN", "IR", "IS",
+ "IT", "JM", "JO", "JP", "KE", "KG", "KH", "KM", "KR", "KW", "KZ", "LA",
+ "LB", "LC", "LK", "LS", "LT", "LU", "LV", "MA", "MD", "MG", "MK", "ML",
+ "MN", "MR", "MU", "MW", "MX", "MY", "MZ", "NA", "NE", "NG", "NI", "NL",
+ "NO", "NP", "NZ", "OM", "PA", "PE", "PG", "PH", "PK", "PL", "PT", "PY",
+ "RO", "RU", "RW", "SA", "SB", "SC", "SD", "SE", "SG", "SI", "SK", "SL",
+ "SN", "SR", "SV", "SY", "SZ", "TD", "TG", "TH", "TN", "TR", "TT", "TW",
+ "TZ", "UA", "UG", "US", "UY", "UZ", "VC", "VE", "VN", "VU", "YE", "ZA",
+ "ZM", "ZW", "all"
+ ]
if type(country) == str:
country = [country]
bad_countries = np.setdiff1d(country, valid_countries)
@@ -68,7 +69,8 @@ def download(country=['MX', 'CA', 'US'], indicator=['GDPPCKD', 'GDPPCKN'],
# Warn
if len(bad_indicators) > 0:
print('Failed to obtain indicator(s): %s' % '; '.join(bad_indicators))
- print('The data may still be available for download at http://data.worldbank.org')
+ print('The data may still be available for download at '
+ 'http://data.worldbank.org')
if len(bad_countries) > 0:
print('Invalid ISO-2 codes: %s' % ' '.join(bad_countries))
# Merge WDI series
@@ -84,9 +86,9 @@ def download(country=['MX', 'CA', 'US'], indicator=['GDPPCKD', 'GDPPCKN'],
def _get_data(indicator="NY.GNS.ICTR.GN.ZS", country='US',
start=2002, end=2005):
# Build URL for api call
- url = "http://api.worldbank.org/countries/" + country + "/indicators/" + \
- indicator + "?date=" + str(start) + ":" + str(end) + "&per_page=25000" + \
- "&format=json"
+ url = ("http://api.worldbank.org/countries/" + country + "/indicators/" +
+ indicator + "?date=" + str(start) + ":" + str(end) +
+ "&per_page=25000&format=json")
# Download
with urlopen(url) as response:
data = response.read()
| https://api.github.com/repos/pandas-dev/pandas/pulls/5663 | 2013-12-08T00:18:30Z | 2013-12-09T18:54:59Z | 2013-12-09T18:54:59Z | 2014-06-14T15:40:54Z | |
BUG: compat_pickle should not modify global namespace | diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 3365f1bb630b9..03b45336833d3 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -3,6 +3,7 @@
import sys
import numpy as np
import pandas
+import copy
import pickle as pkl
from pandas import compat
from pandas.compat import u, string_types
@@ -29,7 +30,7 @@ def load_reduce(self):
except:
# try to reencode the arguments
- if self.encoding is not None:
+ if getattr(self,'encoding',None) is not None:
args = tuple([arg.encode(self.encoding)
if isinstance(arg, string_types)
else arg for arg in args])
@@ -39,7 +40,7 @@ def load_reduce(self):
except:
pass
- if self.is_verbose:
+ if getattr(self,'is_verbose',None):
print(sys.exc_info())
print(func, args)
raise
@@ -53,6 +54,7 @@ class Unpickler(pkl._Unpickler):
class Unpickler(pkl.Unpickler):
pass
+Unpickler.dispatch = copy.copy(Unpickler.dispatch)
Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce
| turns out was modifying the python pickle just by importing pandas
when sub classing have to copy a mutable property before modifying
http://stackoverflow.com/questions/20444593/pandas-compiled-from-source-default-pickle-behavior-changed
| https://api.github.com/repos/pandas-dev/pandas/pulls/5661 | 2013-12-07T19:17:52Z | 2013-12-08T15:17:30Z | 2013-12-08T15:17:30Z | 2014-06-24T11:20:04Z |
BUG: Fix for MultiIndex to_excel() with index=False. | diff --git a/pandas/core/format.py b/pandas/core/format.py
index 7135573d48644..f018e0ffdf561 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -1373,7 +1373,7 @@ def _format_header_mi(self):
coloffset = 0
lnum = 0
- if isinstance(self.df.index, MultiIndex):
+ if self.index and isinstance(self.df.index, MultiIndex):
coloffset = len(self.df.index[0]) - 1
if self.merge_cells:
@@ -1412,10 +1412,11 @@ def _format_header_regular(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray))
if has_aliases or self.header:
coloffset = 0
+
if self.index:
coloffset = 1
- if isinstance(self.df.index, MultiIndex):
- coloffset = len(self.df.index[0])
+ if isinstance(self.df.index, MultiIndex):
+ coloffset = len(self.df.index[0])
colnames = self.columns
if has_aliases:
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index 3446eb07a111e..eeeb914a3754e 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -615,7 +615,7 @@ def test_roundtrip_indexlabels(self):
has_index_names=self.merge_cells
).astype(np.int64)
frame.index.names = ['test']
- tm.assert_frame_equal(frame,recons.astype(bool))
+ tm.assert_frame_equal(frame, recons.astype(bool))
with ensure_clean(self.ext) as path:
@@ -715,6 +715,31 @@ def test_to_excel_multiindex_dates(self):
tm.assert_frame_equal(tsframe, recons)
self.assertEquals(recons.index.names, ('time', 'foo'))
+ def test_to_excel_multiindex_no_write_index(self):
+ _skip_if_no_xlrd()
+
+ # Test writing and re-reading a MI witout the index. GH 5616.
+
+ # Initial non-MI frame.
+ frame1 = pd.DataFrame({'a': [10, 20], 'b': [30, 40], 'c': [50, 60]})
+
+ # Add a MI.
+ frame2 = frame1.copy()
+ multi_index = pd.MultiIndex.from_tuples([(70, 80), (90, 100)])
+ frame2.index = multi_index
+
+ with ensure_clean(self.ext) as path:
+
+ # Write out to Excel without the index.
+ frame2.to_excel(path, 'test1', index=False)
+
+ # Read it back in.
+ reader = ExcelFile(path)
+ frame3 = reader.parse('test1')
+
+ # Test that it is the same as the initial frame.
+ tm.assert_frame_equal(frame1, frame3)
+
def test_to_excel_float_format(self):
_skip_if_no_xlrd()
| closes #5616 caused by the updated Excel MultiIndex handling.
This change should go into v0.13.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5659 | 2013-12-07T00:27:33Z | 2013-12-10T13:19:30Z | 2013-12-10T13:19:30Z | 2014-06-18T19:01:49Z |
API: change _is_copy to is_copy attribute on pandas objects GH(5650) | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3a03d3b48ef19..4089b13fca5c7 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -77,11 +77,11 @@ class NDFrame(PandasObject):
axes : list
copy : boolean, default False
"""
- _internal_names = ['_data', 'name', '_cacher', '_is_copy', '_subtyp',
+ _internal_names = ['_data', 'name', '_cacher', 'is_copy', '_subtyp',
'_index', '_default_kind', '_default_fill_value']
_internal_names_set = set(_internal_names)
_metadata = []
- _is_copy = None
+ is_copy = None
def __init__(self, data, axes=None, copy=False, dtype=None,
fastpath=False):
@@ -96,7 +96,7 @@ def __init__(self, data, axes=None, copy=False, dtype=None,
for i, ax in enumerate(axes):
data = data.reindex_axis(ax, axis=i)
- object.__setattr__(self, '_is_copy', False)
+ object.__setattr__(self, 'is_copy', False)
object.__setattr__(self, '_data', data)
object.__setattr__(self, '_item_cache', {})
@@ -1016,7 +1016,7 @@ def _set_item(self, key, value):
def _setitem_copy(self, copy):
""" set the _is_copy of the iiem """
- self._is_copy = copy
+ self.is_copy = copy
return self
def _check_setitem_copy(self, stacklevel=4):
@@ -1024,7 +1024,7 @@ def _check_setitem_copy(self, stacklevel=4):
If you call this function, be sure to set the stacklevel such that the
user will see the error *at the level of setting*"""
- if self._is_copy:
+ if self.is_copy:
value = config.get_option('mode.chained_assignment')
t = ("A value is trying to be set on a copy of a slice from a "
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 902440ec8e184..ffc30c81ededd 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -11125,7 +11125,7 @@ def test_xs_view(self):
self.assert_((dm.xs(2) == 5).all())
# prior to chained assignment (GH5390)
- # this would raise, but now just rrens a copy (and sets _is_copy)
+ # this would raise, but now just returns a copy (and sets is_copy)
# TODO (?): deal with mixed-type fiasco?
# with assertRaisesRegexp(TypeError, 'cannot get view of mixed-type'):
# self.mixed_frame.xs(self.mixed_frame.index[2], copy=False)
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index d102ac999cab0..1afabc8d4c882 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -1380,10 +1380,10 @@ def test_set_value_keeps_names(self):
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sortlevel()
- self.assert_(df._is_copy is False)
+ self.assert_(df.is_copy is False)
self.assertEqual(df.index.names, ('Name', 'Number'))
df = df.set_value(('grethe', '4'), 'one', 99.34)
- self.assert_(df._is_copy is False)
+ self.assert_(df.is_copy is False)
self.assertEqual(df.index.names, ('Name', 'Number'))
def test_names(self):
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 7b05a0b78b121..b6e7b10232bf5 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1776,7 +1776,7 @@ def test_detect_chained_assignment(self):
# work with the chain
expected = DataFrame([[-5,1],[-6,3]],columns=list('AB'))
df = DataFrame(np.arange(4).reshape(2,2),columns=list('AB'),dtype='int64')
- self.assert_(not df._is_copy)
+ self.assert_(not df.is_copy)
df['A'][0] = -5
df['A'][1] = -6
@@ -1784,11 +1784,11 @@ def test_detect_chained_assignment(self):
expected = DataFrame([[-5,2],[np.nan,3.]],columns=list('AB'))
df = DataFrame({ 'A' : Series(range(2),dtype='int64'), 'B' : np.array(np.arange(2,4),dtype=np.float64)})
- self.assert_(not df._is_copy)
+ self.assert_(not df.is_copy)
df['A'][0] = -5
df['A'][1] = np.nan
assert_frame_equal(df, expected)
- self.assert_(not df['A']._is_copy)
+ self.assert_(not df['A'].is_copy)
# using a copy (the chain), fails
df = DataFrame({ 'A' : Series(range(2),dtype='int64'), 'B' : np.array(np.arange(2,4),dtype=np.float64)})
@@ -1800,7 +1800,7 @@ def f():
df = DataFrame({'a' : ['one', 'one', 'two',
'three', 'two', 'one', 'six'],
'c' : Series(range(7),dtype='int64') })
- self.assert_(not df._is_copy)
+ self.assert_(not df.is_copy)
expected = DataFrame({'a' : ['one', 'one', 'two',
'three', 'two', 'one', 'six'],
'c' : [42,42,2,3,4,42,6]})
@@ -1826,10 +1826,10 @@ def f():
with tm.assert_produces_warning(expected_warning=com.SettingWithCopyWarning):
df.loc[0]['A'] = 111
- # make sure that _is_copy is picked up reconstruction
+ # make sure that is_copy is picked up reconstruction
# GH5475
df = DataFrame({"A": [1,2]})
- self.assert_(df._is_copy is False)
+ self.assert_(df.is_copy is False)
with tm.ensure_clean('__tmp__pickle') as path:
df.to_pickle(path)
df2 = pd.read_pickle(path)
@@ -1854,21 +1854,21 @@ def random_text(nobs=100):
# always a copy
x = df.iloc[[0,1,2]]
- self.assert_(x._is_copy is True)
+ self.assert_(x.is_copy is True)
x = df.iloc[[0,1,2,4]]
- self.assert_(x._is_copy is True)
+ self.assert_(x.is_copy is True)
# explicity copy
indexer = df.letters.apply(lambda x : len(x) > 10)
df = df.ix[indexer].copy()
- self.assert_(df._is_copy is False)
+ self.assert_(df.is_copy is False)
df['letters'] = df['letters'].apply(str.lower)
# implicity take
df = random_text(100000)
indexer = df.letters.apply(lambda x : len(x) > 10)
df = df.ix[indexer]
- self.assert_(df._is_copy is True)
+ self.assert_(df.is_copy is True)
df.loc[:,'letters'] = df['letters'].apply(str.lower)
# this will raise
@@ -1880,7 +1880,7 @@ def random_text(nobs=100):
# an identical take, so no copy
df = DataFrame({'a' : [1]}).dropna()
- self.assert_(df._is_copy is False)
+ self.assert_(df.is_copy is False)
df['a'] += 1
pd.set_option('chained_assignment','warn')
| closes #5650
| https://api.github.com/repos/pandas-dev/pandas/pulls/5658 | 2013-12-06T22:30:17Z | 2013-12-07T00:18:10Z | 2013-12-07T00:18:10Z | 2014-07-07T21:26:09Z |
PERF: performance regression in frame/apply (GH5654) | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5e31b14fa7bd3..d0a1511ec1cca 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3434,7 +3434,7 @@ def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
if self._is_mixed_type: # maybe a hack for now
raise AssertionError('Must be mixed type DataFrame')
- values = self.values.ravel()
+ values = self.values
dummy = Series(NA, index=self._get_axis(axis),
dtype=values.dtype)
diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py
index 63b2a154c75e9..ee4d876d20233 100644
--- a/vb_suite/frame_methods.py
+++ b/vb_suite/frame_methods.py
@@ -289,3 +289,12 @@ def f(K=100):
frame_isnull = Benchmark('isnull(df)', setup,
start_date=datetime(2012,1,1))
+#----------------------------------------------------------------------
+# apply
+
+setup = common_setup + """
+s = Series(np.arange(1028.))
+df = DataFrame({ i:s for i in range(1028) })
+"""
+frame_apply_user_func = Benchmark('df.apply(lambda x: np.corrcoef(x,s)[0,1])', setup,
+ start_date=datetime(2012,1,1))
| closes #5654
| https://api.github.com/repos/pandas-dev/pandas/pulls/5656 | 2013-12-06T18:36:19Z | 2013-12-06T18:53:50Z | 2013-12-06T18:53:50Z | 2014-06-23T23:26:57Z |
BUG: repr_html, fix GH5588 for the MultiIndex case | diff --git a/pandas/core/format.py b/pandas/core/format.py
index 8bc74f2ff4c08..7135573d48644 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -846,9 +846,11 @@ def _write_hierarchical_rows(self, fmt_values, indent):
frame = self.frame
ncols = min(len(self.columns), self.max_cols)
+ nrows = min(len(self.frame), self.max_rows)
+
truncate = (len(frame) > self.max_rows)
- idx_values = frame.index.format(sparsify=False, adjoin=False,
+ idx_values = frame.index[:nrows].format(sparsify=False, adjoin=False,
names=False)
idx_values = lzip(*idx_values)
@@ -856,8 +858,8 @@ def _write_hierarchical_rows(self, fmt_values, indent):
# GH3547
sentinal = com.sentinal_factory()
- levels = frame.index.format(sparsify=sentinal, adjoin=False,
- names=False)
+ levels = frame.index[:nrows].format(sparsify=sentinal,
+ adjoin=False, names=False)
# Truncate row names
if truncate:
levels = [lev[:self.max_rows] for lev in levels]
diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py
index a7c863345b9c5..63b2a154c75e9 100644
--- a/vb_suite/frame_methods.py
+++ b/vb_suite/frame_methods.py
@@ -157,7 +157,35 @@ def f(x):
"""
frame_to_html_mixed = Benchmark('df.to_html()', setup,
- start_date=datetime(2010, 6, 1))
+ start_date=datetime(2011, 11, 18))
+
+
+# truncated repr_html, single index
+
+setup = common_setup + """
+nrows=10000
+data=randn(nrows,10)
+idx=MultiIndex.from_arrays(np.tile(randn(3,nrows/100),100))
+df=DataFrame(data,index=idx)
+
+"""
+
+frame_html_repr_trunc_mi = Benchmark('df._repr_html_()', setup,
+ start_date=datetime(2013, 11, 25))
+
+# truncated repr_html, MultiIndex
+
+setup = common_setup + """
+nrows=10000
+data=randn(nrows,10)
+idx=randn(nrows)
+df=DataFrame(data,index=idx)
+
+"""
+
+frame_html_repr_trunc_si = Benchmark('df._repr_html_()', setup,
+ start_date=datetime(2013, 11, 25))
+
# insert many columns
| https://github.com/pydata/pandas/pull/5550#issuecomment-29938267
#5588
| https://api.github.com/repos/pandas-dev/pandas/pulls/5649 | 2013-12-05T22:57:49Z | 2013-12-05T22:57:54Z | 2013-12-05T22:57:54Z | 2014-07-06T14:18:09Z |
Clustered heatmap | diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index c4255e706b19f..ba5ae3b0cb52c 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -21,6 +21,7 @@
try: # mpl optional
import pandas.tseries.converter as conv
+
conv.register() # needs to override so set_xlim works with str/number
except ImportError:
pass
@@ -30,70 +31,72 @@
# to True.
mpl_stylesheet = {
'axes.axisbelow': True,
- 'axes.color_cycle': ['#348ABD',
- '#7A68A6',
- '#A60628',
- '#467821',
- '#CF4457',
- '#188487',
- '#E24A33'],
- 'axes.edgecolor': '#bcbcbc',
- 'axes.facecolor': '#eeeeee',
- 'axes.grid': True,
- 'axes.labelcolor': '#555555',
- 'axes.labelsize': 'large',
- 'axes.linewidth': 1.0,
- 'axes.titlesize': 'x-large',
- 'figure.edgecolor': 'white',
- 'figure.facecolor': 'white',
- 'figure.figsize': (6.0, 4.0),
- 'figure.subplot.hspace': 0.5,
- 'font.family': 'monospace',
- 'font.monospace': ['Andale Mono',
- 'Nimbus Mono L',
- 'Courier New',
- 'Courier',
- 'Fixed',
- 'Terminal',
- 'monospace'],
- 'font.size': 10,
- 'interactive': True,
- 'keymap.all_axes': ['a'],
- 'keymap.back': ['left', 'c', 'backspace'],
- 'keymap.forward': ['right', 'v'],
- 'keymap.fullscreen': ['f'],
- 'keymap.grid': ['g'],
- 'keymap.home': ['h', 'r', 'home'],
- 'keymap.pan': ['p'],
- 'keymap.save': ['s'],
- 'keymap.xscale': ['L', 'k'],
- 'keymap.yscale': ['l'],
- 'keymap.zoom': ['o'],
- 'legend.fancybox': True,
- 'lines.antialiased': True,
- 'lines.linewidth': 1.0,
- 'patch.antialiased': True,
- 'patch.edgecolor': '#EEEEEE',
- 'patch.facecolor': '#348ABD',
- 'patch.linewidth': 0.5,
- 'toolbar': 'toolbar2',
- 'xtick.color': '#555555',
- 'xtick.direction': 'in',
- 'xtick.major.pad': 6.0,
- 'xtick.major.size': 0.0,
- 'xtick.minor.pad': 6.0,
- 'xtick.minor.size': 0.0,
- 'ytick.color': '#555555',
- 'ytick.direction': 'in',
- 'ytick.major.pad': 6.0,
- 'ytick.major.size': 0.0,
- 'ytick.minor.pad': 6.0,
- 'ytick.minor.size': 0.0
+ 'axes.color_cycle': ['#348ABD',
+ '#7A68A6',
+ '#A60628',
+ '#467821',
+ '#CF4457',
+ '#188487',
+ '#E24A33'],
+ 'axes.edgecolor': '#bcbcbc',
+ 'axes.facecolor': '#eeeeee',
+ 'axes.grid': True,
+ 'axes.labelcolor': '#555555',
+ 'axes.labelsize': 'large',
+ 'axes.linewidth': 1.0,
+ 'axes.titlesize': 'x-large',
+ 'figure.edgecolor': 'white',
+ 'figure.facecolor': 'white',
+ 'figure.figsize': (6.0, 4.0),
+ 'figure.subplot.hspace': 0.5,
+ 'font.family': 'monospace',
+ 'font.monospace': ['Andale Mono',
+ 'Nimbus Mono L',
+ 'Courier New',
+ 'Courier',
+ 'Fixed',
+ 'Terminal',
+ 'monospace'],
+ 'font.size': 10,
+ 'interactive': True,
+ 'keymap.all_axes': ['a'],
+ 'keymap.back': ['left', 'c', 'backspace'],
+ 'keymap.forward': ['right', 'v'],
+ 'keymap.fullscreen': ['f'],
+ 'keymap.grid': ['g'],
+ 'keymap.home': ['h', 'r', 'home'],
+ 'keymap.pan': ['p'],
+ 'keymap.save': ['s'],
+ 'keymap.xscale': ['L', 'k'],
+ 'keymap.yscale': ['l'],
+ 'keymap.zoom': ['o'],
+ 'legend.fancybox': True,
+ 'lines.antialiased': True,
+ 'lines.linewidth': 1.0,
+ 'patch.antialiased': True,
+ 'patch.edgecolor': '#EEEEEE',
+ 'patch.facecolor': '#348ABD',
+ 'patch.linewidth': 0.5,
+ 'toolbar': 'toolbar2',
+ 'xtick.color': '#555555',
+ 'xtick.direction': 'in',
+ 'xtick.major.pad': 6.0,
+ 'xtick.major.size': 0.0,
+ 'xtick.minor.pad': 6.0,
+ 'xtick.minor.size': 0.0,
+ 'ytick.color': '#555555',
+ 'ytick.direction': 'in',
+ 'ytick.major.pad': 6.0,
+ 'ytick.major.size': 0.0,
+ 'ytick.minor.pad': 6.0,
+ 'ytick.minor.size': 0.0
}
+
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
+
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
color=None):
import matplotlib.pyplot as plt
@@ -101,6 +104,7 @@ def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
if color is None and colormap is not None:
if isinstance(colormap, compat.string_types):
import matplotlib.cm as cm
+
cmap = colormap
colormap = cm.get_cmap(colormap)
if colormap is None:
@@ -118,6 +122,7 @@ def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
colors = list(colors)
elif color_type == 'random':
import random
+
def random_color(column):
random.seed(column)
return [random.random() for _ in range(3)]
@@ -127,7 +132,7 @@ def random_color(column):
raise NotImplementedError
if len(colors) != num_colors:
- multiple = num_colors//len(colors) - 1
+ multiple = num_colors // len(colors) - 1
mod = num_colors % len(colors)
colors += multiple * colors
@@ -135,6 +140,7 @@ def random_color(column):
return colors
+
class _Options(dict):
"""
Stores pandas plotting options.
@@ -262,6 +268,7 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
+
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
@@ -279,9 +286,9 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
_label_axis(ax, kind='y', label=a, position='left')
- if j!= 0:
+ if j != 0:
ax.yaxis.set_visible(False)
- if i != n-1:
+ if i != n - 1:
ax.xaxis.set_visible(False)
for ax in axes.flat:
@@ -290,10 +297,11 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
return axes
-def _label_axis(ax, kind='x', label='', position='top',
- ticks=True, rotate=False):
+def _label_axis(ax, kind='x', label='', position='top',
+ ticks=True, rotate=False):
from matplotlib.artist import setp
+
if kind == 'x':
ax.set_xlabel(label, visible=True)
ax.xaxis.set_visible(True)
@@ -310,21 +318,22 @@ def _label_axis(ax, kind='x', label='', position='top',
return
-
-
-
def _gca():
import matplotlib.pyplot as plt
+
return plt.gca()
def _gcf():
import matplotlib.pyplot as plt
+
return plt.gcf()
+
def _get_marker_compat(marker):
import matplotlib.lines as mlines
import matplotlib as mpl
+
if mpl.__version__ < '1.1.0' and marker == '.':
return 'o'
if marker not in mlines.lineMarkers:
@@ -450,6 +459,7 @@ def f(x):
if len(amplitudes) % 2 != 0:
result += amplitudes[-1] * sin(harmonic * x)
return result
+
return f
n = len(data)
@@ -685,6 +695,7 @@ def autocorrelation_plot(series, ax=None):
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
+
n = len(series)
data = np.asarray(series)
if ax is None:
@@ -694,6 +705,7 @@ def autocorrelation_plot(series, ax=None):
def r(h):
return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
+
x = np.arange(n) + 1
y = lmap(r, x)
z95 = 1.959963984540054
@@ -735,6 +747,7 @@ def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None,
-------
axes: collection of Matplotlib Axes
"""
+
def plot_group(group, ax):
ax.hist(group.dropna().values, bins=bins, **kwargs)
@@ -816,6 +829,7 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=True,
def _validate_color_args(self):
from pandas import DataFrame
+
if 'color' not in self.kwds and 'colors' in self.kwds:
warnings.warn(("'colors' is being deprecated. Please use 'color'"
"instead of 'colors'"))
@@ -823,13 +837,14 @@ def _validate_color_args(self):
self.kwds['color'] = colors
if ('color' in self.kwds and
- (isinstance(self.data, Series) or
- isinstance(self.data, DataFrame) and len(self.data.columns) == 1)):
+ (isinstance(self.data, Series) or
+ isinstance(self.data, DataFrame) and len(
+ self.data.columns) == 1)):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds or 'colors' in self.kwds) and \
- self.colormap is not None:
+ self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
@@ -843,6 +858,7 @@ def _validate_color_args(self):
def _iter_data(self):
from pandas.core.frame import DataFrame
+
if isinstance(self.data, (Series, np.ndarray)):
yield self.label, np.asarray(self.data)
elif isinstance(self.data, DataFrame):
@@ -1017,6 +1033,7 @@ def legend_title(self):
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
+
return plt
_need_to_set_index = False
@@ -1099,6 +1116,7 @@ def _get_ax(self, i):
def on_right(self, i):
from pandas.core.frame import DataFrame
+
if isinstance(self.secondary_y, bool):
return self.secondary_y
@@ -1126,6 +1144,7 @@ def _get_style(self, i, col_name):
def _get_colors(self):
from pandas.core.frame import DataFrame
+
if isinstance(self.data, DataFrame):
num_colors = len(self.data.columns)
else:
@@ -1150,13 +1169,14 @@ def _get_marked_label(self, label, col_num):
class KdePlot(MPLPlot):
def __init__(self, data, bw_method=None, ind=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
- self.bw_method=bw_method
- self.ind=ind
+ self.bw_method = bw_method
+ self.ind = ind
def _make_plot(self):
from scipy.stats import gaussian_kde
from scipy import __version__ as spv
from distutils.version import LooseVersion
+
plotf = self._get_plot_function()
colors = self._get_colors()
for i, (label, y) in enumerate(self._iter_data()):
@@ -1201,12 +1221,13 @@ def _post_plot_logic(self):
for ax in self.axes:
ax.legend(loc='best')
+
class ScatterPlot(MPLPlot):
def __init__(self, data, x, y, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
self.kwds.setdefault('c', self.plt.rcParams['patch.facecolor'])
if x is None or y is None:
- raise ValueError( 'scatter requires and x and y column')
+ raise ValueError('scatter requires and x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
@@ -1228,7 +1249,6 @@ def _post_plot_logic(self):
class LinePlot(MPLPlot):
-
def __init__(self, data, **kwargs):
self.mark_right = kwargs.pop('mark_right', True)
MPLPlot.__init__(self, data, **kwargs)
@@ -1238,6 +1258,7 @@ def __init__(self, data, **kwargs):
def _index_freq(self):
from pandas.core.frame import DataFrame
+
if isinstance(self.data, (Series, DataFrame)):
freq = getattr(self.data.index, 'freq', None)
if freq is None:
@@ -1259,9 +1280,11 @@ def _is_dynamic_freq(self, freq):
def _no_base(self, freq):
# hack this for 0.10.1, creating more technical debt...sigh
from pandas.core.frame import DataFrame
+
if (isinstance(self.data, (Series, DataFrame))
and isinstance(self.data.index, DatetimeIndex)):
import pandas.tseries.frequencies as freqmod
+
base = freqmod.get_freq(freq)
x = self.data.index
if (base <= freqmod.FreqGroup.FR_DAY):
@@ -1333,6 +1356,7 @@ def _make_plot(self):
def _make_ts_plot(self, data, **kwargs):
from pandas.tseries.plotting import tsplot
+
kwargs = kwargs.copy()
colors = self._get_colors()
@@ -1342,7 +1366,7 @@ def _make_ts_plot(self, data, **kwargs):
def _plot(data, col_num, ax, label, style, **kwds):
newlines = tsplot(data, plotf, ax=ax, label=label,
- style=style, **kwds)
+ style=style, **kwds)
ax.grid(self.grid)
lines.append(newlines[0])
@@ -1402,6 +1426,7 @@ def _maybe_convert_index(self, data):
# tsplot converts automatically, but don't want to convert index
# over and over for DataFrames
from pandas.core.frame import DataFrame
+
if (isinstance(data.index, DatetimeIndex) and
isinstance(data, DataFrame)):
freq = getattr(data.index, 'freq', None)
@@ -1455,7 +1480,6 @@ def _post_plot_logic(self):
class BarPlot(MPLPlot):
-
_default_rot = {'bar': 90, 'barh': 0}
def __init__(self, data, **kwargs):
@@ -1467,7 +1491,7 @@ def __init__(self, data, **kwargs):
else:
self.tickoffset = 0.375
self.bar_width = 0.5
- self.log = kwargs.pop('log',False)
+ self.log = kwargs.pop('log', False)
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
@@ -1478,7 +1502,7 @@ def _args_adjust(self):
def bar_f(self):
if self.kind == 'bar':
def f(ax, x, y, w, start=None, **kwds):
- return ax.bar(x, y, w, bottom=start,log=self.log, **kwds)
+ return ax.bar(x, y, w, bottom=start, log=self.log, **kwds)
elif self.kind == 'barh':
def f(ax, x, y, w, start=None, log=self.log, **kwds):
return ax.barh(x, y, w, left=start, **kwds)
@@ -1519,7 +1543,7 @@ def _make_plot(self):
start = 0 if mpl_le_1_2_1 else None
if self.subplots:
- rect = bar_f(ax, self.ax_pos, y, self.bar_width,
+ rect = bar_f(ax, self.ax_pos, y, self.bar_width,
start=start, **kwds)
ax.set_title(label)
elif self.stacked:
@@ -1567,8 +1591,8 @@ def _post_plot_logic(self):
if name is not None:
ax.set_ylabel(name)
- # if self.subplots and self.legend:
- # self.axes[0].legend(loc='best')
+ # if self.subplots and self.legend:
+ # self.axes[0].legend(loc='best')
class BoxPlot(MPLPlot):
@@ -1585,7 +1609,6 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,
xlim=None, ylim=None, logx=False, logy=False, xticks=None,
yticks=None, kind='line', sort_columns=False, fontsize=None,
secondary_y=False, **kwds):
-
"""
Make line, bar, or scatter plots of DataFrame series with the index on the x-axis
using matplotlib / pylab.
@@ -1664,8 +1687,8 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,
raise ValueError('Invalid chart type given %s' % kind)
if kind == 'scatter':
- plot_obj = klass(frame, x=x, y=y, kind=kind, subplots=subplots,
- rot=rot,legend=legend, ax=ax, style=style,
+ plot_obj = klass(frame, x=x, y=y, kind=kind, subplots=subplots,
+ rot=rot, legend=legend, ax=ax, style=style,
fontsize=fontsize, use_index=use_index, sharex=sharex,
sharey=sharey, xticks=xticks, yticks=yticks,
xlim=xlim, ylim=ylim, title=title, grid=grid,
@@ -1695,7 +1718,8 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,
else:
plot_obj = klass(frame, kind=kind, subplots=subplots, rot=rot,
- legend=legend, ax=ax, style=style, fontsize=fontsize,
+ legend=legend, ax=ax, style=style,
+ fontsize=fontsize,
use_index=use_index, sharex=sharex, sharey=sharey,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
title=title, grid=grid, figsize=figsize, logx=logx,
@@ -1775,6 +1799,7 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None,
be ignored.
"""
import matplotlib.pyplot as plt
+
if ax is None and len(plt.get_fignums()) > 0:
ax = _gca()
if ax.get_yaxis().get_ticks_position().strip().lower() == 'right':
@@ -1829,6 +1854,7 @@ def boxplot(data, column=None, by=None, ax=None, fontsize=None,
ax : matplotlib.axes.AxesSubplot
"""
from pandas import Series, DataFrame
+
if isinstance(data, Series):
data = DataFrame({'x': data})
column = 'x'
@@ -1838,11 +1864,12 @@ def _get_colors():
return _get_standard_colors(color=kwds.get('color'), num_colors=1)
def maybe_color_bp(bp):
- if 'color' not in kwds :
+ if 'color' not in kwds:
from matplotlib.artist import setp
- setp(bp['boxes'],color=colors[0],alpha=1)
- setp(bp['whiskers'],color=colors[0],alpha=1)
- setp(bp['medians'],color=colors[2],alpha=1)
+
+ setp(bp['boxes'], color=colors[0], alpha=1)
+ setp(bp['whiskers'], color=colors[0], alpha=1)
+ setp(bp['medians'], color=colors[2], alpha=1)
def plot_group(grouped, ax):
keys, values = zip(*grouped)
@@ -1916,7 +1943,8 @@ def format_date_labels(ax, rot):
pass
-def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False, **kwargs):
+def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False,
+ **kwargs):
"""
Make a scatter plot from two DataFrame columns
@@ -2018,6 +2046,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
return axes
import matplotlib.pyplot as plt
+
n = len(data.columns)
if layout is not None:
@@ -2026,7 +2055,9 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
rows, cols = layout
if rows * cols < n:
- raise ValueError('Layout of %sx%s is incompatible with %s columns' % (rows, cols, n))
+ raise ValueError(
+ 'Layout of %sx%s is incompatible with %s columns' % (
+ rows, cols, n))
else:
rows, cols = 1, 1
while rows * cols < n:
@@ -2100,9 +2131,9 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
if kwds.get('layout', None) is not None:
raise ValueError("The 'layout' keyword is not supported when "
"'by' is None")
- # hack until the plotting interface is a bit more unified
+ # hack until the plotting interface is a bit more unified
fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else
- plt.figure(figsize=figsize))
+ plt.figure(figsize=figsize))
if (figsize is not None and tuple(figsize) !=
tuple(fig.get_size_inches())):
fig.set_size_inches(*figsize, forward=True)
@@ -2194,6 +2225,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
ret[key] = d
else:
from pandas.tools.merge import concat
+
keys, frames = zip(*grouped)
if grouped.axis == 0:
df = concat(frames, keys=keys, axis=1)
@@ -2488,6 +2520,424 @@ def _maybe_convert_date(x):
x = conv_func(x)
return x
+# helper for cleaning up axes by removing ticks, tick labels, frame, etc.
+def _clean_axis(ax):
+ """Remove ticks, tick labels, and frame from axis"""
+ ax.get_xaxis().set_ticks([])
+ ax.get_yaxis().set_ticks([])
+ for sp in ax.spines.values():
+ sp.set_visible(False)
+
+
+def _color_list_to_matrix_and_cmap(color_list, ind, row=True):
+ """
+ For 'heatmap()'
+ This only works for 1-column color lists..
+ TODO: Support multiple color labels on an element in the heatmap
+ """
+ import matplotlib as mpl
+
+ colors = set(color_list)
+ col_to_value = dict((col, i) for i, col in enumerate(colors))
+
+ # ind = column_dendrogram_distances['leaves']
+ matrix = np.array([col_to_value[col] for col in color_list])[ind]
+ # Is this row-side or column side?
+ if row:
+ new_shape = (len(color_list), 1)
+ else:
+ new_shape = (1, len(color_list))
+ matrix = matrix.reshape(new_shape)
+
+ cmap = mpl.colors.ListedColormap(colors)
+ return matrix, cmap
+
+
+def heatmap(df,
+ title=None,
+ title_fontsize=12,
+ colorbar_label='values',
+ col_side_colors=None,
+ row_side_colors=None,
+ color_scale='linear',
+ cmap=None,
+ linkage_method='average',
+ figsize=None,
+ label_rows=True,
+ label_cols=True,
+ vmin=None,
+ vmax=None,
+ xlabel_fontsize=12,
+ ylabel_fontsize=10,
+ cluster_cols=True,
+ cluster_rows=True,
+ linewidth=0,
+ edgecolor='white',
+ plot_df=None,
+ colorbar_ticklabels_fontsize=10,
+ colorbar_loc="upper left",
+ use_fastcluster=False,
+ metric='euclidean'):
+ """
+ @author Olga Botvinnik olga.botvinnik@gmail.com
+
+ This is liberally borrowed (with permission) from http://bit.ly/1eWcYWc
+ Many thanks to Christopher DeBoever and Mike Lovci for providing heatmap
+ guidance.
+
+
+ :param title_fontsize:
+ :param colorbar_ticklabels_fontsize:
+ :param colorbar_loc: Can be 'upper left' (in the corner), 'right',
+ or 'bottom'
+
+
+ :param df: The dataframe you want to cluster on
+ :param title: Title of the figure
+ :param colorbar_label: What to colorbar (color scale of the heatmap)
+ :param col_side_colors: Label the columns with a color
+ :param row_side_colors: Label the rows with a color
+ :param color_scale: Either 'linear' or 'log'
+ :param cmap: A matplotlib colormap, default is mpl.cm.Blues_r if data is
+ sequential, or mpl.cm.RdBu_r if data is divergent (has both positive and
+ negative numbers)
+ :param figsize: Size of the figure. The default is a function of the
+ dataframe size.
+ :param label_rows: Can be boolean or a list of strings, with exactly the
+ length of the number of rows in df.
+ :param label_cols: Can be boolean or a list of strings, with exactly the
+ length of the number of columns in df.
+ :param col_labels: If True, label with df.columns. If False, unlabeled.
+ Else, this can be an iterable to relabel the columns with labels of your own
+ choosing. This is helpful if you have duplicate column names and pandas
+ won't let you reindex it.
+ :param row_labels: If True, label with df.index. If False, unlabeled.
+ Else, this can be an iterable to relabel the row names with labels of your
+ own choosing. This is helpful if you have duplicate index names and pandas
+ won't let you reindex it.
+ :param xlabel_fontsize: Default 12pt
+ :param ylabel_fontsize: Default 10pt
+ :param cluster_cols: Boolean, whether or not to cluster the columns
+ :param cluster_rows:
+ :param plot_df: The dataframe you want to plot. This can contain NAs and
+ other nasty things.
+ :param row_linkage_method:
+ :param col_linkage_method:
+ :param vmin: Minimum value to plot on heatmap
+ :param vmax: Maximum value to plot on heatmap
+ :param linewidth: Linewidth of lines around heatmap box elements
+ (default 0)
+ :param edgecolor: Color of lines around heatmap box elements (default
+ white)
+ """
+ #@return: fig, row_dendrogram, col_dendrogram
+ #@rtype: matplotlib.figure.Figure, dict, dict
+ #@raise TypeError:
+ import matplotlib.pyplot as plt
+ import matplotlib.gridspec as gridspec
+ import scipy.spatial.distance as distance
+ import scipy.cluster.hierarchy as sch
+ import matplotlib as mpl
+ from collections import Iterable
+
+ #if cluster
+
+ if (df.shape[0] > 1000 or df.shape[1] > 1000) or use_fastcluster:
+ try:
+ import fastcluster
+ linkage_function = fastcluster.linkage
+ except ImportError:
+ raise warnings.warn('Module "fastcluster" not found. The '
+ 'dataframe '
+ 'provided has '
+ 'shape {}, and one '
+ 'of the dimensions has greater than 1000 '
+ 'variables. Calculating linkage on such a '
+ 'matrix will take a long time with vanilla '
+ '"scipy.cluster.hierarchy.linkage", and we '
+ 'suggest fastcluster for such large datasets'\
+ .format(df.shape), RuntimeWarning)
+ else:
+ linkage_function = sch.linkage
+
+ almost_black = '#262626'
+ sch.set_link_color_palette([almost_black])
+ if plot_df is None:
+ plot_df = df
+
+ if (plot_df.index != df.index).any():
+ raise ValueError('plot_df must have the exact same indices as df')
+ if (plot_df.columns != df.columns).any():
+ raise ValueError('plot_df must have the exact same columns as df')
+ # make norm
+
+ # Check if the matrix has values both above and below zero, or only above
+ # or only below zero. If both above and below, then the data is
+ # "divergent" and we will use a colormap with 0 centered at white,
+ # negative values blue, and positive values red. Otherwise, we will use
+ # the YlGnBu colormap.
+ divergent = df.max().max() > 0 and df.min().min() < 0
+
+ if color_scale == 'log':
+ if vmin is None:
+ vmin = max(np.floor(df.dropna(how='all').min().dropna().min()), 1e-10)
+ if vmax is None:
+ vmax = np.ceil(df.dropna(how='all').max().dropna().max())
+ my_norm = mpl.colors.LogNorm(vmin, vmax)
+ elif divergent:
+ abs_max = abs(df.max().max())
+ abs_min = abs(df.min().min())
+ vmaxx = max(abs_max, abs_min)
+ my_norm = mpl.colors.Normalize(vmin=-vmaxx, vmax=vmaxx)
+ else:
+ my_norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
+
+ if cmap is None:
+ cmap = mpl.cm.RdBu_r if divergent else mpl.cm.YlGnBu
+ cmap.set_bad('white')
+
+ # TODO: Add optimal leaf ordering for clusters
+ # TODO: if color_scale is 'log', should distance also be on np.log(df)?
+ # calculate pairwise distances for rows
+ if color_scale == 'log':
+ df = np.log10(df)
+ row_pairwise_dists = distance.squareform(distance.pdist(df,
+ metric=metric))
+ row_linkage = linkage_function(row_pairwise_dists, method=linkage_method)
+
+ # calculate pairwise distances for columns
+ col_pairwise_dists = distance.squareform(distance.pdist(df.T,
+ metric=metric))
+ # cluster
+ col_linkage = linkage_function(col_pairwise_dists, method=linkage_method)
+
+ # heatmap with row names
+
+ def get_width_ratios(shape, side_colors,
+ colorbar_loc, dimension, side_colors_ratio=0.05):
+ """
+ Figures out the ratio of each subfigure within the larger figure.
+ The dendrograms currently are 2*half_dendrogram, which is a proportion of
+ the dataframe shape. Right now, this only supports the colormap in
+ the upper left. The full figure map looks like:
+
+ 0.1 0.1 0.05 1.0
+ 0.1 cb column
+ 0.1 dendrogram
+ 0.05 col colors
+ | r d r
+ | o e o
+ | w n w
+ | d
+ 1.0| r c heatmap
+ | o o
+ | g l
+ | r o
+ | a r
+ | m s
+
+ The colorbar is half_dendrogram of the whitespace in the corner between
+ the row and column dendrogram. Otherwise, it's too big and its
+ corners touch the heatmap, which I didn't like.
+
+ For example, if there are side_colors, need to provide an extra value
+ in the ratio tuples, with the width side_colors_ratio. But if there
+ aren't any side colors, then the tuple is of size 3 (half_dendrogram,
+ half_dendrogram, 1.0), and if there are then the tuple is of size 4 (
+ half_dendrogram, half_dendrogram, 0.05, 1.0)
+
+ :param side_colors:
+ :type side_colors:
+ :param colorbar_loc:
+ :type colorbar_loc:
+ :param dimension:
+ :type dimension:
+ :param side_colors_ratio:
+ :type side_colors_ratio:
+ :return:
+ :rtype:
+ """
+ i = 0 if dimension == 'height' else 1
+ half_dendrogram = shape[i] * 0.1/shape[i]
+ if colorbar_loc not in ('upper left', 'right', 'bottom'):
+ raise AssertionError("{} is not a valid 'colorbar_loc' (valid: "
+ "'upper left', 'right', 'bottom')".format(
+ colorbar_loc))
+ if dimension not in ('height', 'width'):
+ raise AssertionError("{} is not a valid 'dimension' (valid: "
+ "'height', 'width')".format(
+ dimension))
+
+ ratios = [half_dendrogram, half_dendrogram]
+ if side_colors:
+ ratios += [side_colors_ratio]
+
+ if (colorbar_loc == 'right' and dimension == 'width') or (
+ colorbar_loc == 'bottom' and dimension == 'height'):
+ return ratios + [1, 0.05]
+ else:
+ return ratios + [1]
+
+
+ width_ratios = get_width_ratios(df.shape,
+ row_side_colors,
+ colorbar_loc, dimension='width')
+ height_ratios = get_width_ratios(df.shape,
+ col_side_colors,
+ colorbar_loc, dimension='height')
+ nrows = 3 if col_side_colors is None else 4
+ ncols = 3 if row_side_colors is None else 4
+
+ width = df.shape[1] * 0.25
+ height = min(df.shape[0] * .75, 40)
+ if figsize is None:
+ figsize = (width, height)
+ #print figsize
+
+
+
+ fig = plt.figure(figsize=figsize)
+ heatmap_gridspec = \
+ gridspec.GridSpec(nrows, ncols, wspace=0.0, hspace=0.0,
+ width_ratios=width_ratios,
+ height_ratios=height_ratios)
+ # print heatmap_gridspec
+
+ ### col dendrogram ###
+ col_dendrogram_ax = fig.add_subplot(heatmap_gridspec[1, ncols - 1])
+ if cluster_cols:
+ col_dendrogram = sch.dendrogram(col_linkage,
+ color_threshold=np.inf,
+ color_list=[almost_black])
+ else:
+ col_dendrogram = {'leaves': list(range(df.shape[1]))}
+ _clean_axis(col_dendrogram_ax)
+
+ # TODO: Allow for array of color labels
+ ### col colorbar ###
+ if col_side_colors is not None:
+ column_colorbar_ax = fig.add_subplot(heatmap_gridspec[2, ncols - 1])
+ col_side_matrix, col_cmap = _color_list_to_matrix_and_cmap(
+ col_side_colors,
+ ind=col_dendrogram['leaves'],
+ row=False)
+ column_colorbar_ax_pcolormesh = column_colorbar_ax.pcolormesh(
+ col_side_matrix, cmap=col_cmap,
+ edgecolor=edgecolor, linewidth=linewidth)
+ column_colorbar_ax.set_xlim(0, col_side_matrix.shape[1])
+ _clean_axis(column_colorbar_ax)
+
+ ### row dendrogram ###
+ row_dendrogram_ax = fig.add_subplot(heatmap_gridspec[nrows - 1, 1])
+ if cluster_rows:
+ row_dendrogram = \
+ sch.dendrogram(row_linkage,
+ color_threshold=np.inf,
+ orientation='right',
+ color_list=[almost_black])
+ else:
+ row_dendrogram = {'leaves': list(range(df.shape[0]))}
+ _clean_axis(row_dendrogram_ax)
+
+ ### row colorbar ###
+ if row_side_colors is not None:
+ row_colorbar_ax = fig.add_subplot(heatmap_gridspec[nrows - 1, 2])
+ row_side_matrix, row_cmap = _color_list_to_matrix_and_cmap(
+ row_side_colors,
+ ind=row_dendrogram['leaves'],
+ row=True)
+ row_colorbar_ax.pcolormesh(row_side_matrix, cmap=row_cmap,
+ edgecolors=edgecolor, linewidth=linewidth)
+ row_colorbar_ax.set_ylim(0, row_side_matrix.shape[0])
+ _clean_axis(row_colorbar_ax)
+
+ ### heatmap ####
+ heatmap_ax = fig.add_subplot(heatmap_gridspec[nrows - 1, ncols - 1])
+ heatmap_ax_pcolormesh = \
+ heatmap_ax.pcolormesh(plot_df.ix[row_dendrogram['leaves'],
+ col_dendrogram['leaves']].values,
+ norm=my_norm, cmap=cmap,
+ edgecolor=edgecolor,
+ lw=linewidth)
+
+ heatmap_ax.set_ylim(0, df.shape[0])
+ heatmap_ax.set_xlim(0, df.shape[1])
+ _clean_axis(heatmap_ax)
+
+ ## row labels ##
+ if isinstance(label_rows, Iterable):
+ if len(label_rows) == df.shape[0]:
+ yticklabels = label_rows
+ label_rows = True
+ else:
+ raise AssertionError("Length of 'label_rows' must be the same as "
+ "df.shape[0] (len(label_rows)={}, df.shape["
+ "0]={})".format(len(label_rows), df.shape[0]))
+ elif label_rows:
+ yticklabels = df.index
+
+ if label_rows:
+ yticklabels = [yticklabels[i] for i in row_dendrogram['leaves']]
+ heatmap_ax.set_yticks(np.arange(df.shape[0]) + 0.5)
+ heatmap_ax.yaxis.set_ticks_position('right')
+ heatmap_ax.set_yticklabels(yticklabels, fontsize=ylabel_fontsize)
+
+ # Add title if there is one:
+ if title is not None:
+ col_dendrogram_ax.set_title(title, fontsize=title_fontsize)
+
+ ## col labels ##
+ if isinstance(label_cols, Iterable):
+ if len(label_cols) == df.shape[1]:
+ xticklabels = label_cols
+ label_cols = True
+ else:
+ raise AssertionError("Length of 'label_cols' must be the same as "
+ "df.shape[1] (len(label_cols)={}, df.shape["
+ "1]={})".format(len(label_cols), df.shape[1]))
+ elif label_cols:
+ xticklabels = df.columns
+
+ if label_cols:
+ xticklabels = [xticklabels[i] for i in col_dendrogram['leaves']]
+ heatmap_ax.set_xticks(np.arange(df.shape[1]) + 0.5)
+ xticklabels = heatmap_ax.set_xticklabels(xticklabels,
+ fontsize=xlabel_fontsize)
+ # rotate labels 90 degrees
+ for label in xticklabels:
+ label.set_rotation(90)
+
+ # remove the tick lines
+ for l in heatmap_ax.get_xticklines() + heatmap_ax.get_yticklines():
+ l.set_markersize(0)
+
+ ### scale colorbar ###
+ scale_colorbar_ax = fig.add_subplot(
+ heatmap_gridspec[0:(nrows - 1),
+ 0]) # colorbar for scale in upper left corner
+
+ # note that we could pass the norm explicitly with norm=my_norm
+ cb = fig.colorbar(heatmap_ax_pcolormesh,
+ cax=scale_colorbar_ax)
+ cb.set_label(colorbar_label)
+
+ # move ticks to left side of colorbar to avoid problems with tight_layout
+ cb.ax.yaxis.set_ticks_position('left')
+ cb.outline.set_linewidth(0)
+
+ ## Make colorbar narrower
+ #xmin, xmax, ymin, ymax = cb.ax.axis()
+ #cb.ax.set_xlim(xmin, xmax/0.2)
+
+ # make colorbar labels smaller
+ yticklabels = cb.ax.yaxis.get_ticklabels()
+ for t in yticklabels:
+ t.set_fontsize(colorbar_ticklabels_fontsize)
+
+ fig.tight_layout()
+ return fig, row_dendrogram, col_dendrogram
+
+
if __name__ == '__main__':
# import pandas.rpy.common as com
# sales = com.load_data('sanfrancisco.home.sales', package='nutshell')
@@ -2501,6 +2951,7 @@ def _maybe_convert_date(x):
import pandas.tools.plotting as plots
import pandas.core.frame as fr
+
reload(plots)
reload(fr)
from pandas.core.frame import DataFrame
| Hello there,
I'm working on a clustered heatmap figure for pandas, like R's `heatmap.2` which I used a ton when I was still in R-land.
I'm having some trouble wrapping my mind around `pandas.tools.plotting` but I have a draft version of this `heatmap` function with some initial parameters in my fork: https://github.com/olgabot/pandas/commit/4759d31666ca476a07c09a450e42a847abdc2b3d But I'm unsure how to proceed with plumbing this function into the current `pandas` plotting tools.
Here's a notebook of a simple and complex example: http://nbviewer.ipython.org/gist/olgabot/7801024
Also, unlike many other plotting functions, `heatmap` creates a whole figure. I currently return the `fig` instance along with the dendrogram objects for both the rows and columns in case the user wants to do some stats on them. It seems like for this function, accepting an `ax` argument (or even a `fig`) doesn't make any sense because there's at least 4 (up to 6 if you label columns or rows with colors) `ax` instances to create.
FYI besides adding documentation and tests and such I'm still working on:
- reasonable dendrogram sizing for when the dimensions are really large. Right now the dendrogram axis size is a proportion of the figure size, but this should change for even medium (100+) size datasets because then the dendrogram takes over the figure.
- Allowing for multiple colors labels for each row/column
- Adding optimal leaf ordering (important for time-dependent data)
- vmin/vmax for `pcolormesh`
- maybe add some padding between the dendrogram, colorbars, and heatmap?
- Other suggestions welcome!
I'm also still working through all the developer FAQs and such so I'm probably making tons of n00b mistakes so please point them out to me, especially if there are some pandas/python conventions I'm totally not following.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5646 | 2013-12-05T06:30:52Z | 2013-12-22T20:25:09Z | null | 2014-06-20T18:35:28Z |
TST: prevent stderr from leaking to console in util.testing | diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 1904b5f0be49d..8c5704e151638 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -145,7 +145,8 @@ def check_output(*popenargs, **kwargs): # shamelessly taken from Python 2.7 sou
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
- process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
+ process = subprocess.Popen(stdout=subprocess.PIPE,stderr=subprocess.PIPE,
+ *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
@@ -160,7 +161,7 @@ def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
- raise type(e)("%s, the 'locale -a' command cannot be foundon your "
+ raise type(e)("%s, the 'locale -a' command cannot be found on your "
"system" % e)
return raw_locales
| similar to https://github.com/pydata/pandas/pull/5627
Eliminate some more noise from tests; manifests only on systems where `locale` isn't available.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5644 | 2013-12-04T22:11:11Z | 2013-12-04T22:30:18Z | 2013-12-04T22:30:18Z | 2014-06-21T13:28:05Z |
BUG: mixed column selection with dups is buggy (GH5639) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 3f148748081b9..97b86703e73b8 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -768,7 +768,7 @@ Bug Fixes
- Fixed segfault on ``isnull(MultiIndex)`` (now raises an error instead)
(:issue:`5123`, :issue:`5125`)
- Allow duplicate indices when performing operations that align
- (:issue:`5185`)
+ (:issue:`5185`, :issue:`5639`)
- Compound dtypes in a constructor raise ``NotImplementedError``
(:issue:`5191`)
- Bug in comparing duplicate frames (:issue:`4421`) related
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 959d0186030cd..e8b18ae93b287 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -250,10 +250,9 @@ def reindex_items_from(self, new_ref_items, indexer=None, method=None,
else:
masked_idx = indexer[indexer != -1]
+ new_items = self.items.take(masked_idx)
new_values = com.take_nd(self.values, masked_idx, axis=0,
allow_fill=False)
- new_items = self.items.take(masked_idx)
-
# fill if needed
if needs_fill:
new_values = com.interpolate_2d(new_values, method=method,
@@ -3192,7 +3191,8 @@ def reindex_items(self, new_items, indexer=None, copy=True,
else:
# unique
- if self.axes[0].is_unique:
+ if self.axes[0].is_unique and new_items.is_unique:
+
for block in self.blocks:
newb = block.reindex_items_from(new_items, copy=copy)
@@ -3201,7 +3201,7 @@ def reindex_items(self, new_items, indexer=None, copy=True,
# non-unique
else:
- rl = self._set_ref_locs()
+ rl = self._set_ref_locs(do_refs='force')
for i, idx in enumerate(indexer):
blk, lidx = rl[idx]
item = new_items.take([i])
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 9d9ebc1b95830..902440ec8e184 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -3275,6 +3275,15 @@ def check(result, expected=None):
expected = DataFrame([[False,True],[True,False],[False,False],[True,False]],columns=['A','A'])
assert_frame_equal(result,expected)
+ # mixed column selection
+ # GH 5639
+ dfbool = DataFrame({'one' : Series([True, True, False], index=['a', 'b', 'c']),
+ 'two' : Series([False, False, True, False], index=['a', 'b', 'c', 'd']),
+ 'three': Series([False, True, True, True], index=['a', 'b', 'c', 'd'])})
+ expected = pd.concat([dfbool['one'],dfbool['three'],dfbool['one']],axis=1)
+ result = dfbool[['one', 'three', 'one']]
+ check(result,expected)
+
def test_insert_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
| closes #5639
| https://api.github.com/repos/pandas-dev/pandas/pulls/5640 | 2013-12-04T14:16:30Z | 2013-12-04T14:50:54Z | 2013-12-04T14:50:54Z | 2014-06-19T11:13:37Z |
VIS: added ability to plot DataFrames and Series with errorbars | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 7cf2bec0f4144..b91e307bf7c69 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -59,6 +59,8 @@ New features
Date is used primarily in astronomy and represents the number of days from
noon, January 1, 4713 BC. Because nanoseconds are used to define the time
in pandas the actual range of dates that you can use is 1678 AD to 2262 AD. (:issue:`4041`)
+- Added error bar support to the ``.plot`` method of ``DataFrame`` and ``Series`` (:issue:`3796`)
+
API Changes
~~~~~~~~~~~
@@ -126,9 +128,9 @@ API Changes
DataFrame returned by ``GroupBy.apply`` (:issue:`6124`). This facilitates
``DataFrame.stack`` operations where the name of the column index is used as
the name of the inserted column containing the pivoted data.
-
-- The :func:`pivot_table`/:meth:`DataFrame.pivot_table` and :func:`crosstab` functions
- now take arguments ``index`` and ``columns`` instead of ``rows`` and ``cols``. A
+
+- The :func:`pivot_table`/:meth:`DataFrame.pivot_table` and :func:`crosstab` functions
+ now take arguments ``index`` and ``columns`` instead of ``rows`` and ``cols``. A
``FutureWarning`` is raised to alert that the old ``rows`` and ``cols`` arguments
will not be supported in a future release (:issue:`5505`)
diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt
index ea321cbab545a..463e4f2a3a49c 100644
--- a/doc/source/v0.14.0.txt
+++ b/doc/source/v0.14.0.txt
@@ -286,6 +286,20 @@ You can use a right-hand-side of an alignable object as well.
df2.loc[idx[:,:,['C1','C3']],:] = df2*1000
df2
+Plotting With Errorbars
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Plotting with error bars is now supported in the ``.plot`` method of ``DataFrame`` and ``Series`` objects (:issue:`3796`).
+
+x and y errorbars are supported and can be supplied using the ``xerr`` and ``yerr`` keyword arguments to ``.plot()`` The error values can be specified using a variety of formats.
+
+- As a ``DataFrame`` or ``dict`` of errors with one or more of the column names (or dictionary keys) matching one or more of the column names of the plotting ``DataFrame`` or matching the ``name`` attribute of the ``Series``
+- As a ``str`` indicating which of the columns of plotting ``DataFrame`` contain the error values
+- As raw values (``list``, ``tuple``, or ``np.ndarray``). Must be the same length as the plotting ``DataFrame``/``Series``
+
+Asymmetrical error bars are also supported, however raw error values must be provided in this case. For a ``M`` length ``Series``, a ``Mx2`` array should be provided indicating lower and upper (or left and right) errors. For a ``MxN`` ``DataFrame``, asymmetrical errors should be in a ``Mx2xN`` array.
+
+
Prior Version Deprecations/Changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index 5827f2e971e42..bc0bf69df1282 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -381,6 +381,40 @@ columns:
plt.close('all')
+.. _visualization.errorbars:
+
+Plotting With Error Bars
+~~~~~~~~~~~~~~~~~~~~~~~~
+Plotting with error bars is now supported in the ``.plot`` method of ``DataFrame`` and ``Series`` objects.
+
+x and y errorbars are supported and be supplied using the ``xerr`` and ``yerr`` keyword arguments to ``.plot()`` The error values can be specified using a variety of formats.
+
+- As a ``DataFrame`` or ``dict`` of errors with column names matching the ``columns`` attribute of the plotting ``DataFrame`` or matching the ``name`` attribute of the ``Series``
+- As a ``str`` indicating which of the columns of plotting ``DataFrame`` contain the error values
+- As raw values (``list``, ``tuple``, or ``np.ndarray``). Must be the same length as the plotting ``DataFrame``/``Series``
+
+Asymmetrical error bars are also supported, however raw error values must be provided in this case. For a ``M`` length ``Series``, a ``Mx2`` array should be provided indicating lower and upper (or left and right) errors. For a ``MxN`` ``DataFrame``, asymmetrical errors should be in a ``Mx2xN`` array.
+
+Here is an example of one way to easily plot group means with standard deviations from the raw data.
+
+.. ipython:: python
+
+ # Generate the data
+ ix3 = pd.MultiIndex.from_arrays([['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'], ['foo', 'foo', 'bar', 'bar', 'foo', 'foo', 'bar', 'bar']], names=['letter', 'word'])
+ df3 = pd.DataFrame({'data1': [3, 2, 4, 3, 2, 4, 3, 2], 'data2': [6, 5, 7, 5, 4, 5, 6, 5]}, index=ix3)
+
+ # Group by index labels and take the means and standard deviations for each group
+ gp3 = df3.groupby(level=('letter', 'word'))
+ means = gp3.mean()
+ errors = gp3.std()
+ means
+ errors
+
+ # Plot
+ fig, ax = plt.subplots()
+ @savefig errorbar_example.png
+ means.plot(yerr=errors, ax=ax, kind='bar')
+
.. _visualization.scatter_matrix:
Scatter plot matrix
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 30ba5cd5a70fe..2752d12765fad 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -360,6 +360,35 @@ def test_dup_datetime_index_plot(self):
s = Series(values, index=index)
_check_plot_works(s.plot)
+ @slow
+ def test_errorbar_plot(self):
+
+ s = Series(np.arange(10))
+ s_err = np.random.randn(10)
+
+ # test line and bar plots
+ kinds = ['line', 'bar']
+ for kind in kinds:
+ _check_plot_works(s.plot, yerr=Series(s_err), kind=kind)
+ _check_plot_works(s.plot, yerr=s_err, kind=kind)
+ _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind)
+
+ _check_plot_works(s.plot, xerr=s_err)
+
+ # test time series plotting
+ ix = date_range('1/1/2000', '1/1/2001', freq='M')
+ ts = Series(np.arange(12), index=ix)
+ ts_err = Series(np.random.randn(12), index=ix)
+
+ _check_plot_works(ts.plot, yerr=ts_err)
+
+ # check incorrect lengths and types
+ with tm.assertRaises(ValueError):
+ s.plot(yerr=np.arange(11))
+
+ s_err = ['zzz']*10
+ with tm.assertRaises(TypeError):
+ s.plot(yerr=s_err)
@tm.mplskip
class TestDataFramePlots(tm.TestCase):
@@ -1015,6 +1044,104 @@ def test_allow_cmap(self):
df.plot(kind='hexbin', x='A', y='B', cmap='YlGn',
colormap='BuGn')
+ def test_errorbar_plot(self):
+
+ d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
+ df = DataFrame(d)
+ d_err = {'x': np.ones(12)*0.2, 'y': np.ones(12)*0.4}
+ df_err = DataFrame(d_err)
+
+ # check line plots
+ _check_plot_works(df.plot, yerr=df_err, logy=True)
+ _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
+
+ kinds = ['line', 'bar', 'barh']
+ for kind in kinds:
+ _check_plot_works(df.plot, yerr=df_err['x'], kind=kind)
+ _check_plot_works(df.plot, yerr=d_err, kind=kind)
+ _check_plot_works(df.plot, yerr=df_err, xerr=df_err, kind=kind)
+ _check_plot_works(df.plot, yerr=df_err['x'], xerr=df_err['x'], kind=kind)
+ _check_plot_works(df.plot, yerr=df_err, xerr=df_err, subplots=True, kind=kind)
+
+ _check_plot_works((df+1).plot, yerr=df_err, xerr=df_err, kind='bar', log=True)
+
+ # yerr is raw error values
+ _check_plot_works(df['y'].plot, yerr=np.ones(12)*0.4)
+ _check_plot_works(df.plot, yerr=np.ones((2, 12))*0.4)
+
+ # yerr is column name
+ df['yerr'] = np.ones(12)*0.2
+ _check_plot_works(df.plot, y='y', x='x', yerr='yerr')
+
+ with tm.assertRaises(ValueError):
+ df.plot(yerr=np.random.randn(11))
+
+ df_err = DataFrame({'x': ['zzz']*12, 'y': ['zzz']*12})
+ with tm.assertRaises(TypeError):
+ df.plot(yerr=df_err)
+
+ @slow
+ def test_errorbar_with_integer_column_names(self):
+ # test with integer column names
+ df = DataFrame(np.random.randn(10, 2))
+ df_err = DataFrame(np.random.randn(10, 2))
+ _check_plot_works(df.plot, yerr=df_err)
+ _check_plot_works(df.plot, y=0, yerr=1)
+
+ @slow
+ def test_errorbar_with_partial_columns(self):
+ df = DataFrame(np.random.randn(10, 3))
+ df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2])
+ kinds = ['line', 'bar']
+ for kind in kinds:
+ _check_plot_works(df.plot, yerr=df_err, kind=kind)
+
+ ix = date_range('1/1/2000', periods=10, freq='M')
+ df.set_index(ix, inplace=True)
+ df_err.set_index(ix, inplace=True)
+ _check_plot_works(df.plot, yerr=df_err, kind='line')
+
+ @slow
+ def test_errorbar_timeseries(self):
+
+ d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
+ d_err = {'x': np.ones(12)*0.2, 'y': np.ones(12)*0.4}
+
+ # check time-series plots
+ ix = date_range('1/1/2000', '1/1/2001', freq='M')
+ tdf = DataFrame(d, index=ix)
+ tdf_err = DataFrame(d_err, index=ix)
+
+ kinds = ['line', 'bar', 'barh']
+ for kind in kinds:
+ _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
+ _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
+ _check_plot_works(tdf.plot, y='y', kind=kind)
+ _check_plot_works(tdf.plot, y='y', yerr='x', kind=kind)
+ _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
+ _check_plot_works(tdf.plot, kind=kind, subplots=True)
+
+
+ def test_errorbar_asymmetrical(self):
+
+ np.random.seed(0)
+ err = np.random.rand(3, 2, 5)
+
+ data = np.random.randn(5, 3)
+ df = DataFrame(data)
+
+ ax = df.plot(yerr=err, xerr=err/2)
+
+ self.assertEqual(ax.lines[7].get_ydata()[0], data[0,1]-err[1,0,0])
+ self.assertEqual(ax.lines[8].get_ydata()[0], data[0,1]+err[1,1,0])
+
+ self.assertEqual(ax.lines[5].get_xdata()[0], -err[1,0,0]/2)
+ self.assertEqual(ax.lines[6].get_xdata()[0], err[1,1,0]/2)
+
+ with tm.assertRaises(ValueError):
+ df.plot(yerr=err.T)
+
+ tm.close()
@tm.mplskip
class TestDataFrameGroupByPlots(tm.TestCase):
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 7038284b6c2a0..507e0127a5062 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -831,6 +831,11 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=True,
self.fig = fig
self.axes = None
+ # parse errorbar input if given
+ for err_dim in 'xy':
+ if err_dim+'err' in kwds:
+ kwds[err_dim+'err'] = self._parse_errorbars(error_dim=err_dim, **kwds)
+
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
@@ -971,6 +976,11 @@ def _setup_subplots(self):
axes = [ax]
+ if self.logx:
+ [a.set_xscale('log') for a in axes]
+ if self.logy:
+ [a.set_yscale('log') for a in axes]
+
self.fig = fig
self.axes = axes
@@ -1090,14 +1100,16 @@ def _is_datetype(self):
'time'))
def _get_plot_function(self):
- if self.logy:
- plotf = self.plt.Axes.semilogy
- elif self.logx:
- plotf = self.plt.Axes.semilogx
- elif self.loglog:
- plotf = self.plt.Axes.loglog
- else:
+ '''
+ Returns the matplotlib plotting function (plot or errorbar) based on
+ the presence of errorbar keywords.
+ '''
+
+ if ('xerr' not in self.kwds) and \
+ ('yerr' not in self.kwds):
plotf = self.plt.Axes.plot
+ else:
+ plotf = self.plt.Axes.errorbar
return plotf
@@ -1180,6 +1192,78 @@ def _get_marked_label(self, label, col_num):
else:
return label
+ def _parse_errorbars(self, error_dim='y', **kwds):
+ '''
+ Look for error keyword arguments and return the actual errorbar data
+ or return the error DataFrame/dict
+
+ Error bars can be specified in several ways:
+ Series: the user provides a pandas.Series object of the same
+ length as the data
+ ndarray: provides a np.ndarray of the same length as the data
+ DataFrame/dict: error values are paired with keys matching the
+ key in the plotted DataFrame
+ str: the name of the column within the plotted DataFrame
+ '''
+
+ err_kwd = kwds.pop(error_dim+'err', None)
+ if err_kwd is None:
+ return None
+
+ from pandas import DataFrame, Series
+
+ def match_labels(data, err):
+ err = err.reindex_axis(data.index)
+ return err
+
+ # key-matched DataFrame
+ if isinstance(err_kwd, DataFrame):
+ err = err_kwd
+ err = match_labels(self.data, err)
+
+ # key-matched dict
+ elif isinstance(err_kwd, dict):
+ err = err_kwd
+
+ # Series of error values
+ elif isinstance(err_kwd, Series):
+ # broadcast error series across data
+ err = match_labels(self.data, err_kwd)
+ err = np.atleast_2d(err)
+ err = np.tile(err, (self.nseries, 1))
+
+ # errors are a column in the dataframe
+ elif isinstance(err_kwd, str):
+ err = np.atleast_2d(self.data[err_kwd].values)
+ self.data = self.data[self.data.columns.drop(err_kwd)]
+ err = np.tile(err, (self.nseries, 1))
+
+ elif isinstance(err_kwd, (tuple, list, np.ndarray)):
+
+ # raw error values
+ err = np.atleast_2d(err_kwd)
+
+ err_shape = err.shape
+
+ # asymmetrical error bars
+ if err.ndim==3:
+ if (err_shape[0] != self.nseries) or \
+ (err_shape[1] != 2) or \
+ (err_shape[2] != len(self.data)):
+ msg = "Asymmetrical error bars should be provided " + \
+ "with the shape (%u, 2, %u)" % \
+ (self.nseries, len(self.data))
+ raise ValueError(msg)
+
+ # broadcast errors to each data series
+ if len(err)==1:
+ err = np.tile(err, (self.nseries, 1))
+
+ else:
+ msg = "No valid %serr detected" % error_dim
+ raise ValueError(msg)
+
+ return err
class KdePlot(MPLPlot):
def __init__(self, data, bw_method=None, ind=None, **kwargs):
@@ -1191,7 +1275,7 @@ def _make_plot(self):
from scipy.stats import gaussian_kde
from scipy import __version__ as spv
from distutils.version import LooseVersion
- plotf = self._get_plot_function()
+ plotf = self.plt.Axes.plot
colors = self._get_colors()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
@@ -1376,8 +1460,9 @@ def _make_plot(self):
# this is slightly deceptive
if not self.x_compat and self.use_index and self._use_dynamic_x():
data = self._maybe_convert_index(self.data)
- self._make_ts_plot(data, **self.kwds)
+ self._make_ts_plot(data)
else:
+ from pandas.core.frame import DataFrame
lines = []
labels = []
x = self._get_xticks(convert_period=True)
@@ -1391,6 +1476,16 @@ def _make_plot(self):
kwds = self.kwds.copy()
self._maybe_add_color(colors, kwds, style, i)
+ for err_kw in ['xerr', 'yerr']:
+ # user provided label-matched dataframe of errors
+ if err_kw in kwds:
+ if isinstance(kwds[err_kw], (DataFrame, dict)):
+ if label in kwds[err_kw].keys():
+ kwds[err_kw] = kwds[err_kw][label]
+ else: del kwds[err_kw]
+ elif kwds[err_kw] is not None:
+ kwds[err_kw] = kwds[err_kw][i]
+
label = com.pprint_thing(label) # .encode('utf-8')
mask = com.isnull(y)
@@ -1399,10 +1494,11 @@ def _make_plot(self):
y = np.ma.masked_where(mask, y)
kwds['label'] = label
- if style is None:
- args = (ax, x, y)
- else:
+ # prevent style kwarg from going to errorbar, where it is unsupported
+ if style is not None and plotf.__name__=='plot':
args = (ax, x, y, style)
+ else:
+ args = (ax, x, y)
newline = plotf(*args, **kwds)[0]
lines.append(newline)
@@ -1422,6 +1518,8 @@ def _make_plot(self):
def _make_ts_plot(self, data, **kwargs):
from pandas.tseries.plotting import tsplot
+ from pandas.core.frame import DataFrame
+
kwargs = kwargs.copy()
colors = self._get_colors()
@@ -1430,8 +1528,15 @@ def _make_ts_plot(self, data, **kwargs):
labels = []
def _plot(data, col_num, ax, label, style, **kwds):
- newlines = tsplot(data, plotf, ax=ax, label=label,
- style=style, **kwds)
+
+ if plotf.__name__=='plot':
+ newlines = tsplot(data, plotf, ax=ax, label=label,
+ style=style, **kwds)
+ # errorbar function does not support style argument
+ elif plotf.__name__=='errorbar':
+ newlines = tsplot(data, plotf, ax=ax, label=label,
+ **kwds)
+
ax.grid(self.grid)
lines.append(newlines[0])
@@ -1444,19 +1549,33 @@ def _plot(data, col_num, ax, label, style, **kwds):
ax = self._get_ax(0) # self.axes[0]
style = self.style or ''
label = com.pprint_thing(self.label)
- kwds = kwargs.copy()
+ kwds = self.kwds.copy()
self._maybe_add_color(colors, kwds, style, 0)
+ if 'yerr' in kwds:
+ kwds['yerr'] = kwds['yerr'][0]
+
_plot(data, 0, ax, label, self.style, **kwds)
+
else:
for i, col in enumerate(data.columns):
label = com.pprint_thing(col)
ax = self._get_ax(i)
style = self._get_style(i, col)
- kwds = kwargs.copy()
+ kwds = self.kwds.copy()
self._maybe_add_color(colors, kwds, style, i)
+ # key-matched DataFrame of errors
+ if 'yerr' in kwds:
+ yerr = kwds['yerr']
+ if isinstance(yerr, (DataFrame, dict)):
+ if col in yerr.keys():
+ kwds['yerr'] = yerr[col]
+ else: del kwds['yerr']
+ else:
+ kwds['yerr'] = yerr[i]
+
_plot(data[col], i, ax, label, style, **kwds)
self._make_legend(lines, labels)
@@ -1581,6 +1700,7 @@ def f(ax, x, y, w, start=None, log=self.log, **kwds):
def _make_plot(self):
import matplotlib as mpl
+ from pandas import DataFrame, Series
# mpl decided to make their version string unicode across all Python
# versions for mpl >= 1.3 so we have to call str here for python 2
@@ -1599,10 +1719,25 @@ def _make_plot(self):
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
- label = com.pprint_thing(label)
kwds = self.kwds.copy()
kwds['color'] = colors[i % ncolors]
+ for err_kw in ['xerr', 'yerr']:
+ if err_kw in kwds:
+ # user provided label-matched dataframe of errors
+ if isinstance(kwds[err_kw], (DataFrame, dict)):
+ if label in kwds[err_kw].keys():
+ kwds[err_kw] = kwds[err_kw][label]
+ else: del kwds[err_kw]
+ elif kwds[err_kw] is not None:
+ kwds[err_kw] = kwds[err_kw][i]
+
+ label = com.pprint_thing(label)
+
+ if (('yerr' in kwds) or ('xerr' in kwds)) \
+ and (kwds.get('ecolor') is None):
+ kwds['ecolor'] = mpl.rcParams['xtick.color']
+
start = 0
if self.log:
start = 1
@@ -1694,6 +1829,9 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,
x : label or position, default None
y : label or position, default None
Allows plotting of one column versus another
+ yerr : DataFrame (with matching labels), Series, list-type (tuple, list,
+ ndarray), or str of column name containing y error values
+ xerr : similar functionality as yerr, but for x error values
subplots : boolean, default False
Make separate subplots for each time series
sharex : boolean, default True
@@ -1807,6 +1945,15 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,
label = kwds.pop('label', label)
ser = frame[y]
ser.index.name = label
+
+ for kw in ['xerr', 'yerr']:
+ if (kw in kwds) and \
+ (isinstance(kwds[kw], str) or com.is_integer(kwds[kw])):
+ try:
+ kwds[kw] = frame[kwds[kw]]
+ except (IndexError, KeyError, TypeError):
+ pass
+
return plot_series(ser, label=label, kind=kind,
use_index=use_index,
rot=rot, xticks=xticks, yticks=yticks,
@@ -1876,6 +2023,8 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None,
-----
See matplotlib documentation online for more on this subject
"""
+
+ from pandas import DataFrame
kind = _get_standard_kind(kind.lower().strip())
if kind == 'line':
klass = LinePlot
@@ -2611,6 +2760,7 @@ def _maybe_convert_date(x):
x = conv_func(x)
return x
+
if __name__ == '__main__':
# import pandas.rpy.common as com
# sales = com.load_data('sanfrancisco.home.sales', package='nutshell')
| close #3796
Addresses some of the concerns in issue #3796. New code allows the DataFrame and Series Line plots and Bar plot functions to include errorbars using `xerr` and `yerr` keyword arguments to `DataFrame/Series.plot()`. It supports specifying x and y errorbars as 1. a separate list/numpy/Series, 2. a DataFrame with the same column names as the plotting DataFrame. For example, using method 2 looks like this:
```
df = pd.DataFrame({'x':[1, 2, 3], 'y':[3, 2, 1]})
df_xerr = pd.DataFrame({'x':[0.6, 0.2, 0.3], 'y':[0.4, 0.5, 0.6]})
df_yerr = pd.DataFrame({'x':[0.5, 0.4, 0.6], 'y':[0.3, 0.7, 0.4]})
df.plot(xerr=df_xerr, yerr=df_yerr)
```
This is my first contribution. I tried to follow the contribution guidelines as best I could, but let me know if anything needs work!
| https://api.github.com/repos/pandas-dev/pandas/pulls/5638 | 2013-12-04T00:10:22Z | 2014-03-18T17:42:12Z | 2014-03-18T17:42:12Z | 2014-06-13T17:00:32Z |
BUG: Validate levels in a multi-index before storing in a HDFStore (GH5527) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 96004737c4d0f..3f148748081b9 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -549,6 +549,7 @@ Bug Fixes
- A zero length series written in Fixed format not deserializing properly.
(:issue:`4708`)
- Fixed decoding perf issue on pyt3 (:issue:`5441`)
+ - Validate levels in a multi-index before storing (:issue:`5527`)
- Fixed bug in tslib.tz_convert(vals, tz1, tz2): it could raise IndexError
exception while trying to access trans[pos + 1] (:issue:`4496`)
- The ``by`` argument now works correctly with the ``layout`` argument
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index d2fe1e0638192..db2028c70dc20 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -2688,6 +2688,14 @@ def is_multi_index(self):
""" the levels attribute is 1 or a list in the case of a multi-index """
return isinstance(self.levels,list)
+ def validate_multiindex(self, obj):
+ """ validate that we can store the multi-index; reset and return the new object """
+ levels = [ l if l is not None else "level_{0}".format(i) for i, l in enumerate(obj.index.names) ]
+ try:
+ return obj.reset_index(), levels
+ except (ValueError):
+ raise ValueError("duplicate names/columns in the multi-index when storing as a table")
+
@property
def nrows_expected(self):
""" based on our axes, compute the expected nrows """
@@ -3701,10 +3709,9 @@ class AppendableMultiSeriesTable(AppendableSeriesTable):
def write(self, obj, **kwargs):
""" we are going to write this as a frame table """
name = obj.name or 'values'
- cols = list(obj.index.names)
+ obj, self.levels = self.validate_multiindex(obj)
+ cols = list(self.levels)
cols.append(name)
- self.levels = list(obj.index.names)
- obj = obj.reset_index()
obj.columns = cols
return super(AppendableMultiSeriesTable, self).write(obj=obj, **kwargs)
@@ -3764,6 +3771,7 @@ class AppendableMultiFrameTable(AppendableFrameTable):
table_type = u('appendable_multiframe')
obj_type = DataFrame
ndim = 2
+ _re_levels = re.compile("^level_\d+$")
@property
def table_type_short(self):
@@ -3774,11 +3782,11 @@ def write(self, obj, data_columns=None, **kwargs):
data_columns = []
elif data_columns is True:
data_columns = obj.columns[:]
- for n in obj.index.names:
+ obj, self.levels = self.validate_multiindex(obj)
+ for n in self.levels:
if n not in data_columns:
data_columns.insert(0, n)
- self.levels = obj.index.names
- return super(AppendableMultiFrameTable, self).write(obj=obj.reset_index(), data_columns=data_columns, **kwargs)
+ return super(AppendableMultiFrameTable, self).write(obj=obj, data_columns=data_columns, **kwargs)
def read(self, columns=None, **kwargs):
if columns is not None:
@@ -3787,7 +3795,11 @@ def read(self, columns=None, **kwargs):
columns.insert(0, n)
df = super(AppendableMultiFrameTable, self).read(
columns=columns, **kwargs)
- df.set_index(self.levels, inplace=True)
+ df = df.set_index(self.levels)
+
+ # remove names for 'level_%d'
+ df.index = df.index.set_names([ None if self._re_levels.search(l) else l for l in df.index.names ])
+
return df
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 3ab818a7fbe1a..1953f79482a22 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -1572,6 +1572,51 @@ def test_column_multiindex(self):
store.put('df1',df,format='table')
tm.assert_frame_equal(store['df1'],df,check_index_type=True,check_column_type=True)
+ def test_store_multiindex(self):
+
+ # validate multi-index names
+ # GH 5527
+ with ensure_clean_store(self.path) as store:
+
+ def make_index(names=None):
+ return MultiIndex.from_tuples([( datetime.datetime(2013,12,d), s, t) for d in range(1,3) for s in range(2) for t in range(3)],
+ names=names)
+
+
+ # no names
+ _maybe_remove(store, 'df')
+ df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index())
+ store.append('df',df)
+ tm.assert_frame_equal(store.select('df'),df)
+
+ # partial names
+ _maybe_remove(store, 'df')
+ df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date',None,None]))
+ store.append('df',df)
+ tm.assert_frame_equal(store.select('df'),df)
+
+ # series
+ _maybe_remove(store, 's')
+ s = Series(np.zeros(12), index=make_index(['date',None,None]))
+ store.append('s',s)
+ tm.assert_series_equal(store.select('s'),s)
+
+ # dup with column
+ _maybe_remove(store, 'df')
+ df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','a','t']))
+ self.assertRaises(ValueError, store.append, 'df',df)
+
+ # dup within level
+ _maybe_remove(store, 'df')
+ df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','date','date']))
+ self.assertRaises(ValueError, store.append, 'df',df)
+
+ # fully names
+ _maybe_remove(store, 'df')
+ df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','s','t']))
+ store.append('df',df)
+ tm.assert_frame_equal(store.select('df'),df)
+
def test_pass_spec_to_storer(self):
df = tm.makeDataFrame()
| close #5527
| https://api.github.com/repos/pandas-dev/pandas/pulls/5634 | 2013-12-03T02:41:10Z | 2013-12-03T03:00:46Z | 2013-12-03T03:00:46Z | 2014-06-22T06:01:57Z |
BUG: make sure partial setting with a Series like works with a completly empty frame (GH5632) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index f121928c06f72..96004737c4d0f 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -246,7 +246,7 @@ API Changes
(:issue:`4390`)
- allow ``ix/loc`` for Series/DataFrame/Panel to set on any axis even when
the single-key is not currently contained in the index for that axis
- (:issue:`2578`, :issue:`5226`)
+ (:issue:`2578`, :issue:`5226`, :issue:`5632`)
- Default export for ``to_clipboard`` is now csv with a sep of `\t` for
compat (:issue:`3368`)
- ``at`` now will enlarge the object inplace (and return the same)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6ef6d8c75216f..5e31b14fa7bd3 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1895,9 +1895,16 @@ def _ensure_valid_index(self, value):
passed value
"""
if not len(self.index):
+
+ # GH5632, make sure that we are a Series convertible
+ try:
+ value = Series(value)
+ except:
+ pass
+
if not isinstance(value, Series):
raise ValueError('Cannot set a frame with no defined index '
- 'and a non-series')
+ 'and a value that cannot be converted to a Series')
self._data.set_axis(1, value.index.copy(), check_axis=False)
def _set_item(self, key, value):
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 44160609235df..7b05a0b78b121 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1635,6 +1635,42 @@ def f():
df.loc[:,1] = 1
self.assertRaises(ValueError, f)
+ # these work as they don't really change
+ # anything but the index
+ # GH5632
+ expected = DataFrame(columns=['foo'])
+ def f():
+ df = DataFrame()
+ df['foo'] = Series([])
+ return df
+ assert_frame_equal(f(), expected)
+ def f():
+ df = DataFrame()
+ df['foo'] = Series(df.index)
+ return df
+ assert_frame_equal(f(), expected)
+ def f():
+ df = DataFrame()
+ df['foo'] = Series(range(len(df)))
+ return df
+ assert_frame_equal(f(), expected)
+ def f():
+ df = DataFrame()
+ df['foo'] = []
+ return df
+ assert_frame_equal(f(), expected)
+ def f():
+ df = DataFrame()
+ df['foo'] = df.index
+ return df
+ assert_frame_equal(f(), expected)
+ def f():
+ df = DataFrame()
+ df['foo'] = range(len(df))
+ return df
+ assert_frame_equal(f(), expected)
+
+ df = DataFrame()
df2 = DataFrame()
df2[1] = Series([1],index=['foo'])
df.loc[:,1] = Series([1],index=['foo'])
| closes #5632
| https://api.github.com/repos/pandas-dev/pandas/pulls/5633 | 2013-12-02T22:19:33Z | 2013-12-03T01:59:04Z | 2013-12-03T01:59:04Z | 2014-06-18T03:40:04Z |
BUG: core/generic/_update_inplace not resetting item_cache (GH5628) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 96da62077436f..f121928c06f72 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -209,7 +209,7 @@ Improvements to existing features
- ``NDFrame.drop()``, ``NDFrame.dropna()``, and ``.drop_duplicates()`` all
accept ``inplace`` as a kewyord argument; however, this only means that the
wrapper is updated inplace, a copy is still made internally.
- (:issue:`1960`, :issue:`5247`, and related :issue:`2325` [still not
+ (:issue:`1960`, :issue:`5247`, :issue:`5628`, and related :issue:`2325` [still not
closed])
- Fixed bug in `tools.plotting.andrews_curvres` so that lines are drawn grouped
by color as expected.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 5909885821b15..3a03d3b48ef19 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1217,6 +1217,7 @@ def _update_inplace(self, result):
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
+ self._clear_item_cache()
self._data = result._data
self._maybe_update_cacher()
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 8f549eb12d825..9d9ebc1b95830 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -6806,6 +6806,13 @@ def test_drop(self):
assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.ix[["Y"], :])
assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.ix[[], :])
+ # inplace cache issue
+ # GH 5628
+ df = pd.DataFrame(np.random.randn(10,3), columns=list('abc'))
+ expected = df[~(df.b>0)]
+ df.drop(labels=df[df.b>0].index, inplace=True)
+ assert_frame_equal(df,expected)
+
def test_fillna(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
| closes #5628
| https://api.github.com/repos/pandas-dev/pandas/pulls/5631 | 2013-12-02T12:42:39Z | 2013-12-02T12:53:01Z | 2013-12-02T12:53:01Z | 2014-07-02T08:55:16Z |
BUG/TST: importing util.clipboard shouldnt print to stdout (nosetests) | diff --git a/pandas/util/clipboard.py b/pandas/util/clipboard.py
index 3008a5d606c90..65c372bf7cd5b 100644
--- a/pandas/util/clipboard.py
+++ b/pandas/util/clipboard.py
@@ -133,12 +133,12 @@ def xselGetClipboard():
getcb = macGetClipboard
setcb = macSetClipboard
elif os.name == 'posix' or platform.system() == 'Linux':
- xclipExists = os.system('which xclip') == 0
+ xclipExists = os.system('which xclip > /dev/null') == 0
if xclipExists:
getcb = xclipGetClipboard
setcb = xclipSetClipboard
else:
- xselExists = os.system('which xsel') == 0
+ xselExists = os.system('which xsel > /dev/null') == 0
if xselExists:
getcb = xselGetClipboard
setcb = xselSetClipboard
| ```
$ nosetests
/usr/bin/xclip <<<< raise your hand if you've been ignoring this for 6 months
...........
```
`os.system` nastiness. The vbench package has similar problems.
https://github.com/pydata/pandas/pull/3848
:raised_hand:
| https://api.github.com/repos/pandas-dev/pandas/pulls/5627 | 2013-12-01T13:12:17Z | 2013-12-01T13:14:52Z | 2013-12-01T13:14:52Z | 2014-07-16T08:42:14Z |
TST: Always try to close file descriptors of tempfiles | diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 78d9dcb1fb888..3ab818a7fbe1a 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -3731,7 +3731,7 @@ def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs):
if new_f is None:
import tempfile
- new_f = tempfile.mkstemp()[1]
+ fd, new_f = tempfile.mkstemp()
tstore = store.copy(new_f, keys = keys, propindexes = propindexes, **kwargs)
@@ -3757,6 +3757,10 @@ def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs):
finally:
safe_close(store)
safe_close(tstore)
+ try:
+ os.close(fd)
+ except:
+ pass
safe_remove(new_f)
do_copy()
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 0c4e083b54eda..1904b5f0be49d 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -345,17 +345,20 @@ def ensure_clean(filename=None, return_filelike=False):
yield f
finally:
f.close()
-
else:
-
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
- filename = tempfile.mkstemp(suffix=filename)[1]
+ fd, filename = tempfile.mkstemp(suffix=filename)
yield filename
finally:
+ try:
+ os.close(fd)
+ except Exception as e:
+ print("Couldn't close file descriptor: %d (file: %s)" %
+ (fd, filename))
try:
if os.path.exists(filename):
os.remove(filename)
| We weren't closing the file descriptors from `mkstemp`, which caused fun
segmentation faults for me. This should fix the issue.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5626 | 2013-12-01T01:20:55Z | 2013-12-01T03:37:51Z | 2013-12-01T03:37:51Z | 2014-07-02T13:59:48Z |
ENH: Make set_option into a contextmanager that undos itself on __exit__ | diff --git a/pandas/core/config.py b/pandas/core/config.py
index 4bec029851092..7ecc27e98a297 100644
--- a/pandas/core/config.py
+++ b/pandas/core/config.py
@@ -71,9 +71,29 @@ class OptionError(AttributeError, KeyError):
checks"""
+# For user convenience, we'd like to have the available options described
+# in the docstring. For dev convenience we'd like to generate the docstrings
+# dynamically instead of maintaining them by hand. To this, we use the
+# class below which wraps functions inside a callable, and converts
+# __doc__ into a propery function. The doctsrings below are templates
+# using the py2.6+ advanced formatting syntax to plug in a concise list
+# of options, and option descriptions.
+
+
+def dynamic_doc(doc_template):
+
+ @property
+ def __doc__(self):
+ opts_desc = describe_option('all', _print_desc=False)
+ opts_list = pp_options_list(list(_registered_options.keys()))
+ return doc_template.format(opts_desc=opts_desc,
+ opts_list=opts_list)
+ return __doc__
+
#
# User API
+
def _get_single_key(pat, silent):
keys = _select_options(pat)
if len(keys) == 0:
@@ -92,7 +112,9 @@ def _get_single_key(pat, silent):
return key
-def _get_option(pat, silent=False):
+def get_option(pat, silent=False, prefix=True):
+ if prefix:
+ pat = prefix_key(pat)
key = _get_single_key(pat, silent)
# walk the nested dict
@@ -100,58 +122,38 @@ def _get_option(pat, silent=False):
return root[k]
-def _set_single_option(pat, value, silent):
- key = _get_single_key(pat, silent)
-
- o = _get_registered_option(key)
- if o and o.validator:
- o.validator(value)
-
- # walk the nested dict
- root, k = _get_root(key)
- root[k] = value
-
- if o.cb:
- o.cb(key)
-
+get_option.__doc__ = dynamic_doc("""
+get_option(pat) - Retrieves the value of the specified option
-def _set_multiple_options(args, silent):
- for k, v in zip(args[::2], args[1::2]):
- _set_single_option(k, v, silent)
+Available options:
+{opts_list}
+Parameters
+----------
+pat - str/regexp which should match a single option.
-def _set_option(*args, **kwargs):
- # must at least 1 arg deal with constraints later
- nargs = len(args)
- if not nargs or nargs % 2 != 0:
- raise AssertionError("Must provide an even number of non-keyword "
- "arguments")
+Note: partial matches are supported for convenience, but unless you use the
+full option name (e.g. x.y.z.option_name), your code may break in future
+versions if new options with similar names are introduced.
- # must be 0 or 1 kwargs
- nkwargs = len(kwargs)
- if nkwargs not in (0, 1):
- raise AssertionError("The can only be 0 or 1 keyword arguments")
+Returns
+-------
+result - the value of the option
- # if 1 kwarg then it must be silent=True or silent=False
- if nkwargs:
- k, = list(kwargs.keys())
- v, = list(kwargs.values())
+Raises
+------
+OptionError if no such option exists
- if k != 'silent':
- raise ValueError("the only allowed keyword argument is 'silent', "
- "you passed '{0}'".format(k))
- if not isinstance(v, bool):
- raise TypeError("the type of the keyword argument passed must be "
- "bool, you passed a {0}".format(v.__class__))
+{opts_desc}
+""")
- # default to false
- silent = kwargs.get('silent', False)
- _set_multiple_options(args, silent)
+def prefix_key(key):
+ return key
-def _describe_option(pat='', _print_desc=True):
- keys = _select_options(pat)
+def describe_option(pat='', _print_desc=True):
+ keys = _select_options(prefix_key(pat))
if len(keys) == 0:
raise OptionError('No such keys(s)')
@@ -165,9 +167,36 @@ def _describe_option(pat='', _print_desc=True):
return s
-def _reset_option(pat):
+describe_option.__doc__ = dynamic_doc("""
+describe_option(pat,_print_desc=False) Prints the description
+for one or more registered options.
+
+Call with not arguments to get a listing for all registered options.
+
+Available options:
+{opts_list}
- keys = _select_options(pat)
+Parameters
+----------
+pat - str, a regexp pattern. All matching keys will have their
+ description displayed.
+
+_print_desc - if True (default) the description(s) will be printed
+ to stdout otherwise, the description(s) will be returned
+ as a unicode string (for testing).
+
+Returns
+-------
+None by default, the description(s) as a unicode string if _print_desc
+is False
+
+{opts_desc}
+""")
+
+
+def reset_option(pat):
+
+ keys = _select_options(prefix_key(pat))
if len(keys) == 0:
raise OptionError('No such keys(s)')
@@ -179,10 +208,36 @@ def _reset_option(pat):
'value')
for k in keys:
- _set_option(k, _registered_options[k].defval)
+ set_option._set_single_option(k, _registered_options[k].defval)
+
+
+reset_option.__doc__ = dynamic_doc("""
+reset_option(pat) - Reset one or more options to their default value.
+
+Pass "all" as argument to reset all options.
+
+Available options:
+{opts_list}
+
+Parameters
+----------
+pat - str/regex if specified only options matching `prefix`* will be reset
+
+Note: partial matches are supported for convenience, but unless you use the
+full option name (e.g. x.y.z.option_name), your code may break in future
+versions if new options with similar names are introduced.
+
+Returns
+-------
+None
+
+{opts_desc}
+""")
-def get_default_val(pat):
+def get_default_val(pat, prefix=True):
+ if prefix:
+ prefix_key(pat)
key = _get_single_key(pat, silent=True)
return _get_registered_option(key).defval
@@ -204,7 +259,7 @@ def __setattr__(self, key, val):
# you can't set new keys
# can you can't overwrite subtrees
if key in self.d and not isinstance(self.d[key], dict):
- _set_option(prefix, val)
+ set_option(prefix, val)
else:
raise OptionError("You can only set the value of existing options")
@@ -217,63 +272,21 @@ def __getattr__(self, key):
if isinstance(v, dict):
return DictWrapper(v, prefix)
else:
- return _get_option(prefix)
+ return get_option(prefix)
def __dir__(self):
return list(self.d.keys())
-# For user convenience, we'd like to have the available options described
-# in the docstring. For dev convenience we'd like to generate the docstrings
-# dynamically instead of maintaining them by hand. To this, we use the
-# class below which wraps functions inside a callable, and converts
-# __doc__ into a propery function. The doctsrings below are templates
-# using the py2.6+ advanced formatting syntax to plug in a concise list
-# of options, and option descriptions.
-
-
-class CallableDynamicDoc(object):
-
- def __init__(self, func, doc_tmpl):
- self.__doc_tmpl__ = doc_tmpl
- self.__func__ = func
-
- def __call__(self, *args, **kwds):
- return self.__func__(*args, **kwds)
-
- @property
- def __doc__(self):
- opts_desc = _describe_option('all', _print_desc=False)
- opts_list = pp_options_list(list(_registered_options.keys()))
- return self.__doc_tmpl__.format(opts_desc=opts_desc,
- opts_list=opts_list)
-
-_get_option_tmpl = """
-get_option(pat) - Retrieves the value of the specified option
-
-Available options:
-{opts_list}
-
-Parameters
-----------
-pat - str/regexp which should match a single option.
-
-Note: partial matches are supported for convenience, but unless you use the
-full option name (e.g. x.y.z.option_name), your code may break in future
-versions if new options with similar names are introduced.
-
-Returns
--------
-result - the value of the option
+options = DictWrapper(_global_config)
-Raises
-------
-OptionError if no such option exists
+#
+# Functions for use by pandas developers, in addition to User - api
-{opts_desc}
-"""
-_set_option_tmpl = """
+class SetOptionMeta(type):
+ # metaclass to allow dynamic docstring on set_option
+ __doc__ = dynamic_doc("""
set_option(pat,value) - Sets the value of the specified option
Available options:
@@ -298,95 +311,68 @@ def __doc__(self):
OptionError if no such option exists
{opts_desc}
-"""
-
-_describe_option_tmpl = """
-describe_option(pat,_print_desc=False) Prints the description
-for one or more registered options.
+""")
-Call with not arguments to get a listing for all registered options.
-
-Available options:
-{opts_list}
-
-Parameters
-----------
-pat - str, a regexp pattern. All matching keys will have their
- description displayed.
-_print_desc - if True (default) the description(s) will be printed
- to stdout otherwise, the description(s) will be returned
- as a unicode string (for testing).
+@compat.add_metaclass(SetOptionMeta)
+class set_option(object):
+ silent_default = False
-Returns
--------
-None by default, the description(s) as a unicode string if _print_desc
-is False
-
-{opts_desc}
-"""
-
-_reset_option_tmpl = """
-reset_option(pat) - Reset one or more options to their default value.
-
-Pass "all" as argument to reset all options.
-
-Available options:
-{opts_list}
-
-Parameters
-----------
-pat - str/regex if specified only options matching `prefix`* will be reset
-
-Note: partial matches are supported for convenience, but unless you use the
-full option name (e.g. x.y.z.option_name), your code may break in future
-versions if new options with similar names are introduced.
-
-Returns
--------
-None
-
-{opts_desc}
-"""
+ def __init__(self, *args, **kwargs):
+ if not (len(args) % 2 == 0 and len(args) >= 2):
+ raise TypeError(
+ 'Need to invoke as '
+ '%s(pat, val, [(pat, val), ...)).' % (self.__class__.__name__)
+ )
-# bind the functions with their docstrings into a Callable
-# and use that as the functions exposed in pd.api
-get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
-set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
-reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
-describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
-options = DictWrapper(_global_config)
+ self.silent = kwargs.pop('silent', self.silent_default)
-#
-# Functions for use by pandas developers, in addition to User - api
+ if kwargs:
+ raise TypeError("The only allowable keyword argument is 'silent'."
+ " Got %s." % kwargs)
+ if not isinstance(self.silent, bool):
+ raise TypeError("Silent must be either True or False. Got %r." %
+ self.silent)
-class option_context(object):
+ ops = [(prefix_key(pat), val)
+ for pat, val in zip(args[::2], args[1::2])]
- def __init__(self, *args):
- if not (len(args) % 2 == 0 and len(args) >= 2):
- raise AssertionError(
- 'Need to invoke as'
- 'option_context(pat, val, [(pat, val), ...)).'
- )
-
- ops = list(zip(args[::2], args[1::2]))
undo = []
for pat, val in ops:
- undo.append((pat, _get_option(pat, silent=True)))
+ undo.append((pat, get_option(pat, silent=self.silent,
+ prefix=False)))
self.undo = undo
-
for pat, val in ops:
- _set_option(pat, val, silent=True)
+ self._set_single_option(pat, val, self.silent)
def __enter__(self):
- pass
+ return self
def __exit__(self, *args):
if self.undo:
for pat, val in self.undo:
- _set_option(pat, val)
+ self._set_single_option(pat, val, self.silent)
+
+ @staticmethod
+ def _set_single_option(pat, value, silent=False):
+ key = _get_single_key(pat, silent)
+
+ o = _get_registered_option(key)
+ if o and o.validator:
+ o.validator(value)
+
+ # walk the nested dict
+ root, k = _get_root(key)
+ root[k] = value
+
+ if o.cb:
+ o.cb(key)
+
+
+class option_context(set_option):
+ silent_default = True
def register_option(key, defval, doc='', validator=None, cb=None):
@@ -414,7 +400,7 @@ def register_option(key, defval, doc='', validator=None, cb=None):
"""
import tokenize
import keyword
- key = key.lower()
+ key = prefix_key(key.lower())
if key in _registered_options:
raise OptionError("Option '%s' has already been registered" % key)
@@ -616,7 +602,7 @@ def _build_option_description(k):
s = u('%s: ') % k
if o:
s += u('[default: %s] [currently: %s]') % (o.defval,
- _get_option(k, True))
+ get_option(k, True))
if o.doc:
s += '\n' + '\n '.join(o.doc.strip().split('\n'))
@@ -696,26 +682,17 @@ def config_prefix(prefix):
# Note: reset_option relies on set_option, and on key directly
# it does not fit in to this monkey-patching scheme
- global register_option, get_option, set_option, reset_option
+ global prefix_key
- def wrap(func):
+ def current_prefix_key(key):
+ return '%s.%s' % (prefix, key)
- def inner(key, *args, **kwds):
- pkey = '%s.%s' % (prefix, key)
- return func(pkey, *args, **kwds)
+ _prefix_key = prefix_key
+ prefix_key = current_prefix_key
- return inner
-
- _register_option = register_option
- _get_option = get_option
- _set_option = set_option
- set_option = wrap(set_option)
- get_option = wrap(get_option)
- register_option = wrap(register_option)
yield None
- set_option = _set_option
- get_option = _get_option
- register_option = _register_option
+
+ prefix_key = _prefix_key
# These factories and methods are handy for use as the validator
diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py
index 80a3fe9be7003..d3896190af8f4 100644
--- a/pandas/tests/test_config.py
+++ b/pandas/tests/test_config.py
@@ -170,18 +170,17 @@ def test_set_option(self):
def test_set_option_empty_args(self):
- self.assertRaises(AssertionError, self.cf.set_option)
+ self.assertRaises(TypeError, self.cf.set_option)
def test_set_option_uneven_args(self):
- self.assertRaises(AssertionError, self.cf.set_option, 'a.b', 2, 'b.c')
-
+ self.assertRaises(TypeError, self.cf.set_option, 'a.b', 2, 'b.c')
def test_set_option_2_kwargs(self):
- self.assertRaises(AssertionError, self.cf.set_option, 'a.b', 2,
+ self.assertRaises(TypeError, self.cf.set_option, 'a.b', 2,
silenadf=2, asdf=2)
def test_set_option_invalid_kwargs_key(self):
- self.assertRaises(ValueError, self.cf.set_option, 'a.b', 2,
+ self.assertRaises(TypeError, self.cf.set_option, 'a.b', 2,
silenadf=2)
def test_set_option_invalid_kwargs_value_type(self):
@@ -189,7 +188,7 @@ def test_set_option_invalid_kwargs_value_type(self):
silent=2)
def test_set_option_invalid_single_argument_type(self):
- self.assertRaises(AssertionError, self.cf.set_option, 2)
+ self.assertRaises(TypeError, self.cf.set_option, 2)
def test_set_option_multiple(self):
self.cf.register_option('a', 1, 'doc')
@@ -325,7 +324,8 @@ def test_deprecate_option(self):
warnings.simplefilter('always')
self.cf.set_option('d.dep', 'baz') # should overwrite "d.a"
- self.assertEqual(len(w), 1) # should have raised one warning
+ # raises 2 warnings because collects for undo as well
+ self.assertEqual(len(w), 2)
self.assertTrue(
'eprecated' in str(w[-1])) # we get the custom message
| Fixes #5618
cc @y-p and @jseabold
I went ahead and simplifed all the prefixing stuff to just change a single `prefix_key` function instead and handled the dynamic docstring by setting the `__doc__` attribute of functions/classes. set_option is now callable _and_ a contextmanager (whee!) so no need to add anything else to the global namespace (if you don't call `__exit__` it doesn't undo itself)
Had to add a metaclass to this because it's the only way to set docstrings on the _class_ level. Doesn't really add that much noise so I think it's fine.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5625 | 2013-12-01T00:50:30Z | 2013-12-19T22:43:04Z | null | 2014-07-11T21:41:59Z |
CLN: Trim includes of absent files | diff --git a/MANIFEST.in b/MANIFEST.in
index 02de7790d11cf..5bf02ad867bd2 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -2,9 +2,7 @@ include MANIFEST.in
include LICENSE
include RELEASE.md
include README.rst
-include TODO.rst
include setup.py
-include setupegg.py
graft doc
prune doc/build
| https://api.github.com/repos/pandas-dev/pandas/pulls/5624 | 2013-11-30T21:35:46Z | 2013-11-30T22:02:35Z | 2013-11-30T22:02:35Z | 2014-07-16T08:42:12Z | |
BUG/API: autocorrelation_plot should accept kwargs | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 9a0854494a897..51bf209ec4b04 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -61,6 +61,7 @@ API Changes
- Raise/Warn ``SettingWithCopyError`` (according to the option ``chained_assignment`` in more cases,
when detecting chained assignment, related (:issue:`5938`)
- DataFrame.head(0) returns self instead of empty frame (:issue:`5846`)
+ - ``autocorrelation_plot`` now accepts ``**kwargs``. (:issue:`5623`)
Experimental Features
~~~~~~~~~~~~~~~~~~~~~
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 7c8b17fb14abe..c3a19bb5714c7 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -299,6 +299,10 @@ def test_autocorrelation_plot(self):
_check_plot_works(autocorrelation_plot, self.ts)
_check_plot_works(autocorrelation_plot, self.ts.values)
+ ax = autocorrelation_plot(self.ts, label='Test')
+ t = ax.get_legend().get_texts()[0].get_text()
+ self.assertEqual(t, 'Test')
+
@slow
def test_lag_plot(self):
from pandas.tools.plotting import lag_plot
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index c4255e706b19f..aa5a5a017146b 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -672,13 +672,15 @@ def lag_plot(series, lag=1, ax=None, **kwds):
return ax
-def autocorrelation_plot(series, ax=None):
+def autocorrelation_plot(series, ax=None, **kwds):
"""Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
+ kwds : keywords
+ Options to pass to matplotlib plotting method
Returns:
-----------
@@ -705,7 +707,9 @@ def r(h):
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
- ax.plot(x, y)
+ ax.plot(x, y, **kwds)
+ if 'label' in kwds:
+ ax.legend()
ax.grid()
return ax
| Unless there's a specific reason not to for this plot? All the others seem to accept them.
``` python
autocorrelation_plot(data, label='ACF')
```

| https://api.github.com/repos/pandas-dev/pandas/pulls/5623 | 2013-11-30T20:15:08Z | 2014-01-15T15:28:37Z | 2014-01-15T15:28:37Z | 2016-11-03T12:37:39Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.