title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
BUG: Fix __truediv__ numexpr error | diff --git a/RELEASE.rst b/RELEASE.rst
index 57cb53c1096f6..ebd88091050f1 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -250,6 +250,8 @@ pandas 0.11.1
not converting dtypes (GH3911_)
- Fixed a bug where ``DataFrame.replace`` with a compiled regular expression
in the ``to_replace`` argument wasn't working (GH3907_)
+ - Fixed ``__truediv__`` in Python 2.7 with ``numexpr`` installed to actually do true division when dividing
+ two integer arrays with at least 10000 cells total (GH3764_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -351,6 +353,7 @@ pandas 0.11.1
.. _GH3907: https://github.com/pydata/pandas/issues/3907
.. _GH3911: https://github.com/pydata/pandas/issues/3911
.. _GH3912: https://github.com/pydata/pandas/issues/3912
+.. _GH3764: https://github.com/pydata/pandas/issues/3764
pandas 0.11.0
=============
diff --git a/pandas/core/expressions.py b/pandas/core/expressions.py
index 34e56fe576a07..abe891b82410c 100644
--- a/pandas/core/expressions.py
+++ b/pandas/core/expressions.py
@@ -51,7 +51,7 @@ def set_numexpr_threads(n = None):
pass
-def _evaluate_standard(op, op_str, a, b, raise_on_error=True):
+def _evaluate_standard(op, op_str, a, b, raise_on_error=True, **eval_kwargs):
""" standard evaluation """
return op(a,b)
@@ -79,7 +79,7 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check):
return False
-def _evaluate_numexpr(op, op_str, a, b, raise_on_error = False):
+def _evaluate_numexpr(op, op_str, a, b, raise_on_error = False, **eval_kwargs):
result = None
if _can_use_numexpr(op, op_str, a, b, 'evaluate'):
@@ -92,7 +92,7 @@ def _evaluate_numexpr(op, op_str, a, b, raise_on_error = False):
result = ne.evaluate('a_value %s b_value' % op_str,
local_dict={ 'a_value' : a_value,
'b_value' : b_value },
- casting='safe')
+ casting='safe', **eval_kwargs)
except (ValueError), detail:
if 'unknown type object' in str(detail):
pass
@@ -142,7 +142,7 @@ def _where_numexpr(cond, a, b, raise_on_error = False):
# turn myself on
set_use_numexpr(True)
-def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True):
+def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True, **eval_kwargs):
""" evaluate and return the expression of the op on a and b
Parameters
@@ -158,7 +158,7 @@ def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True):
"""
if use_numexpr:
- return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error)
+ return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error, **eval_kwargs)
return _evaluate_standard(op, op_str, a, b, raise_on_error=raise_on_error)
def where(cond, a, b, raise_on_error=False, use_numexpr=True):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f0145364363ac..47142daa8b20b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -190,10 +190,10 @@ class DataConflictError(Exception):
# Factory helper methods
-def _arith_method(op, name, str_rep = None, default_axis='columns', fill_zeros=None):
+def _arith_method(op, name, str_rep = None, default_axis='columns', fill_zeros=None, **eval_kwargs):
def na_op(x, y):
try:
- result = expressions.evaluate(op, str_rep, x, y, raise_on_error=True)
+ result = expressions.evaluate(op, str_rep, x, y, raise_on_error=True, **eval_kwargs)
result = com._fill_zeros(result,y,fill_zeros)
except TypeError:
@@ -853,12 +853,17 @@ def __contains__(self, key):
__sub__ = _arith_method(operator.sub, '__sub__', '-', default_axis=None)
__mul__ = _arith_method(operator.mul, '__mul__', '*', default_axis=None)
__truediv__ = _arith_method(operator.truediv, '__truediv__', '/',
- default_axis=None, fill_zeros=np.inf)
+ default_axis=None, fill_zeros=np.inf, truediv=True)
+ # numexpr produces a different value (python/numpy: 0.000, numexpr: inf)
+ # when dividing by zero, so can't use floordiv speed up (yet)
+ # __floordiv__ = _arith_method(operator.floordiv, '__floordiv__', '//',
__floordiv__ = _arith_method(operator.floordiv, '__floordiv__',
default_axis=None, fill_zeros=np.inf)
__pow__ = _arith_method(operator.pow, '__pow__', '**', default_axis=None)
- __mod__ = _arith_method(operator.mod, '__mod__', '*', default_axis=None, fill_zeros=np.nan)
+ # currently causes a floating point exception to occur - so sticking with unaccelerated for now
+ # __mod__ = _arith_method(operator.mod, '__mod__', '%', default_axis=None, fill_zeros=np.nan)
+ __mod__ = _arith_method(operator.mod, '__mod__', default_axis=None, fill_zeros=np.nan)
__radd__ = _arith_method(_radd_compat, '__radd__', default_axis=None)
__rmul__ = _arith_method(operator.mul, '__rmul__', default_axis=None)
@@ -879,7 +884,7 @@ def __contains__(self, key):
# Python 2 division methods
if not py3compat.PY3:
__div__ = _arith_method(operator.div, '__div__', '/',
- default_axis=None, fill_zeros=np.inf)
+ default_axis=None, fill_zeros=np.inf, truediv=False)
__rdiv__ = _arith_method(lambda x, y: y / x, '__rdiv__',
default_axis=None, fill_zeros=np.inf)
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index af7f20a65fa7c..ba0a9926dfa78 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -30,6 +30,7 @@
_frame2 = DataFrame(np.random.randn(100, 4), columns = list('ABCD'), dtype='float64')
_mixed = DataFrame({ 'A' : _frame['A'].copy(), 'B' : _frame['B'].astype('float32'), 'C' : _frame['C'].astype('int64'), 'D' : _frame['D'].astype('int32') })
_mixed2 = DataFrame({ 'A' : _frame2['A'].copy(), 'B' : _frame2['B'].astype('float32'), 'C' : _frame2['C'].astype('int64'), 'D' : _frame2['D'].astype('int32') })
+_integer = DataFrame(np.random.randint(1, 100, size=(10001, 4)), columns = list('ABCD'), dtype='int64')
class TestExpressions(unittest.TestCase):
@@ -41,7 +42,56 @@ def setUp(self):
self.frame2 = _frame2.copy()
self.mixed = _mixed.copy()
self.mixed2 = _mixed2.copy()
-
+ self.integer = _integer.copy()
+ self._MIN_ELEMENTS = expr._MIN_ELEMENTS
+
+ def tearDown(self):
+ expr._MIN_ELEMENTS = self._MIN_ELEMENTS
+
+ #TODO: add test for Panel
+ #TODO: add tests for binary operations
+ @nose.tools.nottest
+ def run_arithmetic_test(self, df, assert_func, check_dtype=False):
+ expr._MIN_ELEMENTS = 0
+ operations = ['add', 'sub', 'mul','mod','truediv','floordiv','pow']
+ if not py3compat.PY3:
+ operations.append('div')
+ for arith in operations:
+ op = getattr(operator, arith)
+ expr.set_use_numexpr(False)
+ expected = op(df, df)
+ expr.set_use_numexpr(True)
+ result = op(df, df)
+ try:
+ if check_dtype:
+ if arith == 'div':
+ assert expected.dtype.kind == df.dtype.kind
+ if arith == 'truediv':
+ assert expected.dtype.kind == 'f'
+ assert_func(expected, result)
+ except Exception:
+ print("Failed test with operator %r" % op.__name__)
+ raise
+
+ def test_integer_arithmetic(self):
+ self.run_arithmetic_test(self.integer, assert_frame_equal)
+ self.run_arithmetic_test(self.integer.icol(0), assert_series_equal,
+ check_dtype=True)
+
+ def test_float_arithemtic(self):
+ self.run_arithmetic_test(self.frame, assert_frame_equal)
+ self.run_arithmetic_test(self.frame.icol(0), assert_series_equal,
+ check_dtype=True)
+
+ def test_mixed_arithmetic(self):
+ self.run_arithmetic_test(self.mixed, assert_frame_equal)
+ for col in self.mixed.columns:
+ self.run_arithmetic_test(self.mixed[col], assert_series_equal)
+
+ def test_integer_with_zeros(self):
+ self.integer *= np.random.randint(0, 2, size=np.shape(self.integer))
+ self.run_arithmetic_test(self.integer, assert_frame_equal)
+ self.run_arithmetic_test(self.integer.icol(0), assert_series_equal)
def test_invalid(self):
| Fixes a number of items relating to accelerated arithmetic in frame.
---
`__truediv__` had been set up to use numexpr, but this happened by passingthe division operator as the string. numexpr's evaluate only checked 2 frames up, which meant that it picked up the division setting from`frame.py` and would do floor/integer division when both inputs were integers.You'd only see that issue with a dataframe large enough to trigger numexpr evaluation (>10000 cells)
This adds test cases to `test_expression.py` that [exhibit this failure under thePython2.7 full deps test](https://travis-ci.org/jtratner/pandas/builds/7822069). The testcases only test `Series` and `DataFrame` (though it looks like neither`Series` nor `Panel` use `numexpr`). It doesn't fail under Python 3 because integer division is totally gone.
Now `evaluate`, `_evaluate_standard` and `_evaluate_numexpr` all accept extra keyword arguments, which are passed to `numexpr.evaluate`.
The test case is currently a separate commit that fails. I wasn't sure whether I should have combined it with the bugfix commit or not. Happyto change it if that's more appropriate.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3764 | 2013-06-05T22:40:00Z | 2013-06-18T13:01:18Z | 2013-06-18T13:01:18Z | 2014-06-13T05:42:35Z |
ENH: support HDFStore in Python3 (via PyTables 3.0.0) | diff --git a/README.rst b/README.rst
index daea702476ebc..85868176722bd 100644
--- a/README.rst
+++ b/README.rst
@@ -85,7 +85,6 @@ Optional dependencies
- `Cython <http://www.cython.org>`__: Only necessary to build development version. Version 0.17.1 or higher.
- `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions
- `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage
- - Not yet supported on python >= 3
- `matplotlib <http://matplotlib.sourceforge.net/>`__: for plotting
- `statsmodels <http://statsmodels.sourceforge.net/>`__
- Needed for parts of :mod:`pandas.stats`
diff --git a/RELEASE.rst b/RELEASE.rst
index 12d2389a8a59b..3a347246be8dd 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -63,6 +63,7 @@ pandas 0.11.1
to append an index with a different name than the existing
- support datelike columns with a timezone as data_columns (GH2852_)
- table writing performance improvements.
+ - support python3 (via ``PyTables 3.0.0``) (GH3750_)
- Add modulo operator to Series, DataFrame
- Add ``date`` method to DatetimeIndex
- Simplified the API and added a describe method to Categorical
@@ -79,10 +80,14 @@ pandas 0.11.1
**API Changes**
- - When removing an object from a ``HDFStore``, ``remove(key)`` raises
- ``KeyError`` if the key is not a valid store object.
- - In an ``HDFStore``, raise a ``TypeError`` on passing ``where`` or ``columns``
- to select with a Storer; these are invalid parameters at this time
+ - ``HDFStore``
+
+ - When removing an object, ``remove(key)`` raises
+ ``KeyError`` if the key is not a valid store object.
+ - raise a ``TypeError`` on passing ``where`` or ``columns``
+ to select with a Storer; these are invalid parameters at this time
+ - can now specify an ``encoding`` option to ``append/put``
+ to enable alternate encodings (GH3750_)
- The repr() for (Multi)Index now obeys display.max_seq_items rather
then numpy threshold print options. (GH3426_, GH3466_)
- Added mangle_dupe_cols option to read_table/csv, allowing users
@@ -288,6 +293,7 @@ pandas 0.11.1
.. _GH3740: https://github.com/pydata/pandas/issues/3740
.. _GH3748: https://github.com/pydata/pandas/issues/3748
.. _GH3741: https://github.com/pydata/pandas/issues/3741
+.. _GH3750: https://github.com/pydata/pandas/issues/3750
pandas 0.11.0
=============
diff --git a/ci/install.sh b/ci/install.sh
index a091834a9570f..b748070db85aa 100755
--- a/ci/install.sh
+++ b/ci/install.sh
@@ -69,13 +69,11 @@ if ( ! $VENV_FILE_AVAILABLE ); then
pip install $PIP_ARGS cython
if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then
- # installed explicitly above, to get the library as well
- # sudo apt-get $APT_ARGS install libhdf5-serial-dev;
- pip install numexpr
- pip install tables
pip install $PIP_ARGS xlwt
fi
+ pip install numexpr
+ pip install tables
pip install $PIP_ARGS matplotlib
pip install $PIP_ARGS openpyxl
pip install $PIP_ARGS xlrd>=0.9.0
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 6868969c1b968..9dc8064da45e3 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -95,7 +95,6 @@ Optional Dependencies
version. Version 0.17.1 or higher.
* `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions
* `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage
- * Not yet supported on python >= 3
* `matplotlib <http://matplotlib.sourceforge.net/>`__: for plotting
* `statsmodels <http://statsmodels.sourceforge.net/>`__
* Needed for parts of :mod:`pandas.stats`
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 802ab08e85932..1c615ca278668 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1300,12 +1300,11 @@ the high performance HDF5 format using the excellent `PyTables
<http://www.pytables.org/>`__ library. See the :ref:`cookbook<cookbook.hdf>`
for some advanced strategies
-.. warning::
+.. note::
- ``PyTables`` 3.0.0 was recently released. This enables support for Python 3,
- however, it has not been integrated into pandas as of yet. (Under Python 2,
- ``PyTables`` version >= 2.3 is supported).
-
+ ``PyTables`` 3.0.0 was recently released to enables support for Python 3.
+ Pandas should be fully compatible (and previously written stores should be
+ backwards compatible) with all ``PyTables`` >= 2.3
.. ipython:: python
:suppress:
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index b2fee1acbc4d6..badb364d214d1 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -237,6 +237,9 @@ Enhancements
pd.get_option('a.b')
pd.get_option('b.c')
+ - Support for ``HDFStore`` (via ``PyTables 3.0.0``) on Python3
+
+
Bug Fixes
~~~~~~~~~
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 5a480e08effba..b1b7b80e5fd23 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -13,12 +13,13 @@
import numpy as np
from pandas import (
- Series, TimeSeries, DataFrame, Panel, Panel4D, Index, MultiIndex, Int64Index
+ Series, TimeSeries, DataFrame, Panel, Panel4D, Index,
+ MultiIndex, Int64Index, Timestamp
)
from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel
from pandas.sparse.array import BlockIndex, IntIndex
from pandas.tseries.api import PeriodIndex, DatetimeIndex
-from pandas.core.common import adjoin, isnull
+from pandas.core.common import adjoin, isnull, is_list_like
from pandas.core.algorithms import match, unique, factorize
from pandas.core.categorical import Categorical
from pandas.core.common import _asarray_tuplesafe, _try_sort
@@ -27,6 +28,7 @@
from pandas.core.index import Int64Index, _ensure_index
import pandas.core.common as com
from pandas.tools.merge import concat
+from pandas.util import py3compat
import pandas.lib as lib
import pandas.algos as algos
@@ -37,6 +39,21 @@
# versioning attribute
_version = '0.10.1'
+# PY3 encoding if we don't specify
+_default_encoding = 'UTF-8'
+
+def _ensure_decoded(s):
+ """ if we have bytes, decode them to unicde """
+ if isinstance(s, np.bytes_):
+ s = s.decode('UTF-8')
+ return s
+def _ensure_encoding(encoding):
+ # set the encoding if we need
+ if encoding is None:
+ if py3compat.PY3:
+ encoding = _default_encoding
+ return encoding
+
class IncompatibilityWarning(Warning): pass
incompatibility_doc = """
where criteria is being ignored as this version [%s] is too old (or not-defined),
@@ -56,40 +73,40 @@ class PerformanceWarning(Warning): pass
# map object types
_TYPE_MAP = {
- Series : 'series',
- SparseSeries : 'sparse_series',
- TimeSeries : 'series',
- DataFrame : 'frame',
- SparseDataFrame : 'sparse_frame',
- Panel : 'wide',
- Panel4D : 'ndim',
- SparsePanel : 'sparse_panel'
+ Series : u'series',
+ SparseSeries : u'sparse_series',
+ TimeSeries : u'series',
+ DataFrame : u'frame',
+ SparseDataFrame : u'sparse_frame',
+ Panel : u'wide',
+ Panel4D : u'ndim',
+ SparsePanel : u'sparse_panel'
}
# storer class map
_STORER_MAP = {
- 'TimeSeries' : 'LegacySeriesStorer',
- 'Series' : 'LegacySeriesStorer',
- 'DataFrame' : 'LegacyFrameStorer',
- 'DataMatrix' : 'LegacyFrameStorer',
- 'series' : 'SeriesStorer',
- 'sparse_series' : 'SparseSeriesStorer',
- 'frame' : 'FrameStorer',
- 'sparse_frame' : 'SparseFrameStorer',
- 'wide' : 'PanelStorer',
- 'sparse_panel' : 'SparsePanelStorer',
+ u'TimeSeries' : 'LegacySeriesStorer',
+ u'Series' : 'LegacySeriesStorer',
+ u'DataFrame' : 'LegacyFrameStorer',
+ u'DataMatrix' : 'LegacyFrameStorer',
+ u'series' : 'SeriesStorer',
+ u'sparse_series' : 'SparseSeriesStorer',
+ u'frame' : 'FrameStorer',
+ u'sparse_frame' : 'SparseFrameStorer',
+ u'wide' : 'PanelStorer',
+ u'sparse_panel' : 'SparsePanelStorer',
}
# table class map
_TABLE_MAP = {
- 'generic_table' : 'GenericTable',
- 'appendable_frame' : 'AppendableFrameTable',
- 'appendable_multiframe' : 'AppendableMultiFrameTable',
- 'appendable_panel' : 'AppendablePanelTable',
- 'appendable_ndim' : 'AppendableNDimTable',
- 'worm' : 'WORMTable',
- 'legacy_frame' : 'LegacyFrameTable',
- 'legacy_panel' : 'LegacyPanelTable',
+ u'generic_table' : 'GenericTable',
+ u'appendable_frame' : 'AppendableFrameTable',
+ u'appendable_multiframe' : 'AppendableMultiFrameTable',
+ u'appendable_panel' : 'AppendablePanelTable',
+ u'appendable_ndim' : 'AppendableNDimTable',
+ u'worm' : 'WORMTable',
+ u'legacy_frame' : 'LegacyFrameTable',
+ u'legacy_panel' : 'LegacyPanelTable',
}
# axes map
@@ -522,15 +539,16 @@ def put(self, key, value, table=None, append=False, **kwargs):
Parameters
----------
- key : object
- value : {Series, DataFrame, Panel}
- table : boolean, default False
+ key : object
+ value : {Series, DataFrame, Panel}
+ table : boolean, default False
Write as a PyTables Table structure which may perform worse but
allow more flexible operations like searching / selecting subsets
of the data
- append : boolean, default False
+ append : boolean, default False
For table data structures, append the input data to the existing
table
+ encoding : default None, provide an encoding for strings
"""
self._write_to_group(key, value, table=table, append=append, **kwargs)
@@ -595,6 +613,7 @@ def append(self, key, value, columns=None, **kwargs):
nan_rep : string to use as string nan represenation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
+ encoding : default None, provide an encoding for strings
Notes
-----
@@ -692,7 +711,8 @@ def create_table_index(self, key, **kwargs):
def groups(self):
""" return a list of all the top-level nodes (that are not themselves a pandas storage object) """
_tables()
- return [ g for g in self._handle.walkNodes() if getattr(g._v_attrs,'pandas_type',None) or getattr(g,'table',None) or (isinstance(g,_table_mod.table.Table) and g._v_name != 'table') ]
+ return [ g for g in self._handle.walkNodes() if getattr(g._v_attrs,'pandas_type',None) or getattr(
+ g,'table',None) or (isinstance(g,_table_mod.table.Table) and g._v_name != u'table') ]
def get_node(self, key):
""" return the node with the key or None if it does not exist """
@@ -712,7 +732,8 @@ def get_storer(self, key):
s.infer_axes()
return s
- def copy(self, file, mode = 'w', propindexes = True, keys = None, complib = None, complevel = None, fletcher32 = False, overwrite = True):
+ def copy(self, file, mode = 'w', propindexes = True, keys = None, complib = None, complevel = None,
+ fletcher32 = False, overwrite = True):
""" copy the existing store to a new file, upgrading in place
Parameters
@@ -746,9 +767,9 @@ def copy(self, file, mode = 'w', propindexes = True, keys = None, complib = None
index = False
if propindexes:
index = [ a.name for a in s.axes if a.is_indexed ]
- new_store.append(k,data, index=index, data_columns=getattr(s,'data_columns',None))
+ new_store.append(k, data, index=index, data_columns=getattr(s,'data_columns',None), encoding=s.encoding)
else:
- new_store.put(k,data)
+ new_store.put(k, data, encoding=s.encoding)
return new_store
@@ -761,8 +782,8 @@ def error(t):
raise TypeError("cannot properly create the storer for: [%s] [group->%s,value->%s,table->%s,append->%s,kwargs->%s]" %
(t,group,type(value),table,append,kwargs))
- pt = getattr(group._v_attrs,'pandas_type',None)
- tt = getattr(group._v_attrs,'table_type',None)
+ pt = _ensure_decoded(getattr(group._v_attrs,'pandas_type',None))
+ tt = _ensure_decoded(getattr(group._v_attrs,'table_type',None))
# infer the pt from the passed value
if pt is None:
@@ -770,8 +791,8 @@ def error(t):
_tables()
if getattr(group,'table',None) or isinstance(group,_table_mod.table.Table):
- pt = 'frame_table'
- tt = 'generic_table'
+ pt = u'frame_table'
+ tt = u'generic_table'
else:
raise TypeError("cannot create a storer if the object is not existing nor a value are passed")
else:
@@ -783,10 +804,10 @@ def error(t):
# we are actually a table
if table or append:
- pt += '_table'
+ pt += u'_table'
# a storer node
- if 'table' not in pt:
+ if u'table' not in pt:
try:
return globals()[_STORER_MAP[pt]](self, group, **kwargs)
except:
@@ -798,26 +819,26 @@ def error(t):
# if we are a writer, determin the tt
if value is not None:
- if pt == 'frame_table':
+ if pt == u'frame_table':
index = getattr(value,'index',None)
if index is not None:
if index.nlevels == 1:
- tt = 'appendable_frame'
+ tt = u'appendable_frame'
elif index.nlevels > 1:
- tt = 'appendable_multiframe'
- elif pt == 'wide_table':
- tt = 'appendable_panel'
- elif pt == 'ndim_table':
- tt = 'appendable_ndim'
+ tt = u'appendable_multiframe'
+ elif pt == u'wide_table':
+ tt = u'appendable_panel'
+ elif pt == u'ndim_table':
+ tt = u'appendable_ndim'
else:
# distiguish between a frame/table
- tt = 'legacy_panel'
+ tt = u'legacy_panel'
try:
fields = group.table._v_attrs.fields
- if len(fields) == 1 and fields[0] == 'value':
- tt = 'legacy_frame'
+ if len(fields) == 1 and fields[0] == u'value':
+ tt = u'legacy_frame'
except:
pass
@@ -826,7 +847,8 @@ def error(t):
except:
error('_TABLE_MAP')
- def _write_to_group(self, key, value, index=True, table=False, append=False, complib=None, **kwargs):
+ def _write_to_group(self, key, value, index=True, table=False, append=False,
+ complib=None, encoding=None, **kwargs):
group = self.get_node(key)
# remove the node if we are not appending
@@ -851,7 +873,8 @@ def _write_to_group(self, key, value, index=True, table=False, append=False, com
group = self._handle.createGroup(path, p)
path = new_path
- s = self._create_storer(group, value, table=table, append=append, **kwargs)
+ s = self._create_storer(group, value, table=table, append=append,
+ encoding=encoding, **kwargs)
if append:
# raise if we are trying to append to a non-table,
# or a table that exists (and we are putting)
@@ -890,7 +913,7 @@ class TableIterator(object):
def __init__(self, func, nrows, start=None, stop=None, chunksize=None):
self.func = func
- self.nrows = nrows
+ self.nrows = nrows or 0
self.start = start or 0
if stop is None:
@@ -1015,7 +1038,7 @@ def infer(self, table):
new_self.get_attr()
return new_self
- def convert(self, values, nan_rep):
+ def convert(self, values, nan_rep, encoding):
""" set the values from this selection: take = take ownership """
try:
values = values[self.cname]
@@ -1024,19 +1047,19 @@ def convert(self, values, nan_rep):
kwargs = dict()
if self.freq is not None:
- kwargs['freq'] = self.freq
+ kwargs['freq'] = _ensure_decoded(self.freq)
if self.tz is not None:
- kwargs['tz'] = self.tz
+ kwargs['tz'] = _ensure_decoded(self.tz)
if self.index_name is not None:
- kwargs['name'] = self.index_name
+ kwargs['name'] = _ensure_decoded(self.index_name)
try:
- self.values = Index(_maybe_convert(values, self.kind), **kwargs)
+ self.values = Index(_maybe_convert(values, self.kind, self.encoding), **kwargs)
except:
# if the output freq is different that what we recorded, then infer it
if 'freq' in kwargs:
kwargs['freq'] = 'infer'
- self.values = Index(_maybe_convert(values, self.kind), **kwargs)
+ self.values = Index(_maybe_convert(values, self.kind, encoding), **kwargs)
return self
def take_data(self):
@@ -1068,7 +1091,7 @@ def __iter__(self):
def maybe_set_size(self, min_itemsize=None, **kwargs):
""" maybe set a string col itemsize:
min_itemsize can be an interger or a dict with this columns name with an integer size """
- if self.kind == 'string':
+ if _ensure_decoded(self.kind) == u'string':
if isinstance(min_itemsize, dict):
min_itemsize = min_itemsize.get(self.name)
@@ -1088,7 +1111,7 @@ def validate_col(self, itemsize=None):
# validate this column for string truncation (or reset to the max size)
dtype = getattr(self, 'dtype', None)
- if self.kind == 'string':
+ if _ensure_decoded(self.kind) == u'string':
c = self.col
if c is not None:
@@ -1167,7 +1190,7 @@ class GenericIndexCol(IndexCol):
def is_indexed(self):
return False
- def convert(self, values, nan_rep):
+ def convert(self, values, nan_rep, encoding):
""" set the values from this selection: take = take ownership """
self.values = Int64Index(np.arange(self.table.nrows))
@@ -1218,7 +1241,7 @@ def __init__(self, values=None, kind=None, typ=None, cname=None, data=None, bloc
super(DataCol, self).__init__(
values=values, kind=kind, typ=typ, cname=cname, **kwargs)
self.dtype = None
- self.dtype_attr = "%s_dtype" % self.name
+ self.dtype_attr = u"%s_dtype" % self.name
self.set_data(data)
def __repr__(self):
@@ -1246,22 +1269,25 @@ def take_data(self):
def set_kind(self):
# set my kind if we can
if self.dtype is not None:
- if self.dtype.startswith('string'):
+ dtype = _ensure_decoded(self.dtype)
+ if dtype.startswith(u'string') or dtype.startswith(u'bytes'):
self.kind = 'string'
- elif self.dtype.startswith('float'):
+ elif dtype.startswith(u'float'):
self.kind = 'float'
- elif self.dtype.startswith('int'):
+ elif dtype.startswith(u'int') or dtype.startswith(u'uint'):
self.kind = 'integer'
- elif self.dtype.startswith('date'):
+ elif dtype.startswith(u'date'):
self.kind = 'datetime'
- elif self.dtype.startswith('bool'):
+ elif dtype.startswith(u'bool'):
self.kind = 'bool'
+ else:
+ raise AssertionError("cannot interpret dtype of [%s] in [%s]" % (dtype,self))
# set my typ if we need
if self.typ is None:
self.typ = getattr(self.description,self.cname,None)
- def set_atom(self, block, existing_col, min_itemsize, nan_rep, info, **kwargs):
+ def set_atom(self, block, existing_col, min_itemsize, nan_rep, info, encoding=None, **kwargs):
""" create and setup my atom from the block b """
self.values = list(block.items)
@@ -1304,7 +1330,7 @@ def set_atom(self, block, existing_col, min_itemsize, nan_rep, info, **kwargs):
# this is basically a catchall; if say a datetime64 has nans then will
# end up here ###
elif inferred_type == 'string' or dtype == 'object':
- self.set_atom_string(block, existing_col, min_itemsize, nan_rep)
+ self.set_atom_string(block, existing_col, min_itemsize, nan_rep, encoding)
else:
self.set_atom_data(block)
@@ -1313,7 +1339,7 @@ def set_atom(self, block, existing_col, min_itemsize, nan_rep, info, **kwargs):
def get_atom_string(self, block, itemsize):
return _tables().StringCol(itemsize=itemsize, shape=block.shape[0])
- def set_atom_string(self, block, existing_col, min_itemsize, nan_rep):
+ def set_atom_string(self, block, existing_col, min_itemsize, nan_rep, encoding):
# fill nan items with myself
block = block.fillna(nan_rep)
data = block.values
@@ -1334,7 +1360,7 @@ def set_atom_string(self, block, existing_col, min_itemsize, nan_rep):
# itemsize is the maximum length of a string (along any dimension)
- itemsize = lib.max_len_string_array(data.ravel())
+ itemsize = lib.max_len_string_array(com._ensure_object(data.ravel()))
# specified min_itemsize?
if isinstance(min_itemsize, dict):
@@ -1351,10 +1377,10 @@ def set_atom_string(self, block, existing_col, min_itemsize, nan_rep):
self.itemsize = itemsize
self.kind = 'string'
self.typ = self.get_atom_string(block, itemsize)
- self.set_data(self.convert_string_data(data, itemsize))
+ self.set_data(self.convert_string_data(data, itemsize, encoding))
- def convert_string_data(self, data, itemsize):
- return data.astype('S%s' % itemsize)
+ def convert_string_data(self, data, itemsize, encoding):
+ return _convert_string_array(data, encoding, itemsize)
def get_atom_coltype(self):
""" return the PyTables column class for this column """
@@ -1407,7 +1433,7 @@ def validate_attr(self, append):
raise ValueError("appended items dtype do not match existing items dtype"
" in table!")
- def convert(self, values, nan_rep):
+ def convert(self, values, nan_rep, encoding):
""" set the data from this selection (and convert to the correct dtype if we can) """
try:
values = values[self.cname]
@@ -1417,9 +1443,10 @@ def convert(self, values, nan_rep):
# convert to the correct dtype
if self.dtype is not None:
+ dtype = _ensure_decoded(self.dtype)
# reverse converts
- if self.dtype == 'datetime64':
+ if dtype == u'datetime64':
# recreate the timezone
if self.tz is not None:
@@ -1432,30 +1459,30 @@ def convert(self, values, nan_rep):
else:
self.data = np.asarray(self.data, dtype='M8[ns]')
- elif self.dtype == 'date':
+ elif dtype == u'date':
self.data = np.array(
[date.fromtimestamp(v) for v in self.data], dtype=object)
- elif self.dtype == 'datetime':
+ elif dtype == u'datetime':
self.data = np.array(
[datetime.fromtimestamp(v) for v in self.data],
dtype=object)
else:
try:
- self.data = self.data.astype(self.dtype)
+ self.data = self.data.astype(dtype)
except:
self.data = self.data.astype('O')
- # convert nans
- if self.kind == 'string':
- self.data = lib.array_replace_from_nan_rep(
- self.data.ravel(), nan_rep).reshape(self.data.shape)
+ # convert nans / decode
+ if _ensure_decoded(self.kind) == u'string':
+ self.data = _unconvert_string_array(self.data, nan_rep=nan_rep, encoding=encoding)
+
return self
def get_attr(self):
""" get the data for this colummn """
self.values = getattr(self.attrs, self.kind_attr, None)
- self.dtype = getattr(self.attrs, self.dtype_attr, None)
+ self.dtype = getattr(self.attrs, self.dtype_attr, None)
self.set_kind()
def set_attr(self):
@@ -1471,7 +1498,7 @@ class DataIndexableCol(DataCol):
@property
def is_searchable(self):
- return self.kind == 'string'
+ return _ensure_decoded(self.kind) == u'string'
def get_atom_string(self, block, itemsize):
return _tables().StringCol(itemsize=itemsize)
@@ -1504,9 +1531,10 @@ class Storer(object):
ndim = None
is_table = False
- def __init__(self, parent, group, **kwargs):
+ def __init__(self, parent, group, encoding=None, **kwargs):
self.parent = parent
self.group = group
+ self.encoding = _ensure_encoding(encoding)
self.set_version()
@property
@@ -1515,7 +1543,7 @@ def is_old_version(self):
def set_version(self):
""" compute and set our version """
- version = getattr(self.group._v_attrs,'pandas_version',None)
+ version = _ensure_decoded(getattr(self.group._v_attrs,'pandas_version',None))
try:
self.version = tuple([int(x) for x in version.split('.')])
if len(self.version) == 2:
@@ -1525,7 +1553,7 @@ def set_version(self):
@property
def pandas_type(self):
- return getattr(self.group._v_attrs, 'pandas_type', None)
+ return _ensure_decoded(getattr(self.group._v_attrs, 'pandas_type', None))
def __repr__(self):
""" return a pretty representatgion of myself """
@@ -1674,10 +1702,18 @@ def validate_read(self, kwargs):
def is_exists(self):
return True
+ def set_attrs(self):
+ """ set our object attributes """
+ self.attrs.encoding = self.encoding
+
def get_attrs(self):
""" retrieve our attributes """
+ self.encoding = _ensure_encoding(getattr(self.attrs,'encoding',None))
for n in self.attributes:
- setattr(self,n,getattr(self.attrs, n, None))
+ setattr(self,n,_ensure_decoded(getattr(self.attrs, n, None)))
+
+ def write(self, obj, **kwargs):
+ self.set_attrs()
def read_array(self, key):
""" read an array for the specified node (off of group """
@@ -1700,7 +1736,7 @@ def read_array(self, key):
else:
ret = data
- if dtype == 'datetime64':
+ if dtype == u'datetime64':
ret = np.array(ret, dtype='M8[ns]')
if transposed:
@@ -1709,15 +1745,15 @@ def read_array(self, key):
return ret
def read_index(self, key):
- variety = getattr(self.attrs, '%s_variety' % key)
+ variety = _ensure_decoded(getattr(self.attrs, '%s_variety' % key))
- if variety == 'multi':
+ if variety == u'multi':
return self.read_multi_index(key)
- elif variety == 'block':
+ elif variety == u'block':
return self.read_block_index(key)
- elif variety == 'sparseint':
+ elif variety == u'sparseint':
return self.read_sparse_intindex(key)
- elif variety == 'regular':
+ elif variety == u'regular':
_, index = self.read_index_node(getattr(self.group, key))
return index
else: # pragma: no cover
@@ -1735,7 +1771,7 @@ def write_index(self, key, index):
self.write_sparse_intindex(key, index)
else:
setattr(self.attrs, '%s_variety' % key, 'regular')
- converted = _convert_index(index).set_name('index')
+ converted = _convert_index(index,self.encoding).set_name('index')
self.write_array(key, converted.values)
node = getattr(self.group, key)
node._v_attrs.kind = converted.kind
@@ -1782,7 +1818,7 @@ def write_multi_index(self, key, index):
index.names)):
# write the level
level_key = '%s_level%d' % (key, i)
- conv_level = _convert_index(lev).set_name(level_key)
+ conv_level = _convert_index(lev, self.encoding).set_name(level_key)
self.write_array(level_key, conv_level.values)
node = getattr(self.group, level_key)
node._v_attrs.kind = conv_level.kind
@@ -1815,7 +1851,7 @@ def read_multi_index(self, key):
def read_index_node(self, node):
data = node[:]
- kind = node._v_attrs.kind
+ kind = _ensure_decoded(node._v_attrs.kind)
name = None
if 'name' in node._v_attrs:
@@ -1826,23 +1862,22 @@ def read_index_node(self, node):
factory = self._get_index_factory(index_class)
kwargs = {}
- if 'freq' in node._v_attrs:
+ if u'freq' in node._v_attrs:
kwargs['freq'] = node._v_attrs['freq']
- if 'tz' in node._v_attrs:
+ if u'tz' in node._v_attrs:
kwargs['tz'] = node._v_attrs['tz']
- if kind in ('date', 'datetime'):
- index = factory(_unconvert_index(data, kind), dtype=object,
+ if kind in (u'date', u'datetime'):
+ index = factory(_unconvert_index(data, kind, encoding=self.encoding), dtype=object,
**kwargs)
else:
- index = factory(_unconvert_index(data, kind), **kwargs)
+ index = factory(_unconvert_index(data, kind, encoding=self.encoding), **kwargs)
index.name = name
return name, index
-
def write_array_empty(self, key, value):
""" write a 0-len array """
@@ -1922,7 +1957,7 @@ def read_index_legacy(self, key):
node = getattr(self.group,key)
data = node[:]
kind = node._v_attrs.kind
- return _unconvert_index_legacy(data, kind)
+ return _unconvert_index_legacy(data, kind, encoding=self.encoding)
class LegacySeriesStorer(LegacyStorer):
@@ -1942,7 +1977,7 @@ def read(self, **kwargs):
return DataFrame(values, index=index, columns=columns)
class SeriesStorer(GenericStorer):
- pandas_kind = 'series'
+ pandas_kind = u'series'
attributes = ['name']
@property
@@ -1963,12 +1998,13 @@ def read(self, **kwargs):
return Series(values, index=index, name=self.name)
def write(self, obj, **kwargs):
+ super(SeriesStorer, self).write(obj, **kwargs)
self.write_index('index', obj.index)
self.write_array('values', obj.values)
self.attrs.name = obj.name
class SparseSeriesStorer(GenericStorer):
- pandas_kind = 'sparse_series'
+ pandas_kind = u'sparse_series'
attributes = ['name','fill_value','kind']
def read(self, **kwargs):
@@ -1977,10 +2013,11 @@ def read(self, **kwargs):
sp_values = self.read_array('sp_values')
sp_index = self.read_index('sp_index')
return SparseSeries(sp_values, index=index, sparse_index=sp_index,
- kind=self.kind or 'block', fill_value=self.fill_value,
+ kind=self.kind or u'block', fill_value=self.fill_value,
name=self.name)
def write(self, obj, **kwargs):
+ super(SparseSeriesStorer, self).write(obj, **kwargs)
self.write_index('index', obj.index)
self.write_index('sp_index', obj.sp_index)
self.write_array('sp_values', obj.sp_values)
@@ -1989,7 +2026,7 @@ def write(self, obj, **kwargs):
self.attrs.kind = obj.kind
class SparseFrameStorer(GenericStorer):
- pandas_kind = 'sparse_frame'
+ pandas_kind = u'sparse_frame'
attributes = ['default_kind','default_fill_value']
def read(self, **kwargs):
@@ -2007,6 +2044,7 @@ def read(self, **kwargs):
def write(self, obj, **kwargs):
""" write it as a collection of individual sparse series """
+ super(SparseFrameStorer, self).write(obj, **kwargs)
for name, ss in obj.iteritems():
key = 'sparse_series_%s' % name
if key not in self.group._v_children:
@@ -2020,7 +2058,7 @@ def write(self, obj, **kwargs):
self.write_index('columns', obj.columns)
class SparsePanelStorer(GenericStorer):
- pandas_kind = 'sparse_panel'
+ pandas_kind = u'sparse_panel'
attributes = ['default_kind','default_fill_value']
def read(self, **kwargs):
@@ -2038,11 +2076,12 @@ def read(self, **kwargs):
default_fill_value=self.default_fill_value)
def write(self, obj, **kwargs):
+ super(SparsePanelStorer, self).write(obj, **kwargs)
self.attrs.default_fill_value = obj.default_fill_value
self.attrs.default_kind = obj.default_kind
self.write_index('items', obj.items)
- for name, sdf in obj.iteritems():
+ for name, sdf in obj.iterkv():
key = 'sparse_frame_%s' % name
if key not in self.group._v_children:
node = self._handle.createGroup(self.group, key)
@@ -2105,6 +2144,7 @@ def read(self, **kwargs):
return self.obj_type(BlockManager(blocks, axes))
def write(self, obj, **kwargs):
+ super(BlockManagerStorer, self).write(obj, **kwargs)
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
@@ -2122,11 +2162,11 @@ def write(self, obj, **kwargs):
self.write_index('block%d_items' % i, blk.items)
class FrameStorer(BlockManagerStorer):
- pandas_kind = 'frame'
+ pandas_kind = u'frame'
obj_type = DataFrame
class PanelStorer(BlockManagerStorer):
- pandas_kind = 'wide'
+ pandas_kind = u'wide'
obj_type = Panel
is_shape_reversed = True
@@ -2151,7 +2191,7 @@ class Table(Storer):
levels : the names of levels
"""
- pandas_kind = 'wide_table'
+ pandas_kind = u'wide_table'
table_type = None
levels = 1
is_table = True
@@ -2225,7 +2265,7 @@ def nrows_expected(self):
@property
def is_exists(self):
""" has this table been created """
- return 'table' in self.group
+ return u'table' in self.group
@property
def storable(self):
@@ -2291,6 +2331,7 @@ def set_attrs(self):
self.attrs.non_index_axes = self.non_index_axes
self.attrs.data_columns = self.data_columns
self.attrs.nan_rep = self.nan_rep
+ self.attrs.encoding = self.encoding
self.attrs.levels = self.levels
self.set_info()
@@ -2300,6 +2341,7 @@ def get_attrs(self):
self.data_columns = getattr(self.attrs,'data_columns',None) or []
self.info = getattr(self.attrs,'info',None) or dict()
self.nan_rep = getattr(self.attrs,'nan_rep',None)
+ self.encoding = _ensure_encoding(getattr(self.attrs,'encoding',None))
self.levels = getattr(self.attrs,'levels',None) or []
t = self.table
self.index_axes = [ a.infer(t) for a in self.indexables if a.is_an_indexable ]
@@ -2430,7 +2472,7 @@ def read_axes(self, where, **kwargs):
# convert the data
for a in self.axes:
a.set_info(self.info)
- a.convert(values, nan_rep=self.nan_rep)
+ a.convert(values, nan_rep=self.nan_rep, encoding=self.encoding)
return True
@@ -2473,6 +2515,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
validate: validate the obj against an existiing object already written
min_itemsize: a dict of the min size for a column in bytes
nan_rep : a values to use for string column nan_rep
+ encoding : the encoding for string values
data_columns : a list of columns that we want to create separate to allow indexing (or True will force all colummns)
"""
@@ -2492,10 +2535,11 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
if self.infer_axes():
existing_table = self.copy()
existing_table.infer_axes()
- axes = [ a.axis for a in existing_table.index_axes]
- data_columns = existing_table.data_columns
- nan_rep = existing_table.nan_rep
- self.info = copy.copy(existing_table.info)
+ axes = [ a.axis for a in existing_table.index_axes]
+ data_columns = existing_table.data_columns
+ nan_rep = existing_table.nan_rep
+ self.encoding = existing_table.encoding
+ self.info = copy.copy(existing_table.info)
else:
existing_table = None
@@ -2510,6 +2554,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
# nan_representation
if nan_rep is None:
nan_rep = 'nan'
+
self.nan_rep = nan_rep
# create axes to index and non_index
@@ -2519,7 +2564,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
if i in axes:
name = obj._AXIS_NAMES[i]
index_axes_map[i] = _convert_index(
- a).set_name(name).set_axis(i)
+ a, self.encoding).set_name(name).set_axis(i)
else:
# we might be able to change the axes on the appending data if
@@ -2595,6 +2640,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
existing_col=existing_col,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
+ encoding=self.encoding,
info=self.info,
**kwargs)
col.set_pos(j)
@@ -2716,7 +2762,7 @@ def read_column(self, column, where = None, **kwargs):
# column must be an indexable or a data column
c = getattr(self.table.cols, column)
a.set_info(self.info)
- return Series(a.convert(c[:], nan_rep=self.nan_rep).take_data())
+ return Series(a.convert(c[:], nan_rep=self.nan_rep, encoding=self.encoding).take_data())
raise KeyError("column [%s] not found in the table" % column)
@@ -2725,7 +2771,7 @@ class WORMTable(Table):
table. writing is a one-time operation the data are stored in a format
that allows for searching the data on disk
"""
- table_type = 'worm'
+ table_type = u'worm'
def read(self, **kwargs):
""" read the indicies and the indexing array, calculate offset rows and
@@ -2750,7 +2796,7 @@ class LegacyTable(Table):
IndexCol(name='column', axis=2,
pos=1, index_kind='columns_kind'),
DataCol(name='fields', cname='values', kind_attr='fields', pos=2)]
- table_type = 'legacy'
+ table_type = u'legacy'
ndim = 3
def write(self, **kwargs):
@@ -2840,8 +2886,8 @@ def read(self, where=None, columns=None, **kwargs):
class LegacyFrameTable(LegacyTable):
""" support the legacy frame table """
- pandas_kind = 'frame_table'
- table_type = 'legacy_frame'
+ pandas_kind = u'frame_table'
+ table_type = u'legacy_frame'
obj_type = Panel
def read(self, *args, **kwargs):
@@ -2850,14 +2896,14 @@ def read(self, *args, **kwargs):
class LegacyPanelTable(LegacyTable):
""" support the legacy panel table """
- table_type = 'legacy_panel'
+ table_type = u'legacy_panel'
obj_type = Panel
class AppendableTable(LegacyTable):
""" suppor the new appendable table formats """
_indexables = None
- table_type = 'appendable'
+ table_type = u'appendable'
def write(self, obj, axes=None, append=False, complib=None,
complevel=None, fletcher32=None, min_itemsize=None, chunksize=None,
@@ -2868,7 +2914,8 @@ def write(self, obj, axes=None, append=False, complib=None,
# create the axes
self.create_axes(axes=axes, obj=obj, validate=append,
- min_itemsize=min_itemsize, **kwargs)
+ min_itemsize=min_itemsize,
+ **kwargs)
if not self.is_exists:
@@ -3019,8 +3066,8 @@ def delete(self, where=None, **kwargs):
class AppendableFrameTable(AppendableTable):
""" suppor the new appendable table formats """
- pandas_kind = 'frame_table'
- table_type = 'appendable_frame'
+ pandas_kind = u'frame_table'
+ table_type = u'appendable_frame'
ndim = 2
obj_type = DataFrame
@@ -3074,8 +3121,8 @@ def read(self, where=None, columns=None, **kwargs):
class GenericTable(AppendableFrameTable):
""" a table that read/writes the generic pytables table format """
- pandas_kind = 'frame_table'
- table_type = 'generic_table'
+ pandas_kind = u'frame_table'
+ table_type = u'generic_table'
ndim = 2
obj_type = DataFrame
@@ -3119,13 +3166,13 @@ def write(self, **kwargs):
class AppendableMultiFrameTable(AppendableFrameTable):
""" a frame with a multi-index """
- table_type = 'appendable_multiframe'
+ table_type = u'appendable_multiframe'
obj_type = DataFrame
ndim = 2
@property
def table_type_short(self):
- return 'appendable_multi'
+ return u'appendable_multi'
def write(self, obj, data_columns=None, **kwargs):
if data_columns is None:
@@ -3150,7 +3197,7 @@ def read(self, columns=None, **kwargs):
class AppendablePanelTable(AppendableTable):
""" suppor the new appendable table formats """
- table_type = 'appendable_panel'
+ table_type = u'appendable_panel'
ndim = 3
obj_type = Panel
@@ -3167,11 +3214,11 @@ def is_transposed(self):
class AppendableNDimTable(AppendablePanelTable):
""" suppor the new appendable table formats """
- table_type = 'appendable_ndim'
+ table_type = u'appendable_ndim'
ndim = 4
obj_type = Panel4D
-def _convert_index(index):
+def _convert_index(index, encoding=None):
index_name = getattr(index,'name',None)
if isinstance(index, DatetimeIndex):
@@ -3211,7 +3258,7 @@ def _convert_index(index):
# atom = _tables().ObjectAtom()
# return np.asarray(values, dtype='O'), 'object', atom
- converted = np.array(list(values), dtype=np.str_)
+ converted = _convert_string_array(values, encoding)
itemsize = converted.dtype.itemsize
return IndexCol(converted, 'string', _tables().StringCol(itemsize), itemsize=itemsize,
index_name=index_name)
@@ -3233,48 +3280,90 @@ def _convert_index(index):
return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
index_name=index_name)
-def _unconvert_index(data, kind):
- if kind == 'datetime64':
+def _unconvert_index(data, kind, encoding=None):
+ kind = _ensure_decoded(kind)
+ if kind == u'datetime64':
index = DatetimeIndex(data)
- elif kind == 'datetime':
+ elif kind == u'datetime':
index = np.array([datetime.fromtimestamp(v) for v in data],
dtype=object)
- elif kind == 'date':
+ elif kind == u'date':
index = np.array([date.fromtimestamp(v) for v in data], dtype=object)
- elif kind in ('string', 'integer', 'float'):
+ elif kind in (u'integer', u'float'):
index = np.array(data)
- elif kind == 'object':
+ elif kind in (u'string'):
+ index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)
+ elif kind == u'object':
index = np.array(data[0])
else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
-def _unconvert_index_legacy(data, kind, legacy=False):
- if kind == 'datetime':
+def _unconvert_index_legacy(data, kind, legacy=False, encoding=None):
+ kind = _ensure_decoded(kind)
+ if kind == u'datetime':
index = lib.time64_to_datetime(data)
- elif kind in ('string', 'integer'):
+ elif kind in (u'integer'):
index = np.array(data, dtype=object)
+ elif kind in (u'string'):
+ index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)
else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
-def _maybe_convert(values, val_kind):
+def _convert_string_array(data, encoding, itemsize=None):
+
+ # encode if needed
+ if encoding is not None and len(data):
+ f = np.vectorize(lambda x: x.encode(encoding), otypes=[np.object])
+ data = f(data)
+
+ # create the sized dtype
+ if itemsize is None:
+ itemsize = lib.max_len_string_array(com._ensure_object(data.ravel()))
+
+ data = np.array(data,dtype="S%d" % itemsize)
+ return data
+
+def _unconvert_string_array(data, nan_rep=None, encoding=None):
+ """ deserialize a string array, possibly decoding """
+ shape = data.shape
+ data = np.array(data.ravel(),dtype=object)
+
+ # guard against a None encoding in PY3 (because of a legacy
+ # where the passed encoding is actually None)
+ encoding = _ensure_encoding(encoding)
+ if encoding is not None and len(data):
+ f = np.vectorize(lambda x: x.decode(encoding),otypes=[np.object])
+ data = f(data)
+
+ if nan_rep is None:
+ nan_rep = 'nan'
+
+ data = lib.string_array_replace_from_nan_rep(data, nan_rep)
+ return data.reshape(shape)
+
+def _maybe_convert(values, val_kind, encoding):
if _need_convert(val_kind):
- conv = _get_converter(val_kind)
+ conv = _get_converter(val_kind, encoding)
# conv = np.frompyfunc(conv, 1, 1)
values = conv(values)
return values
-def _get_converter(kind):
+def _get_converter(kind, encoding):
+ kind = _ensure_decoded(kind)
if kind == 'datetime64':
return lambda x: np.array(x, dtype='M8[ns]')
- if kind == 'datetime':
+ elif kind == 'datetime':
return lib.convert_timestamps
+ elif kind == 'string':
+ return lambda x: _unconvert_string_array(x,encoding=encoding)
else: # pragma: no cover
raise ValueError('invalid kind %s' % kind)
def _need_convert(kind):
- if kind in ('datetime', 'datetime64'):
+ kind = _ensure_decoded(kind)
+ if kind in (u'datetime', u'datetime64', u'string'):
return True
return False
@@ -3288,7 +3377,8 @@ class Term(object):
>, >=, <, <=, =, != (not equal) are allowed
value : a value or list of values (required)
queryables : a kinds map (dict of column name -> kind), or None i column is non-indexable
-
+ encoding : an encoding that will encode the query terms
+
Returns
-------
a Term object
@@ -3301,20 +3391,21 @@ class Term(object):
>>> Term('index', ['20121114','20121114'])
>>> Term('index', datetime(2012,11,14))
>>> Term('major_axis>20121114')
- >>> Term('minor_axis', ['A','B'])
+ >>> Term('minor_axis', ['A','U'])
"""
_ops = ['<=', '<', '>=', '>', '!=', '==', '=']
_search = re.compile("^\s*(?P<field>\w+)\s*(?P<op>%s)\s*(?P<value>.+)\s*$" % '|'.join(_ops))
_max_selectors = 31
- def __init__(self, field, op=None, value=None, queryables=None):
+ def __init__(self, field, op=None, value=None, queryables=None, encoding=None):
self.field = None
self.op = None
self.value = None
self.q = queryables or dict()
self.filter = None
self.condition = None
+ self.encoding = encoding
# unpack lists/tuples in field
while(isinstance(field, (tuple, list))):
@@ -3366,16 +3457,16 @@ def __init__(self, field, op=None, value=None, queryables=None):
if self.field is None or self.op is None or self.value is None:
raise ValueError("Could not create this term [%s]" % str(self))
- # = vs ==
+ # = vs ==
if self.op == '=':
self.op = '=='
# we have valid conditions
if self.op in ['>', '>=', '<', '<=']:
- if hasattr(self.value, '__iter__') and len(self.value) > 1:
+ if hasattr(self.value, '__iter__') and len(self.value) > 1 and not isinstance(self.value,basestring):
raise ValueError("an inequality condition cannot have multiple values [%s]" % str(self))
- if not hasattr(self.value, '__iter__'):
+ if not is_list_like(self.value):
self.value = [self.value]
if len(self.q):
@@ -3401,6 +3492,11 @@ def kind(self):
""" the kind of my field """
return self.q.get(self.field)
+ def generate(self, v):
+ """ create and return the op string for this TermValue """
+ val = v.tostring(self.encoding)
+ return "(%s %s %s)" % (self.field, self.op, val)
+
def eval(self):
""" set the numexpr expression for this term """
@@ -3411,40 +3507,39 @@ def eval(self):
if self.is_in_table:
values = [self.convert_value(v) for v in self.value]
else:
- values = [[v, v] for v in self.value]
+ values = [TermValue(v,v,self.kind) for v in self.value]
# equality conditions
if self.op in ['==', '!=']:
# our filter op expression
if self.op == '!=':
- filter_op = lambda axis, values: not axis.isin(values)
+ filter_op = lambda axis, vals: not axis.isin(vals)
else:
- filter_op = lambda axis, values: axis.isin(values)
+ filter_op = lambda axis, vals: axis.isin(vals)
if self.is_in_table:
# too many values to create the expression?
if len(values) <= self._max_selectors:
- self.condition = "(%s)" % ' | '.join(
- ["(%s %s %s)" % (self.field, self.op, v[0]) for v in values])
+ vs = [ self.generate(v) for v in values ]
+ self.condition = "(%s)" % ' | '.join(vs)
# use a filter after reading
else:
- self.filter = (self.field, filter_op, Index([v[1] for v in values]))
+ self.filter = (self.field, filter_op, Index([v.value for v in values]))
else:
- self.filter = (self.field, filter_op, Index([v[1] for v in values]))
+ self.filter = (self.field, filter_op, Index([v.value for v in values]))
else:
if self.is_in_table:
- self.condition = '(%s %s %s)' % (
- self.field, self.op, values[0][0])
-
+ self.condition = self.generate(values[0])
+
else:
raise TypeError("passing a filterable condition to a non-table indexer [%s]" % str(self))
@@ -3452,33 +3547,56 @@ def eval(self):
def convert_value(self, v):
""" convert the expression that is in the term to something that is accepted by pytables """
- if self.kind == 'datetime64' or self.kind == 'datetime' :
+ def stringify(value):
+ value = str(value)
+ if self.encoding is not None:
+ value = value.encode(self.encoding)
+ return value
+
+ kind = _ensure_decoded(self.kind)
+ if kind == u'datetime64' or kind == u'datetime' :
v = lib.Timestamp(v)
if v.tz is not None:
v = v.tz_convert('UTC')
- return [v.value, v]
- elif isinstance(v, datetime) or hasattr(v, 'timetuple') or self.kind == 'date':
+ return TermValue(v,v.value,kind)
+ elif isinstance(v, datetime) or hasattr(v, 'timetuple') or kind == u'date':
v = time.mktime(v.timetuple())
- return [v, Timestamp(v) ]
- elif self.kind == 'integer':
+ return TermValue(v,Timestamp(v),kind)
+ elif kind == u'integer':
v = int(float(v))
- return [v, v]
- elif self.kind == 'float':
+ return TermValue(v,v,kind)
+ elif kind == u'float':
v = float(v)
- return [v, v]
- elif self.kind == 'bool':
+ return TermValue(v,v,kind)
+ elif kind == u'bool':
if isinstance(v, basestring):
- v = not str(v).strip().lower() in ["false", "f", "no", "n", "none", "0", "[]", "{}", ""]
+ v = not v.strip().lower() in [u'false', u'f', u'no', u'n', u'none', u'0', u'[]', u'{}', u'']
else:
v = bool(v)
- return [v, v]
+ return TermValue(v,v,kind)
elif not isinstance(v, basestring):
- v = str(v)
- return [v, v]
+ v = stringify(v)
+ return TermValue(v,stringify(v),u'string')
# string quoting
- return ["'" + v + "'", v]
+ return TermValue(v,stringify(v),u'string')
+
+class TermValue(object):
+ """ hold a term value the we use to construct a condition/filter """
+
+ def __init__(self, value, converted, kind):
+ self.value = value
+ self.converted = converted
+ self.kind = kind
+ def tostring(self, encoding):
+ """ quote the string if not encoded
+ else encode and return """
+ if self.kind == u'string':
+ if encoding is not None:
+ return self.converted
+ return '"%s"' % self.converted
+ return self.converted
class Coordinates(object):
""" holds a returned coordinates list, useful to select the same rows from different tables
@@ -3528,9 +3646,9 @@ def __init__(self, table, where=None, start=None, stop=None, **kwargs):
# create the numexpr & the filter
if self.terms:
- conds = [t.condition for t in self.terms if t.condition is not None]
- if len(conds):
- self.condition = "(%s)" % ' & '.join(conds)
+ terms = [ t for t in self.terms if t.condition is not None ]
+ if len(terms):
+ self.condition = "(%s)" % ' & '.join([ t.condition for t in terms ])
self.filter = []
for t in self.terms:
if t.filter is not None:
@@ -3553,7 +3671,7 @@ def generate(self, where):
where = [where]
queryables = self.table.queryables()
- return [Term(c, queryables=queryables) for c in where]
+ return [Term(c, queryables=queryables, encoding=self.table.encoding) for c in where]
def select(self):
"""
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index d0f03774f2070..8b3d4a475d952 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -17,6 +17,7 @@
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
from pandas import concat, Timestamp
+from pandas.util import py3compat
from numpy.testing.decorators import slow
@@ -115,7 +116,7 @@ def roundtrip(key, obj,**kwargs):
o = tm.makeTimeSeries()
assert_series_equal(o, roundtrip('series',o))
-
+
o = tm.makeStringSeries()
assert_series_equal(o, roundtrip('string_series',o))
@@ -474,6 +475,20 @@ def test_append(self):
store.append('uints', uint_data, data_columns=['u08','u16','u32']) # 64-bit indices not yet supported
tm.assert_frame_equal(store['uints'], uint_data)
+ def test_encoding(self):
+
+ with ensure_clean(self.path) as store:
+ df = DataFrame(dict(A='foo',B='bar'),index=range(5))
+ df.loc[2,'A'] = np.nan
+ df.loc[3,'B'] = np.nan
+ _maybe_remove(store, 'df')
+ store.append('df', df, encoding='ascii')
+ tm.assert_frame_equal(store['df'], df)
+
+ expected = df.reindex(columns=['A'])
+ result = store.select('df',Term('columns=A',encoding='ascii'))
+ tm.assert_frame_equal(result,expected)
+
def test_append_some_nans(self):
with ensure_clean(self.path) as store:
@@ -556,6 +571,7 @@ def test_append_some_nans(self):
def test_append_frame_column_oriented(self):
with ensure_clean(self.path) as store:
+
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
@@ -1261,8 +1277,14 @@ def test_unimplemented_dtypes_table_columns(self):
with ensure_clean(self.path) as store:
+ l = [('date', datetime.date(2001, 1, 2))]
+
+ # py3 ok for unicode
+ if not py3compat.PY3:
+ l.append(('unicode', u'\u03c3'))
+
### currently not supported dtypes ####
- for n, f in [('unicode', u'\u03c3'), ('date', datetime.date(2001, 1, 2))]:
+ for n, f in l:
df = tm.makeDataFrame()
df[n] = f
self.assertRaises(
@@ -2545,6 +2567,7 @@ def test_legacy_0_10_read(self):
# legacy from 0.10
try:
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_0.10.h5'), 'r')
+ str(store)
for k in store.keys():
store.select(k)
finally:
@@ -2554,6 +2577,7 @@ def test_legacy_0_11_read(self):
# legacy from 0.11
try:
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table_0.11.h5'), 'r')
+ str(store)
df = store.select('df')
df1 = store.select('df1')
mi = store.select('mi')
@@ -2585,24 +2609,25 @@ def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs):
# check indicies & nrows
for k in tstore.keys():
- if tstore.is_table(k):
+ if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
self.assert_(orig_t.nrows == new_t.nrows)
- for a in orig_t.axes:
- if a.is_indexed:
- self.assert_(new_t[a.name].is_indexed == True)
- except (Exception), detail:
- pass
+ # check propindixes
+ if propindexes:
+ for a in orig_t.axes:
+ if a.is_indexed:
+ self.assert_(new_t[a.name].is_indexed == True)
+
finally:
safe_close(store)
safe_close(tstore)
safe_remove(new_f)
do_copy()
- do_copy(keys = ['df'])
+ do_copy(keys = ['/a','/b','/df1_mixed'])
do_copy(propindexes = False)
# new table
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index 15791a984ecc5..a80ad5b7d0208 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -14,6 +14,7 @@ from cpython cimport (PyDict_New, PyDict_GetItem, PyDict_SetItem,
Py_INCREF, PyTuple_SET_ITEM,
PyList_Check, PyFloat_Check,
PyString_Check,
+ PyBytes_Check,
PyTuple_SetItem,
PyTuple_New,
PyObject_SetAttrString)
@@ -762,7 +763,7 @@ def max_len_string_array(ndarray[object, ndim=1] arr):
m = 0
for i from 0 <= i < length:
v = arr[i]
- if PyString_Check(v):
+ if PyString_Check(v) or PyBytes_Check(v):
l = len(v)
if l > m:
@@ -772,11 +773,10 @@ def max_len_string_array(ndarray[object, ndim=1] arr):
@cython.boundscheck(False)
@cython.wraparound(False)
-def array_replace_from_nan_rep(ndarray[object, ndim=1] arr, object nan_rep, object replace = None):
+def string_array_replace_from_nan_rep(ndarray[object, ndim=1] arr, object nan_rep, object replace = None):
""" replace the values in the array with replacement if they are nan_rep; return the same array """
- cdef int length = arr.shape[0]
- cdef int i = 0
+ cdef int length = arr.shape[0], i = 0
if replace is None:
replace = np.nan
@@ -788,7 +788,6 @@ def array_replace_from_nan_rep(ndarray[object, ndim=1] arr, object nan_rep, obje
@cython.boundscheck(False)
@cython.wraparound(False)
-
def write_csv_rows(list data, list data_index, int nlevels, list cols, object writer):
cdef int N, j, i, ncols
| closes #3750
recently released PyTables 3.0.0 (and numexpr 2.1), which now support python3
were completely broken
These changed all data to be stored as bytes (with to/from encoding/decoding)
This PR supports an encoding argument (if you really want to encode your data),
and provides transparent access for python3 (and backwards compat) to
even python2 written stores
| https://api.github.com/repos/pandas-dev/pandas/pulls/3762 | 2013-06-05T18:21:30Z | 2013-06-06T00:14:00Z | 2013-06-06T00:14:00Z | 2014-07-05T15:12:01Z |
DataFrame.corr() now computing correlations only once | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 9c0a2843370f4..1fd3e517e3089 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4750,15 +4750,16 @@ def corr(self, method='pearson', min_periods=None):
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
- valid = mask[i] & mask[j]
- if valid.sum() < min_periods:
- c = NA
- elif not valid.all():
- c = corrf(ac[valid], bc[valid])
- else:
- c = corrf(ac, bc)
- correl[i, j] = c
- correl[j, i] = c
+ if i <= j:
+ valid = mask[i] & mask[j]
+ if valid.sum() < min_periods:
+ c = NA
+ elif not valid.all():
+ c = corrf(ac[valid], bc[valid])
+ else:
+ c = corrf(ac, bc)
+ correl[i, j] = c
+ correl[j, i] = c
return self._constructor(correl, index=cols, columns=cols)
| DataFrame.corr() for 'spearman' and 'kendall' was computing some correlations twice.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3760 | 2013-06-05T13:05:20Z | 2013-06-09T22:05:06Z | null | 2013-06-09T22:05:06Z |
BUG: (GH3611) revisited; read_excel not passing thru options to ExcelFile.parse | diff --git a/RELEASE.rst b/RELEASE.rst
index b5dd3eef68dea..12d2389a8a59b 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -120,6 +120,8 @@ pandas 0.11.1
- Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_)
- ``as_matrix`` with mixed signed and unsigned dtypes will result in 2 x the lcd of the unsigned
as an int, maxing with ``int64``, to avoid precision issues (GH3733_)
+ - ``na_values`` in a list provided to ``read_csv/read_excel`` will match string and numeric versions
+ e.g. ``na_values=['99']`` will match 99 whether the column ends up being int, float, or string (GH3611_)
**Bug Fixes**
@@ -174,7 +176,7 @@ pandas 0.11.1
- Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
``np.nan`` or ``np.inf`` as appropriate (GH3590_)
- Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_)
- - Fix ``read_csv`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
+ - Fix ``read_csv/read_excel`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
was failing (GH3611_)
- Disable HTML output in qtconsole again. (GH3657_)
- Reworked the new repr display logic, which users found confusing. (GH3663_)
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index c23056ce76a62..5b7d13acd99ec 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -13,10 +13,7 @@
from pandas.tseries.period import Period
import json
-def read_excel(path_or_buf, sheetname, header=0, skiprows=None, skip_footer=0,
- index_col=None, parse_cols=None, parse_dates=False,
- date_parser=None, na_values=None, thousands=None, chunksize=None,
- kind=None, **kwds):
+def read_excel(path_or_buf, sheetname, kind=None, **kwds):
"""Read an Excel table into a pandas DataFrame
Parameters
@@ -47,16 +44,7 @@ def read_excel(path_or_buf, sheetname, header=0, skiprows=None, skip_footer=0,
DataFrame from the passed in Excel file
"""
return ExcelFile(path_or_buf,kind=kind).parse(sheetname=sheetname,
- header=0, skiprows=None,
- skip_footer=0,
- index_col=None,
- parse_cols=None,
- parse_dates=False,
- date_parser=None,
- na_values=None,
- thousands=None,
- chunksize=None, kind=None,
- **kwds)
+ kind=kind, **kwds)
class ExcelFile(object):
"""
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 60028d3f3f831..556d1ab1976b4 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1853,7 +1853,20 @@ def _clean_na_values(na_values, keep_default_na=True):
return na_values
def _stringify_na_values(na_values):
- return [ str(x) for x in na_values ]
+ """ return a stringified and numeric for these values """
+ result = []
+ for x in na_values:
+ result.append(str(x))
+ result.append(x)
+ try:
+ result.append(float(x))
+ except:
+ pass
+ try:
+ result.append(int(x))
+ except:
+ pass
+ return result
def _clean_index_names(columns, index_col):
if not _is_index_col(index_col):
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index 28242cda0b46b..39e1042d125a2 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -285,6 +285,15 @@ def _check_extension(self, ext):
recons = read_excel(path, 'test1', index_col=0, na_values=['NA'])
tm.assert_frame_equal(self.frame, recons)
+ # GH 3611
+ self.frame.to_excel(path, 'test1', na_rep='88')
+ recons = read_excel(path, 'test1', index_col=0, na_values=['88'])
+ tm.assert_frame_equal(self.frame, recons)
+
+ self.frame.to_excel(path, 'test1', na_rep='88')
+ recons = read_excel(path, 'test1', index_col=0, na_values=[88,88.0])
+ tm.assert_frame_equal(self.frame, recons)
+
def test_excel_roundtrip_xls_mixed(self):
_skip_if_no_xlrd()
_skip_if_no_xlwt()
diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx
index 7d13aa8ce6765..5343819b9fbfe 100644
--- a/pandas/src/inference.pyx
+++ b/pandas/src/inference.pyx
@@ -373,12 +373,12 @@ def maybe_convert_numeric(ndarray[object] values, set na_values,
for i from 0 <= i < n:
val = values[i]
- if util.is_float_object(val):
- floats[i] = complexes[i] = val
- seen_float = 1
- elif val in na_values:
+ if val in na_values:
floats[i] = complexes[i] = nan
seen_float = 1
+ elif util.is_float_object(val):
+ floats[i] = complexes[i] = val
+ seen_float = 1
elif val is None:
floats[i] = complexes[i] = nan
seen_float = 1
| recloses #3611
API: add string and numeric versions of na_values when parsing
| https://api.github.com/repos/pandas-dev/pandas/pulls/3758 | 2013-06-05T11:56:43Z | 2013-06-05T14:02:43Z | 2013-06-05T14:02:43Z | 2014-07-16T08:11:38Z |
DOC: minor issue with the description of the names attribute of multiindexes | diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 1c4f5db9a45d0..c11e190a2eb82 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -953,12 +953,12 @@ DataFrame to construct a MultiIndex automatically:
df
All of the ``MultiIndex`` constructors accept a ``names`` argument which stores
-string names for the levels themselves. If no names are provided, some
-arbitrary ones will be assigned:
+string names for the levels themselves. If no names are provided, ``None`` will
+be assigned:
.. ipython:: python
- index.names
+ df.index.names
This index can back any axis of a pandas object, and the number of **levels**
of the index is up to you:
| https://api.github.com/repos/pandas-dev/pandas/pulls/3755 | 2013-06-04T20:12:36Z | 2013-06-04T20:28:55Z | null | 2013-12-04T00:56:44Z | |
DOC: document bs4/lxml/html5lib issues | diff --git a/README.rst b/README.rst
index a74a155cf8a27..daea702476ebc 100644
--- a/README.rst
+++ b/README.rst
@@ -93,18 +93,49 @@ Optional dependencies
- openpyxl version 1.6.1 or higher, for writing .xlsx files
- xlrd >= 0.9.0
- Needed for Excel I/O
- - Both `html5lib <https://github.com/html5lib/html5lib-python>`__ **and**
- `Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for
- reading HTML tables
+ - `boto <https://pypi.python.org/pypi/boto>`__: necessary for Amazon S3
+ access.
+ - One of the following combinations of libraries is needed to use the
+ top-level :func:`~pandas.io.html.read_html` function:
+
+ - `BeautifulSoup4`_ and `html5lib`_ (Any recent version of `html5lib`_ is
+ okay.)
+ - `BeautifulSoup4`_ and `lxml`_
+ - `BeautifulSoup4`_ and `html5lib`_ and `lxml`_
+ - Only `lxml`_, although see :ref:`HTML reading gotchas <html-gotchas>`
+ for reasons as to why you should probably **not** take this approach.
.. warning::
- You need to install an older version of Beautiful Soup:
- - Version 4.1.3 and 4.0.2 have been confirmed for 64-bit Ubuntu/Debian
- - Version 4.0.2 have been confirmed for 32-bit Ubuntu
+ - if you install `BeautifulSoup4`_ you must install either
+ `lxml`_ or `html5lib`_ or both.
+ :func:`~pandas.io.html.read_html` will **not** work with *only*
+ `BeautifulSoup4`_ installed.
+ - You are highly encouraged to read :ref:`HTML reading gotchas
+ <html-gotchas>`. It explains issues surrounding the installation and
+ usage of the above three libraries
+ - You may need to install an older version of `BeautifulSoup4`_:
+ - Versions 4.2.1, 4.1.3 and 4.0.2 have been confirmed for 64 and
+ 32-bit Ubuntu/Debian
+ - Additionally, if you're using `Anaconda`_ you should definitely
+ read :ref:`the gotchas about HTML parsing libraries <html-gotchas>`
- - Any recent version of ``html5lib`` is okay.
- - `boto <https://pypi.python.org/pypi/boto>`__: necessary for Amazon S3 access.
+ .. note::
+
+ - if you're on a system with ``apt-get`` you can do
+
+ .. code-block:: sh
+
+ sudo apt-get build-dep python-lxml
+
+ to get the necessary dependencies for installation of `lxml`_. This
+ will prevent further headaches down the line.
+
+
+.. _html5lib: https://github.com/html5lib/html5lib-python
+.. _BeautifulSoup4: http://www.crummy.com/software/BeautifulSoup
+.. _lxml: http://lxml.de
+.. _Anaconda: https://store.continuum.io/cshop/anaconda
Installation from sources
diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst
index 7b184f6d5043f..422e3cec59386 100644
--- a/doc/source/gotchas.rst
+++ b/doc/source/gotchas.rst
@@ -344,3 +344,112 @@ where the data copying occurs.
See `this link <http://stackoverflow.com/questions/13592618/python-pandas-dataframe-thread-safe>`__
for more information.
+
+.. _html-gotchas:
+
+HTML Table Parsing
+------------------
+There are some versioning issues surrounding the libraries that are used to
+parse HTML tables in the top-level pandas io function ``read_html``.
+
+**Issues with** |lxml|_
+
+ * Benefits
+
+ * |lxml|_ is very fast
+
+ * |lxml|_ requires Cython to install correctly.
+
+ * Drawbacks
+
+ * |lxml|_ does *not* make any guarantees about the results of it's parse
+ *unless* it is given |svm|_.
+
+ * In light of the above, we have chosen to allow you, the user, to use the
+ |lxml|_ backend, but **this backend will use** |html5lib|_ if |lxml|_
+ fails to parse
+
+ * It is therefore *highly recommended* that you install both
+ |BeautifulSoup4|_ and |html5lib|_, so that you will still get a valid
+ result (provided everything else is valid) even if |lxml|_ fails.
+
+**Issues with** |BeautifulSoup4|_ **using** |lxml|_ **as a backend**
+
+ * The above issues hold here as well since |BeautifulSoup4|_ is essentially
+ just a wrapper around a parser backend.
+
+**Issues with** |BeautifulSoup4|_ **using** |html5lib|_ **as a backend**
+
+ * Benefits
+
+ * |html5lib|_ is far more lenient than |lxml|_ and consequently deals
+ with *real-life markup* in a much saner way rather than just, e.g.,
+ dropping an element without notifying you.
+
+ * |html5lib|_ *generates valid HTML5 markup from invalid markup
+ automatically*. This is extremely important for parsing HTML tables,
+ since it guarantees a valid document. However, that does NOT mean that
+ it is "correct", since the process of fixing markup does not have a
+ single definition.
+
+ * |html5lib|_ is pure Python and requires no additional build steps beyond
+ its own installation.
+
+ * Drawbacks
+
+ * The biggest drawback to using |html5lib|_ is that it is slow as
+ molasses. However consider the fact that many tables on the web are not
+ big enough for the parsing algorithm runtime to matter. It is more
+ likely that the bottleneck will be in the process of reading the raw
+ text from the url over the web, i.e., IO (input-output). For very large
+ tables, this might not be true.
+
+**Issues with using** |Anaconda|_
+
+ * `Anaconda`_ ships with `lxml`_ version 3.2.0; the following workaround for
+ `Anaconda`_ was successfully used to deal with the versioning issues
+ surrounding `lxml`_ and `BeautifulSoup4`_.
+
+ .. note::
+
+ Unless you have *both*:
+
+ * A strong restriction on the upper bound of the runtime of some code
+ that incorporates :func:`~pandas.io.html.read_html`
+ * Complete knowledge that the HTML you will be parsing will be 100%
+ valid at all times
+
+ then you should install `html5lib`_ and things will work swimmingly
+ without you having to muck around with `conda`. If you want the best of
+ both worlds then install both `html5lib`_ and `lxml`_. If you do install
+ `lxml`_ then you need to perform the following commands to ensure that
+ lxml will work correctly:
+
+ .. code-block:: sh
+
+ # remove the included version
+ conda remove lxml
+
+ # install the latest version of lxml
+ pip install 'git+git://github.com/lxml/lxml.git'
+
+ # install the latest version of beautifulsoup4
+ pip install 'bzr+lp:beautifulsoup'
+
+ Note that you need `bzr <http://bazaar.canonical.com/en>`_ and `git
+ <http://git-scm.com>`_ installed to perform the last two operations.
+
+.. |svm| replace:: **strictly valid markup**
+.. _svm: http://validator.w3.org/docs/help.html#validation_basics
+
+.. |html5lib| replace:: **html5lib**
+.. _html5lib: https://github.com/html5lib/html5lib-python
+
+.. |BeautifulSoup4| replace:: **BeautifulSoup4**
+.. _BeautifulSoup4: http://www.crummy.com/software/BeautifulSoup
+
+.. |lxml| replace:: **lxml**
+.. _lxml: http://lxml.de
+
+.. |Anaconda| replace:: **Anaconda**
+.. _Anaconda: https://store.continuum.io/cshop/anaconda
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 005e213fe24de..6868969c1b968 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -102,17 +102,49 @@ Optional Dependencies
* `openpyxl <http://packages.python.org/openpyxl/>`__, `xlrd/xlwt <http://www.python-excel.org/>`__
* openpyxl version 1.6.1 or higher
* Needed for Excel I/O
- * Both `html5lib <https://github.com/html5lib/html5lib-python>`__ **and**
- `Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for
- reading HTML tables
+ * `boto <https://pypi.python.org/pypi/boto>`__: necessary for Amazon S3
+ access.
+ * One of the following combinations of libraries is needed to use the
+ top-level :func:`~pandas.io.html.read_html` function:
+
+ * `BeautifulSoup4`_ and `html5lib`_ (Any recent version of `html5lib`_ is
+ okay.)
+ * `BeautifulSoup4`_ and `lxml`_
+ * `BeautifulSoup4`_ and `html5lib`_ and `lxml`_
+ * Only `lxml`_, although see :ref:`HTML reading gotchas <html-gotchas>`
+ for reasons as to why you should probably **not** take this approach.
.. warning::
- You need to install an older version of Beautiful Soup:
- - Version 4.1.3 and 4.0.2 have been confirmed for 64-bit Ubuntu/Debian
- - Version 4.0.2 have been confirmed for 32-bit Ubuntu
+ * if you install `BeautifulSoup4`_ you must install either
+ `lxml`_ or `html5lib`_ or both.
+ :func:`~pandas.io.html.read_html` will **not** work with *only*
+ `BeautifulSoup4`_ installed.
+ * You are highly encouraged to read :ref:`HTML reading gotchas
+ <html-gotchas>`. It explains issues surrounding the installation and
+ usage of the above three libraries
+ * You may need to install an older version of `BeautifulSoup4`_:
+ - Versions 4.2.1, 4.1.3 and 4.0.2 have been confirmed for 64 and
+ 32-bit Ubuntu/Debian
+ * Additionally, if you're using `Anaconda`_ you should definitely
+ read :ref:`the gotchas about HTML parsing libraries <html-gotchas>`
- * Any recent version of ``html5lib`` is okay.
+ .. note::
+
+ * if you're on a system with ``apt-get`` you can do
+
+ .. code-block:: sh
+
+ sudo apt-get build-dep python-lxml
+
+ to get the necessary dependencies for installation of `lxml`_. This
+ will prevent further headaches down the line.
+
+
+.. _html5lib: https://github.com/html5lib/html5lib-python
+.. _BeautifulSoup4: http://www.crummy.com/software/BeautifulSoup
+.. _lxml: http://lxml.de
+.. _Anaconda: https://store.continuum.io/cshop/anaconda
.. note::
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 27d3e21fea2c4..802ab08e85932 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -943,6 +943,12 @@ HTML
Reading HTML Content
~~~~~~~~~~~~~~~~~~~~~~
+.. warning::
+
+ We **highly encourage** you to read the :ref:`HTML parsing gotchas
+ <html-gotchas>` regarding the issues surrounding the
+ BeautifulSoup4/html5lib/lxml parsers.
+
.. _io.read_html:
.. versionadded:: 0.11.1
| to summarize this mess of deps and check my thoughts here
### if a user insists on using `lxml` (either with or without `bs4`)
- warning about its inability to deal with the modern web
- warning saying that the user should install `html5lib` and `bs4` so that a page will parse even if `lxml` barfs
- test coverage for failing and passing pages (things that would parse "correctly" before will now fail since the parser will be extremely strict) thus only pages validated by the DTD will even try to parse
(to be fair I was really enthusiastic about `lxml` because of how fast it is but now i'm sort of against it)
### what users should really do
- install `bs4`
- install `html5lib`
- happily parse things into `DataFrame`s with a low amount of stress
### anaconda + `lxml` (no `bs4`)
- no problems (modulo the above warnings)
@wesm maybe you could chime in about what (if anything) you did to `libxml2`/`libxslt` i wasn't clear on the details from the mailing list.
### anaconda + `bs4` + `lxml`
- make sure that you're using `bs4==4.2.1`
- make sure that you're using `lxml==3.2.1`
- workout the details of how to do this with `conda` (i did this already, but it was 2 or 3 AM so I'm a little foggy on the details)
### anaconda + `bs4` + `html5lib` (no `lxml`)
- happy parsing of HTML tables
this will be in a gotcha that will be linked to from a warning at the top of the read html section of io.rst
| https://api.github.com/repos/pandas-dev/pandas/pulls/3751 | 2013-06-03T22:24:22Z | 2013-06-04T20:02:50Z | 2013-06-04T20:02:50Z | 2014-07-16T08:11:35Z |
BUG: (GH3748) Incorrectly read a HDFStore multi-index Frame witha column specification | diff --git a/RELEASE.rst b/RELEASE.rst
index bbfc9fb948ef4..2b90edaa327b0 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -81,6 +81,8 @@ pandas 0.11.1
- When removing an object from a ``HDFStore``, ``remove(key)`` raises
``KeyError`` if the key is not a valid store object.
+ - In an ``HDFStore``, raise a ``TypeError`` on passing ``where`` or ``columns``
+ to select with a Storer; these are invalid parameters at this time
- The repr() for (Multi)Index now obeys display.max_seq_items rather
then numpy threshold print options. (GH3426_, GH3466_)
- Added mangle_dupe_cols option to read_table/csv, allowing users
@@ -197,6 +199,7 @@ pandas 0.11.1
their first argument (GH3702_)
- Fix file tokenization error with \r delimiter and quoted fields (GH3453_)
- Groupby transform with item-by-item not upcasting correctly (GH3740_)
+ - Incorrectly read a HDFStore multi-index Frame witha column specification (GH3748_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -280,6 +283,7 @@ pandas 0.11.1
.. _GH3667: https://github.com/pydata/pandas/issues/3667
.. _GH3733: https://github.com/pydata/pandas/issues/3733
.. _GH3740: https://github.com/pydata/pandas/issues/3740
+.. _GH3748: https://github.com/pydata/pandas/issues/3748
pandas 0.11.0
=============
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 0a86d72a05f16..5a480e08effba 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1664,6 +1664,12 @@ def f(values, freq=None, tz=None):
return f
return klass
+ def validate_read(self, kwargs):
+ if kwargs.get('columns') is not None:
+ raise TypeError("cannot pass a column specification when reading a Storer")
+ if kwargs.get('where') is not None:
+ raise TypeError("cannot pass a where specification when reading a Storer")
+
@property
def is_exists(self):
return True
@@ -1921,6 +1927,7 @@ def read_index_legacy(self, key):
class LegacySeriesStorer(LegacyStorer):
def read(self, **kwargs):
+ self.validate_read(kwargs)
index = self.read_index_legacy('index')
values = self.read_array('values')
return Series(values, index=index)
@@ -1928,6 +1935,7 @@ def read(self, **kwargs):
class LegacyFrameStorer(LegacyStorer):
def read(self, **kwargs):
+ self.validate_read(kwargs)
index = self.read_index_legacy('index')
columns = self.read_index_legacy('columns')
values = self.read_array('values')
@@ -1945,6 +1953,7 @@ def shape(self):
return None
def read(self, **kwargs):
+ self.validate_read(kwargs)
index = self.read_index('index')
if len(index) > 0:
values = self.read_array('values')
@@ -1963,6 +1972,7 @@ class SparseSeriesStorer(GenericStorer):
attributes = ['name','fill_value','kind']
def read(self, **kwargs):
+ self.validate_read(kwargs)
index = self.read_index('index')
sp_values = self.read_array('sp_values')
sp_index = self.read_index('sp_index')
@@ -1983,6 +1993,7 @@ class SparseFrameStorer(GenericStorer):
attributes = ['default_kind','default_fill_value']
def read(self, **kwargs):
+ self.validate_read(kwargs)
columns = self.read_index('columns')
sdict = {}
for c in columns:
@@ -2013,6 +2024,7 @@ class SparsePanelStorer(GenericStorer):
attributes = ['default_kind','default_fill_value']
def read(self, **kwargs):
+ self.validate_read(kwargs)
items = self.read_index('items')
sdict = {}
@@ -2075,6 +2087,8 @@ def shape(self):
return None
def read(self, **kwargs):
+ self.validate_read(kwargs)
+
axes = []
for i in xrange(self.ndim):
ax = self.read_index('axis%d' % i)
@@ -3124,8 +3138,12 @@ def write(self, obj, data_columns=None, **kwargs):
self.levels = obj.index.names
return super(AppendableMultiFrameTable, self).write(obj=obj.reset_index(), data_columns=data_columns, **kwargs)
- def read(self, *args, **kwargs):
- df = super(AppendableMultiFrameTable, self).read(*args, **kwargs)
+ def read(self, columns=None, **kwargs):
+ if columns is not None:
+ for n in self.levels:
+ if n not in columns:
+ columns.insert(0, n)
+ df = super(AppendableMultiFrameTable, self).read(columns=columns, **kwargs)
df.set_index(self.levels, inplace=True)
return df
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 73d2e23ae4384..d0f03774f2070 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -1072,6 +1072,26 @@ def test_append_hierarchical(self):
result = store.select('mi')
tm.assert_frame_equal(result, df)
+ # GH 3748
+ result = store.select('mi',columns=['A','B'])
+ expected = df.reindex(columns=['A','B'])
+ tm.assert_frame_equal(result,expected)
+
+ with tm.ensure_clean('test.hdf') as path:
+ df.to_hdf(path,'df',table=True)
+ result = read_hdf(path,'df',columns=['A','B'])
+ expected = df.reindex(columns=['A','B'])
+ tm.assert_frame_equal(result,expected)
+
+ def test_pass_spec_to_storer(self):
+
+ df = tm.makeDataFrame()
+
+ with ensure_clean(self.path) as store:
+ store.put('df',df)
+ self.assertRaises(TypeError, store.select, 'df', columns=['A'])
+ self.assertRaises(TypeError, store.select, 'df',where=[('columns=A')])
+
def test_append_misc(self):
with ensure_clean(self.path) as store:
| closes #3748
API: raise a `TypeError` on passing `where` or `columns` to select with a Storer
e.g. these are invalid parameters at this time
| https://api.github.com/repos/pandas-dev/pandas/pulls/3749 | 2013-06-03T19:08:40Z | 2013-06-03T21:06:35Z | 2013-06-03T21:06:34Z | 2014-07-16T08:11:33Z |
ENH: Experimental CustomBusinessDay DateOffset class. fixes #2301 | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index f11bf60549d93..e2e4e81f13199 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -382,6 +382,7 @@ frequency increment. Specific offset logic like "month", "business day", or
DateOffset, "Generic offset class, defaults to 1 calendar day"
BDay, "business day (weekday)"
+ CDay, "custom business day (experimental)"
Week, "one week, optionally anchored on a day of the week"
WeekOfMonth, "the x-th day of the y-th week of each month"
MonthEnd, "calendar month end"
@@ -477,6 +478,54 @@ Another example is parameterizing ``YearEnd`` with the specific ending month:
.. _timeseries.alias:
+Custom Business Days (Experimental)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``CDay`` or ``CustomBusinessDay`` class provides a parametric
+``BusinessDay`` class which can be used to create customized business day
+calendars which account for local holidays and local weekend conventions.
+
+.. ipython:: python
+
+ from pandas.tseries.offsets import CustomBusinessDay
+ # As an interesting example, let's look at Egypt where
+ # a Friday-Saturday weekend is observed.
+ weekmask_egypt = 'Sun Mon Tue Wed Thu'
+ # They also observe International Workers' Day so let's
+ # add that for a couple of years
+ holidays = ['2012-05-01', datetime(2013, 5, 1), np.datetime64('2014-05-01')]
+ bday_egypt = CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt)
+ dt = datetime(2013, 4, 30)
+ print dt + 2 * bday_egypt
+ dts = date_range(dt, periods=5, freq=bday_egypt).to_series()
+ print dts
+ print Series(dts.weekday, dts).map(Series('Mon Tue Wed Thu Fri Sat Sun'.split()))
+
+.. note::
+
+ The frequency string 'C' is used to indicate that a CustomBusinessDay
+ DateOffset is used, it is important to note that since CustomBusinessDay is
+ a parameterised type, instances of CustomBusinessDay may differ and this is
+ not detectable from the 'C' frequency string. The user therefore needs to
+ ensure that the 'C' frequency string is used consistently within the user's
+ application.
+
+
+.. note::
+
+ This uses the ``numpy.busdaycalendar`` API introduced in Numpy 1.7 and
+ therefore requires Numpy 1.7.0 or newer.
+
+.. warning::
+
+ There are known problems with the timezone handling in Numpy 1.7 and users
+ should therefore use this **experimental(!)** feature with caution and at
+ their own risk.
+
+ To the extent that the ``datetime64`` and ``busdaycalendar`` APIs in Numpy
+ have to change to fix the timezone issues, the behaviour of the
+ ``CustomBusinessDay`` class may have to change in future versions.
+
Offset Aliases
~~~~~~~~~~~~~~
@@ -489,6 +538,7 @@ frequencies. We will refer to these aliases as *offset aliases*
:widths: 15, 100
"B", "business day frequency"
+ "C", "custom business day frequency (experimental)"
"D", "calendar day frequency"
"W", "weekly frequency"
"M", "month end frequency"
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 3202efbcef83a..07101ed78ba24 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -319,6 +319,32 @@ Other Enhancements
- DatetimeIndexes no longer try to convert mixed-integer indexes during join
operations (:issue:`3877`)
+Experimental Features
+~~~~~~~~~~~~~~~~~~~~~
+
+ - Added experimental ``CustomBusinessDay`` class to support ``DateOffsets``
+ with custom holiday calendars and custom weekmasks. (GH2301_)
+
+ .. note::
+
+ This uses the ``numpy.busdaycalendar`` API introduced in Numpy 1.7 and
+ therefore requires Numpy 1.7.0 or newer.
+
+ .. ipython:: python
+
+ from pandas.tseries.offsets import CustomBusinessDay
+ # As an interesting example, let's look at Egypt where
+ # a Friday-Saturday weekend is observed.
+ weekmask_egypt = 'Sun Mon Tue Wed Thu'
+ # They also observe International Workers' Day so let's
+ # add that for a couple of years
+ holidays = ['2012-05-01', datetime(2013, 5, 1), np.datetime64('2014-05-01')]
+ bday_egypt = CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt)
+ dt = datetime(2013, 4, 30)
+ print dt + 2 * bday_egypt
+ dts = date_range(dt, periods=5, freq=bday_egypt).to_series()
+ print Series(dts.weekday, dts).map(Series('Mon Tue Wed Thu Fri Sat Sun'.split()))
+
Bug Fixes
~~~~~~~~~
diff --git a/pandas/core/datetools.py b/pandas/core/datetools.py
index aea7b2b6b5462..d6da94856b140 100644
--- a/pandas/core/datetools.py
+++ b/pandas/core/datetools.py
@@ -8,6 +8,12 @@
day = DateOffset()
bday = BDay()
businessDay = bday
+try:
+ cday = CDay()
+ customBusinessDay = CustomBusinessDay()
+except NotImplementedError:
+ cday = None
+ customBusinessDay = None
monthEnd = MonthEnd()
yearEnd = YearEnd()
yearBegin = YearBegin()
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index f54bfee55782a..51b8e5d042ca9 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -114,14 +114,21 @@ def _get_freq_str(base, mult=1):
# Offset names ("time rules") and related functions
-from pandas.tseries.offsets import (Day, BDay, Hour, Minute, Second, Milli,
- Week, Micro, MonthEnd, MonthBegin,
- BMonthBegin, BMonthEnd, YearBegin, YearEnd,
- BYearBegin, BYearEnd, QuarterBegin,
- QuarterEnd, BQuarterBegin, BQuarterEnd)
+from pandas.tseries.offsets import (Micro, Milli, Second, Minute, Hour,
+ Day, BDay, CDay, Week, MonthBegin,
+ MonthEnd, BMonthBegin, BMonthEnd,
+ QuarterBegin, QuarterEnd, BQuarterBegin,
+ BQuarterEnd, YearBegin, YearEnd,
+ BYearBegin, BYearEnd,
+ )
+try:
+ cday = CDay()
+except NotImplementedError:
+ cday = None
_offset_map = {
'D': Day(),
+ 'C': cday,
'B': BDay(),
'H': Hour(),
'T': Minute(),
@@ -278,6 +285,7 @@ def _get_freq_str(base, mult=1):
'BAS': 'A',
'MS': 'M',
'D': 'D',
+ 'C': 'C',
'B': 'B',
'T': 'T',
'S': 'S',
@@ -1004,15 +1012,17 @@ def is_subperiod(source, target):
if _is_quarterly(source):
return _quarter_months_conform(_get_rule_month(source),
_get_rule_month(target))
- return source in ['D', 'B', 'M', 'H', 'T', 'S']
+ return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S']
elif _is_quarterly(target):
- return source in ['D', 'B', 'M', 'H', 'T', 'S']
+ return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S']
elif target == 'M':
- return source in ['D', 'B', 'H', 'T', 'S']
+ return source in ['D', 'C', 'B', 'H', 'T', 'S']
elif _is_weekly(target):
- return source in [target, 'D', 'B', 'H', 'T', 'S']
+ return source in [target, 'D', 'C', 'B', 'H', 'T', 'S']
elif target == 'B':
return source in ['B', 'H', 'T', 'S']
+ elif target == 'C':
+ return source in ['C', 'H', 'T', 'S']
elif target == 'D':
return source in ['D', 'H', 'T', 'S']
elif target == 'H':
@@ -1055,17 +1065,19 @@ def is_superperiod(source, target):
smonth = _get_rule_month(source)
tmonth = _get_rule_month(target)
return _quarter_months_conform(smonth, tmonth)
- return target in ['D', 'B', 'M', 'H', 'T', 'S']
+ return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S']
elif _is_quarterly(source):
- return target in ['D', 'B', 'M', 'H', 'T', 'S']
+ return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S']
elif source == 'M':
- return target in ['D', 'B', 'H', 'T', 'S']
+ return target in ['D', 'C', 'B', 'H', 'T', 'S']
elif _is_weekly(source):
- return target in [source, 'D', 'B', 'H', 'T', 'S']
+ return target in [source, 'D', 'C', 'B', 'H', 'T', 'S']
elif source == 'B':
- return target in ['D', 'B', 'H', 'T', 'S']
+ return target in ['D', 'C', 'B', 'H', 'T', 'S']
+ elif source == 'C':
+ return target in ['D', 'C', 'B', 'H', 'T', 'S']
elif source == 'D':
- return target in ['D', 'B', 'H', 'T', 'S']
+ return target in ['D', 'C', 'B', 'H', 'T', 'S']
elif source == 'H':
return target in ['H', 'T', 'S']
elif source == 'T':
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index f560a6bf6e717..56df301b5b027 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -11,7 +11,7 @@
from pandas.tseries.frequencies import (
infer_freq, to_offset, get_period_alias,
Resolution, get_reso_string)
-from pandas.tseries.offsets import DateOffset, generate_range, Tick
+from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay
from pandas.tseries.tools import parse_time_string, normalize_date
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
@@ -1740,6 +1740,57 @@ def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
freq=freq, tz=tz, normalize=normalize, name=name)
+def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
+ normalize=True, name=None, **kwargs):
+ """
+ **EXPERIMENTAL** Return a fixed frequency datetime index, with
+ CustomBusinessDay as the default frequency
+
+ .. warning:: EXPERIMENTAL
+
+ The CustomBusinessDay class is not officially supported and the API is
+ likely to change in future versions. Use this at your own risk.
+
+ Parameters
+ ----------
+ start : string or datetime-like, default None
+ Left bound for generating dates
+ end : string or datetime-like, default None
+ Right bound for generating dates
+ periods : integer or None, default None
+ If None, must specify start and end
+ freq : string or DateOffset, default 'C' (CustomBusinessDay)
+ Frequency strings can have multiples, e.g. '5H'
+ tz : string or None
+ Time zone name for returning localized DatetimeIndex, for example
+ Asia/Beijing
+ normalize : bool, default False
+ Normalize start/end dates to midnight before generating date range
+ name : str, default None
+ Name for the resulting index
+ weekmask : str, Default 'Mon Tue Wed Thu Fri'
+ weekmask of valid business days, passed to ``numpy.busdaycalendar``
+ holidays : list
+ list/array of dates to exclude from the set of valid business days,
+ passed to ``numpy.busdaycalendar``
+
+ Notes
+ -----
+ 2 of start, end, or periods must be specified
+
+ Returns
+ -------
+ rng : DatetimeIndex
+ """
+
+ if freq=='C':
+ holidays = kwargs.pop('holidays', [])
+ weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri')
+ freq = CDay(holidays=holidays, weekmask=weekmask)
+ return DatetimeIndex(start=start, end=end, periods=periods, freq=freq,
+ tz=tz, normalize=normalize, name=name, **kwargs)
+
+
def _to_m8(key, tz=None):
'''
Timestamp-like => dt64
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 9585d1f81e81d..deefd9f489611 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -1,4 +1,5 @@
from datetime import date, datetime, timedelta
+import numpy as np
from pandas.tseries.tools import to_datetime
@@ -7,7 +8,7 @@
import pandas.lib as lib
import pandas.tslib as tslib
-__all__ = ['Day', 'BusinessDay', 'BDay',
+__all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay',
'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd',
'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd',
'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd',
@@ -100,7 +101,8 @@ def _should_cache(self):
def _params(self):
attrs = [(k, v) for k, v in vars(self).iteritems()
- if k not in ['kwds', '_offset', 'name', 'normalize']]
+ if k not in ['kwds', '_offset', 'name', 'normalize',
+ 'busdaycalendar']]
attrs.extend(self.kwds.items())
attrs = sorted(set(attrs))
@@ -359,6 +361,121 @@ def onOffset(cls, dt):
return dt.weekday() < 5
+class CustomBusinessDay(BusinessDay):
+ """
+ **EXPERIMENTAL** DateOffset subclass representing possibly n business days
+ excluding holidays
+
+ .. warning:: EXPERIMENTAL
+
+ This class is not officially supported and the API is likely to change
+ in future versions. Use this at your own risk.
+
+ Parameters
+ ----------
+ n : int, default 1
+ offset : timedelta, default timedelta(0)
+ normalize : bool, default False
+ Normalize start/end dates to midnight before generating date range
+ weekmask : str, Default 'Mon Tue Wed Thu Fri'
+ weekmask of valid business days, passed to ``numpy.busdaycalendar``
+ holidays : list
+ list/array of dates to exclude from the set of valid business days,
+ passed to ``numpy.busdaycalendar``
+ """
+
+ _cacheable = False
+
+ def __init__(self, n=1, **kwds):
+ # Check we have the required numpy version
+ from distutils.version import LooseVersion
+ if LooseVersion(np.__version__) < '1.7.0':
+ raise NotImplementedError("CustomBusinessDay requires numpy >= "
+ "1.7.0. Current version: " +
+ np.__version__)
+
+ self.n = int(n)
+ self.kwds = kwds
+ self.offset = kwds.get('offset', timedelta(0))
+ self.normalize = kwds.get('normalize', False)
+ self.weekmask = kwds.get('weekmask', 'Mon Tue Wed Thu Fri')
+
+ holidays = kwds.get('holidays', [])
+ holidays = [self._to_dt64(dt, dtype='datetime64[D]') for dt in
+ holidays]
+ self.holidays = tuple(sorted(holidays))
+ self.kwds['holidays'] = self.holidays
+ self._set_busdaycalendar()
+
+ def _set_busdaycalendar(self):
+ holidays = np.array(self.holidays, dtype='datetime64[D]')
+ self.busdaycalendar = np.busdaycalendar(holidays=holidays,
+ weekmask=self.weekmask)
+
+ def __getstate__(self):
+ """"Return a pickleable state"""
+ state = self.__dict__.copy()
+ del state['busdaycalendar']
+ return state
+
+ def __setstate__(self, state):
+ """Reconstruct an instance from a pickled state"""
+ self.__dict__ = state
+ self._set_busdaycalendar()
+
+ @property
+ def rule_code(self):
+ return 'C'
+
+ @staticmethod
+ def _to_dt64(dt, dtype='datetime64'):
+ if isinstance(dt, (datetime, basestring)):
+ dt = np.datetime64(dt, dtype=dtype)
+ if isinstance(dt, np.datetime64):
+ dt = dt.astype(dtype)
+ else:
+ raise TypeError('dt must be datestring, datetime or datetime64')
+ return dt
+
+ def apply(self, other):
+ if isinstance(other, datetime):
+ dtype = type(other)
+ elif isinstance(other, np.datetime64):
+ dtype = other.dtype
+ elif isinstance(other, (timedelta, Tick)):
+ return BDay(self.n, offset=self.offset + other,
+ normalize=self.normalize)
+ else:
+ raise TypeError('Only know how to combine trading day with '
+ 'datetime, datetime64 or timedelta!')
+ dt64 = self._to_dt64(other)
+
+ day64 = dt64.astype('datetime64[D]')
+ time = dt64 - day64
+
+ if self.n<=0:
+ roll = 'forward'
+ else:
+ roll = 'backward'
+
+ result = np.busday_offset(day64, self.n, roll=roll,
+ busdaycal=self.busdaycalendar)
+
+ if not self.normalize:
+ result = result + time
+
+ result = result.astype(dtype)
+
+ if self.offset:
+ result = result + self.offset
+
+ return result
+
+ def onOffset(self, dt):
+ day64 = self._to_dt64(dt).astype('datetime64[D]')
+ return np.is_busday(day64, busdaycal=self.busdaycalendar)
+
+
class MonthEnd(DateOffset, CacheableOffset):
"""DateOffset of one month end"""
@@ -1169,6 +1286,7 @@ class Nano(Tick):
BDay = BusinessDay
BMonthEnd = BusinessMonthEnd
BMonthBegin = BusinessMonthBegin
+CDay = CustomBusinessDay
def _get_firstbday(wkday):
diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py
index 7fbdbbe328c84..4c46dcccbce1c 100644
--- a/pandas/tseries/tests/test_daterange.py
+++ b/pandas/tseries/tests/test_daterange.py
@@ -1,6 +1,7 @@
from datetime import datetime
import pickle
import unittest
+import nose
import numpy as np
@@ -9,7 +10,7 @@
from pandas import Timestamp
from pandas.tseries.offsets import generate_range
-from pandas.tseries.index import bdate_range, date_range
+from pandas.tseries.index import cdate_range, bdate_range, date_range
import pandas.tseries.tools as tools
import pandas.core.datetools as datetools
@@ -23,6 +24,11 @@ def _skip_if_no_pytz():
raise nose.SkipTest
+def _skip_if_no_cday():
+ if datetools.cday is None:
+ raise nose.SkipTest("CustomBusinessDay not available.")
+
+
def eq_gen_range(kwargs, expected):
rng = generate_range(**kwargs)
assert(np.array_equal(list(rng), expected))
@@ -37,6 +43,12 @@ def test_generate(self):
rng2 = list(generate_range(START, END, time_rule='B'))
self.assert_(np.array_equal(rng1, rng2))
+ def test_generate_cday(self):
+ _skip_if_no_cday()
+ rng1 = list(generate_range(START, END, offset=datetools.cday))
+ rng2 = list(generate_range(START, END, time_rule='C'))
+ self.assert_(np.array_equal(rng1, rng2))
+
def test_1(self):
eq_gen_range(dict(start=datetime(2009, 3, 25), periods=2),
[datetime(2009, 3, 25), datetime(2009, 3, 26)])
@@ -364,7 +376,235 @@ def test_month_range_union_tz(self):
early_dr.union(late_dr)
+class TestCustomDateRange(unittest.TestCase):
+
+ def setUp(self):
+ _skip_if_no_cday()
+ self.rng = cdate_range(START, END)
+
+ def test_constructor(self):
+ rng = cdate_range(START, END, freq=datetools.cday)
+ rng = cdate_range(START, periods=20, freq=datetools.cday)
+ rng = cdate_range(end=START, periods=20, freq=datetools.cday)
+ self.assertRaises(ValueError, date_range, '2011-1-1', '2012-1-1', 'C')
+ self.assertRaises(ValueError, cdate_range, '2011-1-1', '2012-1-1', 'C')
+
+ def test_cached_range(self):
+ rng = DatetimeIndex._cached_range(START, END,
+ offset=datetools.cday)
+ rng = DatetimeIndex._cached_range(START, periods=20,
+ offset=datetools.cday)
+ rng = DatetimeIndex._cached_range(end=START, periods=20,
+ offset=datetools.cday)
+
+ self.assertRaises(Exception, DatetimeIndex._cached_range, START, END)
+
+ self.assertRaises(Exception, DatetimeIndex._cached_range, START,
+ freq=datetools.cday)
+
+ self.assertRaises(Exception, DatetimeIndex._cached_range, end=END,
+ freq=datetools.cday)
+
+ self.assertRaises(Exception, DatetimeIndex._cached_range, periods=20,
+ freq=datetools.cday)
+
+ def test_comparison(self):
+ d = self.rng[10]
+
+ comp = self.rng > d
+ self.assert_(comp[11])
+ self.assert_(not comp[9])
+
+ def test_copy(self):
+ cp = self.rng.copy()
+ repr(cp)
+ self.assert_(cp.equals(self.rng))
+
+ def test_repr(self):
+ # only really care that it works
+ repr(self.rng)
+
+ def test_getitem(self):
+ smaller = self.rng[:5]
+ self.assert_(np.array_equal(smaller, self.rng.view(np.ndarray)[:5]))
+ self.assertEquals(smaller.offset, self.rng.offset)
+
+ sliced = self.rng[::5]
+ self.assertEquals(sliced.offset, datetools.cday * 5)
+
+ fancy_indexed = self.rng[[4, 3, 2, 1, 0]]
+ self.assertEquals(len(fancy_indexed), 5)
+ self.assert_(isinstance(fancy_indexed, DatetimeIndex))
+ self.assert_(fancy_indexed.freq is None)
+
+ # 32-bit vs. 64-bit platforms
+ self.assertEquals(self.rng[4], self.rng[np.int_(4)])
+
+ def test_getitem_matplotlib_hackaround(self):
+ values = self.rng[:, None]
+ expected = self.rng.values[:, None]
+ self.assert_(np.array_equal(values, expected))
+
+ def test_shift(self):
+ shifted = self.rng.shift(5)
+ self.assertEquals(shifted[0], self.rng[5])
+ self.assertEquals(shifted.offset, self.rng.offset)
+
+ shifted = self.rng.shift(-5)
+ self.assertEquals(shifted[5], self.rng[0])
+ self.assertEquals(shifted.offset, self.rng.offset)
+
+ shifted = self.rng.shift(0)
+ self.assertEquals(shifted[0], self.rng[0])
+ self.assertEquals(shifted.offset, self.rng.offset)
+
+ rng = date_range(START, END, freq=datetools.bmonthEnd)
+ shifted = rng.shift(1, freq=datetools.cday)
+ self.assertEquals(shifted[0], rng[0] + datetools.cday)
+
+ def test_pickle_unpickle(self):
+ pickled = pickle.dumps(self.rng)
+ unpickled = pickle.loads(pickled)
+
+ self.assert_(unpickled.offset is not None)
+
+ def test_union(self):
+ # overlapping
+ left = self.rng[:10]
+ right = self.rng[5:10]
+
+ the_union = left.union(right)
+ self.assert_(isinstance(the_union, DatetimeIndex))
+
+ # non-overlapping, gap in middle
+ left = self.rng[:5]
+ right = self.rng[10:]
+
+ the_union = left.union(right)
+ self.assert_(isinstance(the_union, Index))
+
+ # non-overlapping, no gap
+ left = self.rng[:5]
+ right = self.rng[5:10]
+
+ the_union = left.union(right)
+ self.assert_(isinstance(the_union, DatetimeIndex))
+
+ # order does not matter
+ self.assert_(np.array_equal(right.union(left), the_union))
+
+ # overlapping, but different offset
+ rng = date_range(START, END, freq=datetools.bmonthEnd)
+
+ the_union = self.rng.union(rng)
+ self.assert_(isinstance(the_union, DatetimeIndex))
+
+ def test_outer_join(self):
+ # should just behave as union
+
+ # overlapping
+ left = self.rng[:10]
+ right = self.rng[5:10]
+
+ the_join = left.join(right, how='outer')
+ self.assert_(isinstance(the_join, DatetimeIndex))
+
+ # non-overlapping, gap in middle
+ left = self.rng[:5]
+ right = self.rng[10:]
+
+ the_join = left.join(right, how='outer')
+ self.assert_(isinstance(the_join, DatetimeIndex))
+ self.assert_(the_join.freq is None)
+
+ # non-overlapping, no gap
+ left = self.rng[:5]
+ right = self.rng[5:10]
+
+ the_join = left.join(right, how='outer')
+ self.assert_(isinstance(the_join, DatetimeIndex))
+
+ # overlapping, but different offset
+ rng = date_range(START, END, freq=datetools.bmonthEnd)
+
+ the_join = self.rng.join(rng, how='outer')
+ self.assert_(isinstance(the_join, DatetimeIndex))
+ self.assert_(the_join.freq is None)
+
+ def test_intersection_bug(self):
+ # GH #771
+ a = cdate_range('11/30/2011', '12/31/2011')
+ b = cdate_range('12/10/2011', '12/20/2011')
+ result = a.intersection(b)
+ self.assert_(result.equals(b))
+
+ def test_summary(self):
+ self.rng.summary()
+ self.rng[2:2].summary()
+
+ def test_summary_pytz(self):
+ _skip_if_no_pytz()
+ import pytz
+ cdate_range('1/1/2005', '1/1/2009', tz=pytz.utc).summary()
+
+ def test_misc(self):
+ end = datetime(2009, 5, 13)
+ dr = cdate_range(end=end, periods=20)
+ firstDate = end - 19 * datetools.cday
+
+ assert len(dr) == 20
+ assert dr[0] == firstDate
+ assert dr[-1] == end
+
+ def test_date_parse_failure(self):
+ badly_formed_date = '2007/100/1'
+
+ self.assertRaises(ValueError, Timestamp, badly_formed_date)
+
+ self.assertRaises(ValueError, cdate_range, start=badly_formed_date,
+ periods=10)
+ self.assertRaises(ValueError, cdate_range, end=badly_formed_date,
+ periods=10)
+ self.assertRaises(ValueError, cdate_range, badly_formed_date,
+ badly_formed_date)
+
+ def test_equals(self):
+ self.assertFalse(self.rng.equals(list(self.rng)))
+
+ def test_daterange_bug_456(self):
+ # GH #456
+ rng1 = cdate_range('12/5/2011', '12/5/2011')
+ rng2 = cdate_range('12/2/2011', '12/5/2011')
+ rng2.offset = datetools.CDay()
+
+ result = rng1.union(rng2)
+ self.assert_(isinstance(result, DatetimeIndex))
+
+ def test_cdaterange(self):
+ rng = cdate_range('2013-05-01', periods=3)
+ xp = DatetimeIndex(['2013-05-01', '2013-05-02', '2013-05-03'])
+ self.assert_(xp.equals(rng))
+
+ def test_cdaterange_weekmask(self):
+ rng = cdate_range('2013-05-01', periods=3,
+ weekmask='Sun Mon Tue Wed Thu')
+ xp = DatetimeIndex(['2013-05-01', '2013-05-02', '2013-05-05'])
+ self.assert_(xp.equals(rng))
+
+ def test_cdaterange_holidays(self):
+ rng = cdate_range('2013-05-01', periods=3,
+ holidays=['2013-05-01'])
+ xp = DatetimeIndex(['2013-05-02', '2013-05-03', '2013-05-06'])
+ self.assert_(xp.equals(rng))
+
+ def test_cdaterange_weekmask_and_holidays(self):
+ rng = cdate_range('2013-05-01', periods=3,
+ weekmask='Sun Mon Tue Wed Thu',
+ holidays=['2013-05-01'])
+ xp = DatetimeIndex(['2013-05-02', '2013-05-05', '2013-05-06'])
+ self.assert_(xp.equals(rng))
+
+
if __name__ == '__main__':
- import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index bcd74e7e6eecd..487a3091fd83b 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -1,10 +1,13 @@
from datetime import date, datetime, timedelta
import unittest
+import nose
+from nose.tools import assert_raises
+
import numpy as np
from pandas.core.datetools import (
- bday, BDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd, MonthBegin,
- BYearBegin, QuarterBegin, BQuarterBegin, BMonthBegin,
+ bday, BDay, cday, CDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd,
+ MonthBegin, BYearBegin, QuarterBegin, BQuarterBegin, BMonthBegin,
DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second, Day, Micro,
Milli, Nano,
WeekOfMonth, format, ole2datetime, QuarterEnd, to_datetime, normalize_date,
@@ -16,8 +19,6 @@
from pandas.tseries.tools import parse_time_string
import pandas.tseries.offsets as offsets
-from nose.tools import assert_raises
-
from pandas.tslib import monthrange
from pandas.lib import Timestamp
from pandas.util.testing import assertRaisesRegexp
@@ -31,6 +32,12 @@ def test_monthrange():
for m in range(1, 13):
assert monthrange(y, m) == calendar.monthrange(y, m)
+
+def _skip_if_no_cday():
+ if cday is None:
+ raise nose.SkipTest("CustomBusinessDay not available.")
+
+
####
## Misc function tests
####
@@ -295,6 +302,220 @@ def test_offsets_compare_equal(self):
self.assertFalse(offset1 != offset2)
+class TestCustomBusinessDay(unittest.TestCase):
+ _multiprocess_can_split_ = True
+
+ def setUp(self):
+ self.d = datetime(2008, 1, 1)
+
+ _skip_if_no_cday()
+ self.offset = CDay()
+ self.offset2 = CDay(2)
+
+ def test_different_normalize_equals(self):
+ # equivalent in this special case
+ offset = CDay()
+ offset2 = CDay()
+ offset2.normalize = True
+ self.assertEqual(offset, offset2)
+
+ def test_repr(self):
+ assert repr(self.offset) == '<1 CustomBusinessDay>'
+ assert repr(self.offset2) == '<2 CustomBusinessDays>'
+
+ expected = '<1 BusinessDay: offset=datetime.timedelta(1)>'
+ assert repr(self.offset + timedelta(1)) == expected
+
+ def test_with_offset(self):
+ offset = self.offset + timedelta(hours=2)
+
+ assert (self.d + offset) == datetime(2008, 1, 2, 2)
+
+ def testEQ(self):
+ self.assertEqual(self.offset2, self.offset2)
+
+ def test_mul(self):
+ pass
+
+ def test_hash(self):
+ self.assertEqual(hash(self.offset2), hash(self.offset2))
+
+ def testCall(self):
+ self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
+
+ def testRAdd(self):
+ self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
+
+ def testSub(self):
+ off = self.offset2
+ self.assertRaises(Exception, off.__sub__, self.d)
+ self.assertEqual(2 * off - off, off)
+
+ self.assertEqual(self.d - self.offset2, self.d + CDay(-2))
+
+ def testRSub(self):
+ self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
+
+ def testMult1(self):
+ self.assertEqual(self.d + 10 * self.offset, self.d + CDay(10))
+
+ def testMult2(self):
+ self.assertEqual(self.d + (-5 * CDay(-10)),
+ self.d + CDay(50))
+
+ def testRollback1(self):
+ self.assertEqual(CDay(10).rollback(self.d), self.d)
+
+ def testRollback2(self):
+ self.assertEqual(
+ CDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
+
+ def testRollforward1(self):
+ self.assertEqual(CDay(10).rollforward(self.d), self.d)
+
+ def testRollforward2(self):
+ self.assertEqual(
+ CDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
+
+ def test_roll_date_object(self):
+ offset = CDay()
+
+ dt = date(2012, 9, 15)
+
+ result = offset.rollback(dt)
+ self.assertEqual(result, datetime(2012, 9, 14))
+
+ result = offset.rollforward(dt)
+ self.assertEqual(result, datetime(2012, 9, 17))
+
+ offset = offsets.Day()
+ result = offset.rollback(dt)
+ self.assertEqual(result, datetime(2012, 9, 15))
+
+ result = offset.rollforward(dt)
+ self.assertEqual(result, datetime(2012, 9, 15))
+
+ def test_onOffset(self):
+ tests = [(CDay(), datetime(2008, 1, 1), True),
+ (CDay(), datetime(2008, 1, 5), False)]
+
+ for offset, date, expected in tests:
+ assertOnOffset(offset, date, expected)
+
+ def test_apply(self):
+ from pandas.core.datetools import cday
+ tests = []
+
+ tests.append((cday,
+ {datetime(2008, 1, 1): datetime(2008, 1, 2),
+ datetime(2008, 1, 4): datetime(2008, 1, 7),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 8)}))
+
+ tests.append((2 * cday,
+ {datetime(2008, 1, 1): datetime(2008, 1, 3),
+ datetime(2008, 1, 4): datetime(2008, 1, 8),
+ datetime(2008, 1, 5): datetime(2008, 1, 8),
+ datetime(2008, 1, 6): datetime(2008, 1, 8),
+ datetime(2008, 1, 7): datetime(2008, 1, 9)}))
+
+ tests.append((-cday,
+ {datetime(2008, 1, 1): datetime(2007, 12, 31),
+ datetime(2008, 1, 4): datetime(2008, 1, 3),
+ datetime(2008, 1, 5): datetime(2008, 1, 4),
+ datetime(2008, 1, 6): datetime(2008, 1, 4),
+ datetime(2008, 1, 7): datetime(2008, 1, 4),
+ datetime(2008, 1, 8): datetime(2008, 1, 7)}))
+
+ tests.append((-2 * cday,
+ {datetime(2008, 1, 1): datetime(2007, 12, 28),
+ datetime(2008, 1, 4): datetime(2008, 1, 2),
+ datetime(2008, 1, 5): datetime(2008, 1, 3),
+ datetime(2008, 1, 6): datetime(2008, 1, 3),
+ datetime(2008, 1, 7): datetime(2008, 1, 3),
+ datetime(2008, 1, 8): datetime(2008, 1, 4),
+ datetime(2008, 1, 9): datetime(2008, 1, 7)}))
+
+ tests.append((CDay(0),
+ {datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 4): datetime(2008, 1, 4),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 7)}))
+
+ for offset, cases in tests:
+ for base, expected in cases.iteritems():
+ assertEq(offset, base, expected)
+
+ def test_apply_large_n(self):
+ dt = datetime(2012, 10, 23)
+
+ result = dt + CDay(10)
+ self.assertEqual(result, datetime(2012, 11, 6))
+
+ result = dt + CDay(100) - CDay(100)
+ self.assertEqual(result, dt)
+
+ off = CDay() * 6
+ rs = datetime(2012, 1, 1) - off
+ xp = datetime(2011, 12, 23)
+ self.assertEqual(rs, xp)
+
+ st = datetime(2011, 12, 18)
+ rs = st + off
+ xp = datetime(2011, 12, 26)
+ self.assertEqual(rs, xp)
+
+ def test_apply_corner(self):
+ self.assertRaises(Exception, CDay().apply, BMonthEnd())
+
+ def test_offsets_compare_equal(self):
+ # root cause of #456
+ offset1 = CDay()
+ offset2 = CDay()
+ self.assertFalse(offset1 != offset2)
+
+ def test_holidays(self):
+ # Define a TradingDay offset
+ holidays = ['2012-05-01', datetime(2013, 5, 1),
+ np.datetime64('2014-05-01')]
+ tday = CDay(holidays=holidays)
+ for year in range(2012, 2015):
+ dt = datetime(year, 4, 30)
+ xp = datetime(year, 5, 2)
+ rs = dt + tday
+ self.assertEqual(rs, xp)
+
+ def test_weekmask(self):
+ weekmask_saudi = 'Sat Sun Mon Tue Wed' # Thu-Fri Weekend
+ weekmask_uae = '1111001' # Fri-Sat Weekend
+ weekmask_egypt = [1,1,1,1,0,0,1] # Fri-Sat Weekend
+ bday_saudi = CDay(weekmask=weekmask_saudi)
+ bday_uae = CDay(weekmask=weekmask_uae)
+ bday_egypt = CDay(weekmask=weekmask_egypt)
+ dt = datetime(2013, 5, 1)
+ xp_saudi = datetime(2013, 5, 4)
+ xp_uae = datetime(2013, 5, 2)
+ xp_egypt = datetime(2013, 5, 2)
+ self.assertEqual(xp_saudi, dt + bday_saudi)
+ self.assertEqual(xp_uae, dt + bday_uae)
+ self.assertEqual(xp_egypt, dt + bday_egypt)
+ xp2 = datetime(2013, 5, 5)
+ self.assertEqual(xp2, dt + 2 * bday_saudi)
+ self.assertEqual(xp2, dt + 2 * bday_uae)
+ self.assertEqual(xp2, dt + 2 * bday_egypt)
+
+ def test_weekmask_and_holidays(self):
+ weekmask_egypt = 'Sun Mon Tue Wed Thu' # Fri-Sat Weekend
+ holidays = ['2012-05-01', datetime(2013, 5, 1),
+ np.datetime64('2014-05-01')]
+ bday_egypt = CDay(holidays=holidays, weekmask=weekmask_egypt)
+ dt = datetime(2013, 4, 30)
+ xp_egypt = datetime(2013, 5, 5)
+ self.assertEqual(xp_egypt, dt + 2 * bday_egypt)
+
+
def assertOnOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected
@@ -1160,7 +1381,6 @@ def test_offset(self):
datetime(2007, 12, 15): datetime(2007, 4, 1),
datetime(2012, 1, 31): datetime(2011, 4, 1), }))
-
for offset, cases in tests:
for base, expected in cases.iteritems():
assertEq(offset, base, expected)
| I took a stab at issue #2301 using the busdaycalendar functionality in numpy 1.7.
### Caveats
There are a few issues with this:
- It requires Numpy 1.7 (I've only tested it on numpy 1.7.1).
- The timezone handling in nump 1.7 is broken (according to [this discussion](http://numpy-discussion.10968.n7.nabble.com/timezones-and-datetime64-td33407.html) ) and the datetime64 and timezone API will change in numpy 1.8. So from what I understand the current code as I've written it might move you onto a different day if you're in a UTC-0?00 timezone (I'm in UTC+0200 so it didn't affect me).
That said I didn't want to reinvent the wheel and since the numpy datetime64 api does cater for this usecase I thought it would be good to standardise on that in the long term.
The code does what I need it to do and I'm putting it out there in case it's useful to anyone else. Also, with some feedback maybe we can get it to the point where it could be included in Pandas as an optional DateOffset class for users who have Numpy 1.7.
### Possible Improvements
I can think of:
- Guard in the constructor to raise a meaningful exception when the user doesn't is on Numpy < 1.7.
- Better handling for the timezone issue.
- Unit tests for the CustomBusinessDay behaviour.
### Notes
- I picked the frequency code 'C' because it was available and fit nicely between 'D' and 'B'. I had originally named the class TradingDay and wanted to use 'T' but that's already used for Minutes.
- I have no idea what goes on in frequencies.py and I simply put a 'C' wherever I found a 'B' using the reasoning that as a BusinessDay subclass it should work in all the same places.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3744 | 2013-06-03T14:01:05Z | 2013-06-21T12:39:20Z | 2013-06-21T12:39:20Z | 2014-07-02T10:29:35Z |
BUG: (GH3740) Groupby transform with item-by-item not upcasting correctly | diff --git a/RELEASE.rst b/RELEASE.rst
index c59a53c7f6c69..bbfc9fb948ef4 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -196,6 +196,7 @@ pandas 0.11.1
- ``DataFrame.to_html`` and ``DataFrame.to_latex`` now accept a path for
their first argument (GH3702_)
- Fix file tokenization error with \r delimiter and quoted fields (GH3453_)
+ - Groupby transform with item-by-item not upcasting correctly (GH3740_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -278,6 +279,7 @@ pandas 0.11.1
.. _GH3696: https://github.com/pydata/pandas/issues/3696
.. _GH3667: https://github.com/pydata/pandas/issues/3667
.. _GH3733: https://github.com/pydata/pandas/issues/3733
+.. _GH3740: https://github.com/pydata/pandas/issues/3740
pandas 0.11.0
=============
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index d409adfd71158..64606a6e644f9 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1532,6 +1532,9 @@ def transform(self, func, *args, **kwargs):
transformed : Series
"""
result = self.obj.copy()
+ if hasattr(result,'values'):
+ result = result.values
+ dtype = result.dtype
if isinstance(func, basestring):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
@@ -1539,13 +1542,21 @@ def transform(self, func, *args, **kwargs):
wrapper = lambda x: func(x, *args, **kwargs)
for name, group in self:
+
+ group = com.ensure_float(group)
object.__setattr__(group, 'name', name)
res = wrapper(group)
- # result[group.index] = res
indexer = self.obj.index.get_indexer(group.index)
- np.put(result, indexer, res)
+ if hasattr(res,'values'):
+ res = res.values
- return result
+ # need to do a safe put here, as the dtype may be different
+ # this needs to be an ndarray
+ result,_ = com._maybe_upcast_indexer(result, indexer, res)
+
+ # downcast if we can (and need)
+ result = _possibly_downcast_to_dtype(result, dtype)
+ return self.obj.__class__(result,index=self.obj.index,name=self.obj.name)
class NDFrameGroupBy(GroupBy):
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index c56fca49cce48..cf62b16a9dd2a 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -616,6 +616,39 @@ def f(x):
assert_series_equal(agged, expected, check_dtype=False)
self.assert_(issubclass(agged.dtype.type, np.dtype(dtype).type))
+ def test_groupby_transform_with_int(self):
+
+ # GH 3740, make sure that we might upcast on item-by-item transform
+
+ # floats
+ df = DataFrame(dict(A = [1,1,1,2,2,2], B = Series(1,dtype='float64'), C = Series([1,2,3,1,2,3],dtype='float64'), D = 'foo'))
+ result = df.groupby('A').transform(lambda x: (x-x.mean())/x.std())
+ expected = DataFrame(dict(B = np.nan, C = Series([-1,0,1,-1,0,1],dtype='float64')))
+ assert_frame_equal(result,expected)
+
+ # int case
+ df = DataFrame(dict(A = [1,1,1,2,2,2], B = 1, C = [1,2,3,1,2,3], D = 'foo'))
+ result = df.groupby('A').transform(lambda x: (x-x.mean())/x.std())
+ expected = DataFrame(dict(B = np.nan, C = [-1,0,1,-1,0,1]))
+ assert_frame_equal(result,expected)
+
+ # int that needs float conversion
+ s = Series([2,3,4,10,5,-1])
+ df = DataFrame(dict(A = [1,1,1,2,2,2], B = 1, C = s, D = 'foo'))
+ result = df.groupby('A').transform(lambda x: (x-x.mean())/x.std())
+
+ s1 = s.iloc[0:3]
+ s1 = (s1-s1.mean())/s1.std()
+ s2 = s.iloc[3:6]
+ s2 = (s2-s2.mean())/s2.std()
+ expected = DataFrame(dict(B = np.nan, C = concat([s1,s2])))
+ assert_frame_equal(result,expected)
+
+ # int downcasting
+ result = df.groupby('A').transform(lambda x: x*2/2)
+ expected = DataFrame(dict(B = 1, C = [2,3,4,10,5,-1]))
+ assert_frame_equal(result,expected)
+
def test_indices_concatenation_order(self):
# GH 2808
| closes #3740
| https://api.github.com/repos/pandas-dev/pandas/pulls/3743 | 2013-06-03T13:24:35Z | 2013-06-03T17:09:29Z | 2013-06-03T17:09:29Z | 2014-07-16T08:11:29Z |
TST/BUG: fix bs4 tests that were getting erroneously run when lxml is installed but not bs4 | diff --git a/RELEASE.rst b/RELEASE.rst
index 2b90edaa327b0..b5dd3eef68dea 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -200,6 +200,7 @@ pandas 0.11.1
- Fix file tokenization error with \r delimiter and quoted fields (GH3453_)
- Groupby transform with item-by-item not upcasting correctly (GH3740_)
- Incorrectly read a HDFStore multi-index Frame witha column specification (GH3748_)
+ - ``read_html`` now correctly skips tests (GH3741_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -284,6 +285,7 @@ pandas 0.11.1
.. _GH3733: https://github.com/pydata/pandas/issues/3733
.. _GH3740: https://github.com/pydata/pandas/issues/3740
.. _GH3748: https://github.com/pydata/pandas/issues/3748
+.. _GH3741: https://github.com/pydata/pandas/issues/3741
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 0ef37704b9d8f..b2fee1acbc4d6 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -294,6 +294,7 @@ Bug Fixes
- Allow insert/delete to non-unique columns (GH3679_)
- ``DataFrame.from_records`` did not accept empty recarrays (GH3682_)
+ - ``read_html`` now correctly skips tests (GH3741_)
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
@@ -340,3 +341,4 @@ on GitHub for a complete list.
.. _GH3691: https://github.com/pydata/pandas/issues/3691
.. _GH3696: https://github.com/pydata/pandas/issues/3696
.. _GH3667: https://github.com/pydata/pandas/issues/3667
+.. _GH3741: https://github.com/pydata/pandas/issues/3741
diff --git a/pandas/io/tests/data/banklist.csv b/pandas/io/tests/data/banklist.csv
index 6545d31fe5fd4..85cebb56f6adf 100644
--- a/pandas/io/tests/data/banklist.csv
+++ b/pandas/io/tests/data/banklist.csv
@@ -1,8 +1,12 @@
-Bank Name,City,State,CERT #,Acquiring Institution,Closing Date,Updated Date
-Douglas County Bank,Douglasville,GA,21649,Hamilton State Bank,26-Apr-13,30-Apr-13
-Parkway Bank,Lenoir,NC,57158,"CertusBank, National Association",26-Apr-13,30-Apr-13
-Chipola Community Bank,Marianna,FL,58034,First Federal Bank of Florida,19-Apr-13,23-Apr-13
-Heritage Bank of North Florida,Orange Park,FL,26680,FirstAtlantic Bank,19-Apr-13,23-Apr-13
+Bank Name,City,ST,CERT,Acquiring Institution,Closing Date,Updated Date
+Banks of Wisconsin d/b/a Bank of Kenosha,Kenosha,WI,35386,"North Shore Bank, FSB",31-May-13,31-May-13
+Central Arizona Bank,Scottsdale,AZ,34527,Western State Bank,14-May-13,20-May-13
+Sunrise Bank,Valdosta,GA,58185,Synovus Bank,10-May-13,21-May-13
+Pisgah Community Bank,Asheville,NC,58701,"Capital Bank, N.A.",10-May-13,14-May-13
+Douglas County Bank,Douglasville,GA,21649,Hamilton State Bank,26-Apr-13,16-May-13
+Parkway Bank,Lenoir,NC,57158,"CertusBank, National Association",26-Apr-13,17-May-13
+Chipola Community Bank,Marianna,FL,58034,First Federal Bank of Florida,19-Apr-13,16-May-13
+Heritage Bank of North Florida,Orange Park,FL,26680,FirstAtlantic Bank,19-Apr-13,16-May-13
First Federal Bank,Lexington,KY,29594,Your Community Bank,19-Apr-13,23-Apr-13
Gold Canyon Bank,Gold Canyon,AZ,58066,"First Scottsdale Bank, National Association",5-Apr-13,9-Apr-13
Frontier Bank,LaGrange,GA,16431,HeritageBank of the South,8-Mar-13,26-Mar-13
@@ -36,18 +40,18 @@ Waccamaw Bank,Whiteville,NC,34515,First Community Bank,8-Jun-12,8-Nov-12
Farmers' and Traders' State Bank,Shabbona,IL,9257,First State Bank,8-Jun-12,10-Oct-12
Carolina Federal Savings Bank,Charleston,SC,35372,Bank of North Carolina,8-Jun-12,31-Oct-12
First Capital Bank,Kingfisher,OK,416,F & M Bank,8-Jun-12,10-Oct-12
-"Alabama Trust Bank, National Association",Sylacauga,AL,35224,Southern States Bank,18-May-12,31-Oct-12
+"Alabama Trust Bank, National Association",Sylacauga,AL,35224,Southern States Bank,18-May-12,20-May-13
"Security Bank, National Association",North Lauderdale,FL,23156,Banesco USA,4-May-12,31-Oct-12
-Palm Desert National Bank,Palm Desert,CA,23632,Pacific Premier Bank,27-Apr-12,31-Aug-12
-Plantation Federal Bank,Pawleys Island,SC,32503,First Federal Bank,27-Apr-12,31-Oct-12
-"Inter Savings Bank, fsb D/B/A InterBank, fsb",Maple Grove,MN,31495,Great Southern Bank,27-Apr-12,17-Oct-12
-HarVest Bank of Maryland,Gaithersburg,MD,57766,Sonabank,27-Apr-12,17-Oct-12
+Palm Desert National Bank,Palm Desert,CA,23632,Pacific Premier Bank,27-Apr-12,17-May-13
+Plantation Federal Bank,Pawleys Island,SC,32503,First Federal Bank,27-Apr-12,17-May-13
+"Inter Savings Bank, fsb D/B/A InterBank, fsb",Maple Grove,MN,31495,Great Southern Bank,27-Apr-12,17-May-13
+HarVest Bank of Maryland,Gaithersburg,MD,57766,Sonabank,27-Apr-12,17-May-13
Bank of the Eastern Shore,Cambridge,MD,26759,No Acquirer,27-Apr-12,17-Oct-12
-"Fort Lee Federal Savings Bank, FSB",Fort Lee,NJ,35527,Alma Bank,20-Apr-12,31-Aug-12
-Fidelity Bank,Dearborn,MI,33883,The Huntington National Bank,30-Mar-12,9-Aug-12
+"Fort Lee Federal Savings Bank, FSB",Fort Lee,NJ,35527,Alma Bank,20-Apr-12,17-May-13
+Fidelity Bank,Dearborn,MI,33883,The Huntington National Bank,30-Mar-12,16-May-13
Premier Bank,Wilmette,IL,35419,International Bank of Chicago,23-Mar-12,17-Oct-12
Covenant Bank & Trust,Rock Spring,GA,58068,"Stearns Bank, N.A.",23-Mar-12,31-Oct-12
-New City Bank ,Chicago,IL,57597,No Acquirer,9-Mar-12,29-Oct-12
+New City Bank,Chicago,IL,57597,No Acquirer,9-Mar-12,29-Oct-12
Global Commerce Bank,Doraville,GA,34046,Metro City Bank,2-Mar-12,31-Oct-12
Home Savings of America,Little Falls,MN,29178,No Acquirer,24-Feb-12,17-Dec-12
Central Bank of Georgia,Ellaville,GA,5687,Ameris Bank,24-Feb-12,9-Aug-12
@@ -55,7 +59,7 @@ SCB Bank,Shelbyville,IN,29761,"First Merchants Bank, National Association",10-Fe
Charter National Bank and Trust,Hoffman Estates,IL,23187,"Barrington Bank & Trust Company, National Association",10-Feb-12,25-Mar-13
BankEast,Knoxville,TN,19869,U.S.Bank National Association,27-Jan-12,8-Mar-13
Patriot Bank Minnesota,Forest Lake,MN,34823,First Resource Bank,27-Jan-12,12-Sep-12
-Tennessee Commerce Bank ,Franklin,TN,35296,Republic Bank & Trust Company,27-Jan-12,20-Nov-12
+Tennessee Commerce Bank,Franklin,TN,35296,Republic Bank & Trust Company,27-Jan-12,20-Nov-12
First Guaranty Bank and Trust Company of Jacksonville,Jacksonville,FL,16579,"CenterState Bank of Florida, N.A.",27-Jan-12,12-Sep-12
American Eagle Savings Bank,Boothwyn,PA,31581,"Capital Bank, N.A.",20-Jan-12,25-Jan-13
The First State Bank,Stockbridge,GA,19252,Hamilton State Bank,20-Jan-12,25-Jan-13
@@ -130,7 +134,7 @@ The Bank of Commerce,Wood Dale,IL,34292,Advantage National Bank Group,25-Mar-11,
Legacy Bank,Milwaukee,WI,34818,Seaway Bank and Trust Company,11-Mar-11,12-Sep-12
First National Bank of Davis,Davis,OK,4077,The Pauls Valley National Bank,11-Mar-11,20-Aug-12
Valley Community Bank,St. Charles,IL,34187,First State Bank,25-Feb-11,12-Sep-12
-"San Luis Trust Bank, FSB ",San Luis Obispo,CA,34783,First California Bank,18-Feb-11,20-Aug-12
+"San Luis Trust Bank, FSB",San Luis Obispo,CA,34783,First California Bank,18-Feb-11,20-Aug-12
Charter Oak Bank,Napa,CA,57855,Bank of Marin,18-Feb-11,12-Sep-12
Citizens Bank of Effingham,Springfield,GA,34601,Heritage Bank of the South,18-Feb-11,2-Nov-12
Habersham Bank,Clarkesville,GA,151,SCBT National Association,18-Feb-11,2-Nov-12
@@ -153,9 +157,9 @@ Oglethorpe Bank,Brunswick,GA,57440,Bank of the Ozarks,14-Jan-11,2-Nov-12
Legacy Bank,Scottsdale,AZ,57820,Enterprise Bank & Trust,7-Jan-11,12-Sep-12
First Commercial Bank of Florida,Orlando,FL,34965,First Southern Bank,7-Jan-11,2-Nov-12
Community National Bank,Lino Lakes,MN,23306,Farmers & Merchants Savings Bank,17-Dec-10,20-Aug-12
-First Southern Bank ,Batesville,AR,58052,Southern Bank,17-Dec-10,20-Aug-12
+First Southern Bank,Batesville,AR,58052,Southern Bank,17-Dec-10,20-Aug-12
"United Americas Bank, N.A.",Atlanta,GA,35065,State Bank and Trust Company,17-Dec-10,2-Nov-12
-"Appalachian Community Bank, FSB ",McCaysville,GA,58495,Peoples Bank of East Tennessee,17-Dec-10,31-Oct-12
+"Appalachian Community Bank, FSB",McCaysville,GA,58495,Peoples Bank of East Tennessee,17-Dec-10,31-Oct-12
Chestatee State Bank,Dawsonville,GA,34578,Bank of the Ozarks,17-Dec-10,2-Nov-12
"The Bank of Miami,N.A.",Coral Gables,FL,19040,1st United Bank,17-Dec-10,2-Nov-12
Earthstar Bank,Southampton,PA,35561,Polonia Bank,10-Dec-10,20-Aug-12
@@ -195,7 +199,7 @@ Sonoma Valley Bank,Sonoma,CA,27259,Westamerica Bank,20-Aug-10,12-Sep-12
Los Padres Bank,Solvang,CA,32165,Pacific Western Bank,20-Aug-10,12-Sep-12
Butte Community Bank,Chico,CA,33219,"Rabobank, N.A.",20-Aug-10,12-Sep-12
Pacific State Bank,Stockton,CA,27090,"Rabobank, N.A.",20-Aug-10,12-Sep-12
-ShoreBank,Chicago,IL,15640,Urban Partnership Bank,20-Aug-10,12-Sep-12
+ShoreBank,Chicago,IL,15640,Urban Partnership Bank,20-Aug-10,16-May-13
Imperial Savings and Loan Association,Martinsville,VA,31623,"River Community Bank, N.A.",20-Aug-10,24-Aug-12
Independent National Bank,Ocala,FL,27344,"CenterState Bank of Florida, N.A.",20-Aug-10,5-Nov-12
Community National Bank at Bartow,Bartow,FL,25266,"CenterState Bank of Florida, N.A.",20-Aug-10,5-Nov-12
@@ -206,13 +210,13 @@ The Cowlitz Bank,Longview,WA,22643,Heritage Bank,30-Jul-10,22-Aug-12
Coastal Community Bank,Panama City Beach,FL,9619,Centennial Bank,30-Jul-10,5-Nov-12
Bayside Savings Bank,Port Saint Joe,FL,57669,Centennial Bank,30-Jul-10,5-Nov-12
Northwest Bank & Trust,Acworth,GA,57658,State Bank and Trust Company,30-Jul-10,5-Nov-12
-Home Valley Bank ,Cave Junction,OR,23181,South Valley Bank & Trust,23-Jul-10,12-Sep-12
-SouthwestUSA Bank ,Las Vegas,NV,35434,Plaza Bank,23-Jul-10,22-Aug-12
-Community Security Bank ,New Prague,MN,34486,Roundbank,23-Jul-10,12-Sep-12
-Thunder Bank ,Sylvan Grove,KS,10506,The Bennington State Bank,23-Jul-10,13-Sep-12
-Williamsburg First National Bank ,Kingstree,SC,17837,"First Citizens Bank and Trust Company, Inc.",23-Jul-10,5-Nov-12
-Crescent Bank and Trust Company ,Jasper,GA,27559,Renasant Bank,23-Jul-10,5-Nov-12
-Sterling Bank ,Lantana,FL,32536,IBERIABANK,23-Jul-10,5-Nov-12
+Home Valley Bank,Cave Junction,OR,23181,South Valley Bank & Trust,23-Jul-10,12-Sep-12
+SouthwestUSA Bank,Las Vegas,NV,35434,Plaza Bank,23-Jul-10,22-Aug-12
+Community Security Bank,New Prague,MN,34486,Roundbank,23-Jul-10,12-Sep-12
+Thunder Bank,Sylvan Grove,KS,10506,The Bennington State Bank,23-Jul-10,13-Sep-12
+Williamsburg First National Bank,Kingstree,SC,17837,"First Citizens Bank and Trust Company, Inc.",23-Jul-10,5-Nov-12
+Crescent Bank and Trust Company,Jasper,GA,27559,Renasant Bank,23-Jul-10,5-Nov-12
+Sterling Bank,Lantana,FL,32536,IBERIABANK,23-Jul-10,5-Nov-12
"Mainstreet Savings Bank, FSB",Hastings,MI,28136,Commercial Bank,16-Jul-10,13-Sep-12
Olde Cypress Community Bank,Clewiston,FL,28864,"CenterState Bank of Florida, N.A.",16-Jul-10,5-Nov-12
Turnberry Bank,Aventura,FL,32280,NAFH National Bank,16-Jul-10,5-Nov-12
@@ -362,7 +366,7 @@ Brickwell Community Bank,Woodbury,MN,57736,CorTrust Bank N.A.,11-Sep-09,15-Jan-1
"Corus Bank, N.A.",Chicago,IL,13693,"MB Financial Bank, N.A.",11-Sep-09,21-Aug-12
First State Bank,Flagstaff,AZ,34875,Sunwest Bank,4-Sep-09,15-Jan-13
Platinum Community Bank,Rolling Meadows,IL,35030,No Acquirer,4-Sep-09,21-Aug-12
-Vantus Bank,Sioux City,IA,27732,Great Southern Bank,4-Sep-09,21-Aug-12
+Vantus Bank,Sioux City,IN,27732,Great Southern Bank,4-Sep-09,21-Aug-12
InBank,Oak Forest,IL,20203,"MB Financial Bank, N.A.",4-Sep-09,21-Aug-12
First Bank of Kansas City,Kansas City,MO,25231,Great American Bank,4-Sep-09,21-Aug-12
Affinity Bank,Ventura,CA,27197,Pacific Western Bank,28-Aug-09,21-Aug-12
@@ -452,7 +456,7 @@ National Bank of Commerce,Berkeley,IL,19733,Republic Bank of Chicago,16-Jan-09,1
Sanderson State Bank,Sanderson,TX,11568,The Pecos County State Bank,12-Dec-08,4-Sep-12
Haven Trust Bank,Duluth,GA,35379,"Branch Banking & Trust Company, (BB&T)",12-Dec-08,16-Aug-12
First Georgia Community Bank,Jackson,GA,34301,United Bank,5-Dec-08,16-Aug-12
-PFF Bank & Trust ,Pomona,CA,28344,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13
+PFF Bank & Trust,Pomona,CA,28344,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13
Downey Savings & Loan,Newport Beach,CA,30968,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13
Community Bank,Loganville,GA,16490,Bank of Essex,21-Nov-08,4-Sep-12
Security Pacific Bank,Los Angeles,CA,23595,Pacific Western Bank,7-Nov-08,28-Aug-12
diff --git a/pandas/io/tests/data/banklist.html b/pandas/io/tests/data/banklist.html
index 8e15f37ccffdb..801016e7a5478 100644
--- a/pandas/io/tests/data/banklist.html
+++ b/pandas/io/tests/data/banklist.html
@@ -1,61 +1,31 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
+<!DOCTYPE html><!-- HTML5 -->
+<html lang="en-US">
+<!-- Content language is American English. -->
<head>
-
-<!-- Instruction: In the title tag change Product Title to the approved product name -->
- <title>FDIC: Failed Bank List</title>
- <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />
-
- <link rel="stylesheet" type="text/css" media="print" href="http://www.fdic.gov/style_productprint.css" />
-
-
-<style>
-
-* {margin:0; padding:0; outline:none}
-body {font:Arial,Helvetica; margin:10px; background-color:#fff}
-
-.sortable {width:925px; margin:0 auto 15px; font:13px Arial, Helvetica}
-.sortable th {background-color:#003366; text-align:left; color:#fff}
-.sortable th h3 {font-size:13px; padding:2px}
-.sortable td {padding:2px}
-.sortable .head h3 {background: url('images/sort.gif') no-repeat 5px center; cursor:pointer; padding-left:15px; text-decoration:underline}
-.sortable .desc, .sortable .asc {background-color:#404040; font-style:italic; text-decoration:underline}
-.sortable .desc h3 {background: url('images/desc.gif') no-repeat 5px center; cursor:pointer; padding-left:15px}
-.sortable .asc h3 {background: url('images/asc.gif') no-repeat 5px center; cursor:pointer; padding-left:15px}
-.sortable .head:hover, .sortable .desc:hover, .sortable .asc:hover {color:#fff}
-.sortable .evenrow td {background:#fff}
-.sortable .oddrow td {background:#fff}
-.sortable td.evenselected {background:#ebebeb}
-.sortable td.oddselected {background:#ebebeb}
-
-#controls {width:925px; margin:0 auto}
-#perpage {float:left; width:190px}
-#perpage select {float:left; font-size:11px}
-#perpage span {float:left; margin:2px 0 0 5px}
-#navigation {float:left; width:340px; text-align:center}
-#navigation img {cursor:pointer}
-#text {float:left; width:190px; text-align:right; margin-top:2px; font:13px Arial, Helvetica}
-</style>
+<title>FDIC: Failed Bank List</title>
+<!-- Meta Tags -->
+<meta charset="UTF-8">
+<!-- Unicode character encoding -->
+<meta http-equiv="X-UA-Compatible" content="IE=edge">
+<!-- Turns off IE Compatiblity Mode -->
+<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1">
+<!-- Makes it so phones don't auto zoom out. -->
+<meta name="author" content="DRR">
+<meta http-equiv="keywords" name="keywords" content="banks, financial institutions, failed, failure, closing, deposits, depositors, banking services, assuming institution, acquiring institution, claims">
+<!-- CSS -->
+<link rel="stylesheet" type="text/css" href="/responsive/css/responsive.css">
+<link rel="stylesheet" type="text/css" href="banklist.css">
</head>
-<body bgcolor="#ffffff" text="#000000">
-
-
-<!-- BEGIN HEADER INCLUDE -->
-<!-- Instruction: The following statement is the header include statement. Do not revise this code. -->
-<!-- begin header -->
-<!-- Last Updated Date: 1-21-2011 Time: 9:00AM Version: 1.5 -->
-<!--<script type="text/javascript" src="http://www.google.com/jsapi?key=ABQIAAAARFKFRzFbjPYbUgzSrdVg0hRrrNc1sGQv42gDojQ1Ll8KWy8MgRRQv_0u-KVSwjYfghDs3QJR40ZHtA"></script>
-<script type="text/javascript">
-google.load("jquery", "1.4.2");
-</script>-->
-<script type="text/javascript" src="/js/jquery-1.4.2.min.js"></script>
-<script type="text/javascript" src="/header/js/navigation.js"></script>
+<body>
+<!-- START of Header -->
+<script type="text/javascript" src="/responsive/header/js/header.js"></script>
+<link rel="stylesheet" type="text/css" href="/responsive/header/css/header.css" />
<!-- googleac.html includes Autocomplete functionality -->
- <!-- Autocomplete files -->
-<link rel="stylesheet" type="text/css" href="/header/css/jquery.autocomplete.css" />
-<script type="text/javascript" src="/header/js/jquery.autocomplete-1.4.2.js"></script>
-
+<!-- Autocomplete files -->
+<link rel="stylesheet" type="text/css" href="/responsive/header/css/jquery.autocomplete.css" />
+<script type="text/javascript" src="/responsive/js/jquery-1.4.1.min.js"></script>
+<script type="text/javascript" src="/responsive/header/js/jquery.autocomplete-1.4.2.js"></script>
<script type="text/javascript">
function findValue(li) {
if( li == null ) return alert("No match!");
@@ -77,7 +47,6 @@
// otherwise, let's just display the value in the text box
else var sValue = li.selectValue;
-
$('#googlesearch2').submit();
}
@@ -100,9 +69,6 @@
function formatResult(row) {
return row[0].replace(/(<.+?>)/gi, '');
}
-
-
-
$("#newSearch").autocomplete("/searchjs.asp", {
width: 179,
@@ -116,7 +82,7 @@
});
- $("#search2").autocomplete("searchjs.asp", {
+ $("#search2").autocomplete("/searchjs.asp", {
width: 160,
autoFill: false,
//delay:10,
@@ -130,5202 +96,4790 @@
});
-
-
-
-
-
</script>
+<!-- END CODE NEEDED TO MAKE THE SEARCH BOX WORK -->
-<!-- Omniture SiteCatalyst Code -->
-<script language="JavaScript" type="text/javascript" src="/js/s_code_v1.js"></script>
<!-- FORESEE Code -->
<script type="text/javascript" src="/foresee/foresee-trigger.js"></script>
-<link rel="stylesheet" type="text/css" href="/header/css/header_style.css" />
-<!--[if lt IE 7]>
- <style media="screen" type="text/css">
- #site-container {
- height: 100%;
- }
- #footer-container {
- bottom: -1px;
- }
- </style>
- <![endif]-->
+<a href="#after_header" class="responsive_header-skip_header">Skip Header</a>
+<header>
+<div id="responsive_header">
+ <div id="responsive_header-right_side">
+ <ul id="responsive_header-links">
+ <li id="responsive_header-twitter" title="Visit FDIC on Twitter"><a tabindex="1" href="/social.html?site=http://twitter.com/FDICgov">Visit FDIC on Twitter</a></li>
+ <li id="responsive_header-facebook" title="Visit FDIC on Facebook"><a tabindex="1" href="/social.html?site=http://www.facebook.com/FDICgov">Visit FDIC on Facebook</a></li>
+ <li id="responsive_header-fdicchannel" title="Visit FDIC on YouTube"><a tabindex="1" href="/social.html?site=http://www.youtube.com/user/FDICchannel">Visit FDIC on YouTube</a></li>
+ <li id="responsive_header-rss" title="FDIC RSS Feed"><a tabindex="1" href="/rss.html">FDIC RSS Feed</a></li>
+ <li id="responsive_header-subscribe" title="Subscribe to FDIC alerts"><a tabindex="1" href="http://service.govdelivery.com/service/multi_subscribe.html?code=USFDIC">Subscribe to FDIC alerts</a></li>
+ </ul>
+ <div id="responsive_header-search">
+ <a href="/search/advanced.html" class="search" title="Advanced Search">Advanced Search</a>
+ <form id="googlesearch" action="http://search.fdic.gov/search" method="get" name="Search box for FDIC.gov">
+ <fieldset>
+ <div class="form">
+ <label for="q">Search FDIC.gov</label>
+ <input tabindex="1" id="newSearch" name="q" class="field" type="text" style="outline: 0 none;" value="Search FDIC..." onblur="if(this.value == '') {this.value = 'Search FDIC...';}" onfocus="if(this.value == 'Search FDIC...') {this.value = '';}" />
+ <input tabindex="1" id="searchsubmit" class="submit" alt="Search Icon" title="Search Icon" type="submit" value="" />
+ <input value="date:D:L:d1" name="sort" type="hidden" />
-<div id ="site-container">
- <div id="header-container"> <!-- start of header container -->
- <!-- everything inside the header is held within this container -->
- <div id="header-nav">
- <div id="header-nav-left-container">
-
- <div id="header-nav-left">
- <a href="/" alt="FDIC Logo" title="FDIC Home - Federal Deposit Insurance Corporation">
- <div id="fdic-logo" class="homeOff"></div>
- </a>
- </div> <!-- close header-nav-left -->
-
- <div id="header-nav-right">
- <div id="header-nav-right-top">
- <div id="fdic-title"></div>
+ <input value="xml_no_dtd" name="output" type="hidden" />
+ <input value="UTF-8" name="ie" type="hidden" />
+ <input value="UTF-8" name="oe" type="hidden" />
+ <input value="wwwGOV" name="client" type="hidden" />
+ <input value="wwwGOV" name="proxystylesheet" type="hidden" />
+ <input value="default" name="site" type="hidden" />
</div>
- <div id="header-nav-right-bottom">
- <h1>Each depositor insured to at least $250,000 per insured bank</h1>
- </div>
- </div> <!-- close header-nav-right -->
-
- </div> <!-- close header-nav-left-container -->
-
- <div id="header-nav-right-container">
- <div id="right-container-top">
- <div id="web2">
- <ul>
- <li><a href="/social.html?site=http://twitter.com/FDICgov"><img src="/header/images/web2/twitter.png" alt="Twitter" title="Twitter" height="24px"/></a></li>
- <li><a href="/social.html?site=http://www.facebook.com/FDICgov"><img src="/header/images/web2/facebook.png" alt="Facebook" title="Facebook" height="24px"/></a></li>
- <li><a href="/social.html?site=http://www.youtube.com/user/FDICchannel"><img src="/header/images/web2/youtube.png" alt="YouTube" title="YouTube" height="24px"/></a></li>
- <li><a href="/rss.html"><img src="/header/images/web2/rss.png" alt="RSS" title="RSS" height="24px"/></a></span></li>
- <li><a href="http://service.govdelivery.com/service/multi_subscribe.html?code=USFDIC"><img src="/header/images/web2/subscribe.png" alt="Subscribe" title="Subscribe" height="24px"/></a></li>
- </ul>
- </div>
- </div> <!-- close right-container-right-top -->
-
- <div id="right-container-center">
- <div id="advanced-search" title="Advanced Search"><a href="/search/advanced.html" class="search">Advanced Search</a></div>
- </div> <!-- close right-container-right-center -->
-
- <div id="right-container-bottom">
- <div id="search">
- <form id="googlesearch" action="http://search.fdic.gov/search" method="get" name="Search box for FDIC.gov">
- <fieldset>
- <div class="form" alt="Search box for FDIC.gov" title="Search box for FDIC.gov">
- <div class="search2">
- <label for="fdic_search"></label>
- <label for="searchsubmit"></label>
- </div>
- <input id="newSearch" name="q" class="field" type="text" style="outline: 0 none;" value="Search FDIC..." onblur="if(this.value == '') {this.value = 'Search FDIC...';}" onfocus="if(this.value == 'Search FDIC...') {this.value = '';}" />
- <input id="searchsubmit" class="submit" alt="Search Icon" title="Search Icon" type="submit" value="" />
- <input value="date:D:L:d1" name="sort" type="hidden" />
- <input value="xml_no_dtd" name="output" type="hidden" />
- <input value="UTF-8" name="ie" type="hidden" />
- <input value="UTF-8" name="oe" type="hidden" />
- <input value="wwwGOV_new" name="client" type="hidden" />
- <input value="wwwGOV_new" name="proxystylesheet" type="hidden" />
- <input value="default" name="site" type="hidden" />
- </div>
- </fieldset>
- </form>
- </div> <!-- close id="search" -->
- </div> <!-- close right-container-right-bottom -->
- </div> <!-- close header-nav-right-container -->
-
- </div> <!-- close header-nav **This is the top part of the header** -->
-
- <div id="top-nav"> <!-- start of top-nav class **This is the main navigation in header, color is light blue**-->
- <!-- top-nav unordered list -->
- <!-- lists all top-nav titles -->
- <!-- **************************************************************** -->
- <ul>
- <li><span id="home" title="Home"><a href="/">Home</a></span></li>
- <li><span>|</span></li>
- <li><span id="deposit" title="Deposit Insurance"><a href="/deposit/">Deposit Insurance</a></span></li>
- <li><span>|</span></li>
- <li><span id="consumers" title="Consumer Protection"><a href="/consumers/">Consumer Protection</a></span></li>
- <li><span>|</span></li>
- <li><span id="bank" title="Industry Analysis"><a href="/bank/">Industry Analysis</a></span></li>
- <li><span>|</span></li>
- <li><span id="regulations" title="Regulations & Examinations"><a href="/regulations/">Regulations & Examinations</a></span></li>
- <li><span>|</span></li>
- <li><span id="buying" title="Asset Sales"><a href="/buying/">Asset Sales</a></span></li>
- <li><span>|</span></li>
- <li><span id="news" title="News & Events"><a href="/news/">News & Events</a></span></li>
- <li><span>|</span></li>
- <li><span id="about" title="About FDIC"><a href="/about/">About FDIC</a></span></li>
- </ul>
- <!-- **************************************************************** -->
- <!-- close top-nav unordered list -->
- </div> <!-- close top-nav id -->
-
- <div id="sub-nav-container"> <!-- start of sub-nav-container **sub-silo of main navigation, color is gold -->
- <div id="sub-nav"> <!-- start of div id sub-nav -->
-
- <!-- lists all sub-nav ul tags -->
- <!-- **************************************************************** -->
- <!-- deposit sub -->
- <div id="deposit_sub" class="sub-wrapper"> <!-- div 1 for "Deposit" -->
- <ul>
- <li><span id="deposit_sub1" title="Bank Find"><a href="http://research.fdic.gov/bankfind/">BankFind</a></span></li>
- <li><span id="deposit_sub2" title="Are My Deposits Insured?"><a href="/deposit/deposits/">Are My Deposits Insured?</a></span></li>
- <li><span id="deposit_sub3" title="Uninsured Investments"><a href="/deposit/investments/">Uninsured Investments</a></span></li>
- <li><span id="deposit_sub4" title="The Deposit Insurance Fund"><a href="/deposit/insurance/index.html">The Deposit Insurance Fund</a></span></li>
- <li><span id="deposit_sub5" title="International Deposit Insurance"><a href="/deposit/deposits/international/">International Deposit Insurance</a></span></li>
- </ul>
- </div> <!-- close div 1-->
-
- <!-- consumer sub -->
- <div id="consumers_sub" class="sub-wrapper"> <!-- div 2 for "Consumer" -->
- <ul>
- <li><span id="consumers_sub1" title="Consumer News & Information"><a href="/consumers/consumer/">Consumer News & Information</a></span></li>
- <li><span id="consumers_sub2" title="Loans & Mortgages"><a href="/consumers/loans/">Loans & Mortgages</a></span></li>
- <li><span id="consumers_sub3" title="Banking & Your Money"><a href="/consumers/banking/">Banking & Your Money</a></span></li>
- <li><span id="consumers_sub4" title="Financial Education & Literacy"><a href="/consumers/education/">Financial Education & Literacy</a></span></li>
- <li><span id="consumers_sub5" title="Community Affairs"><a href="/consumers/community/">Community Affairs</a></span></li>
- <li><span id="consumers_sub6" title="Identity Theft & Fraud"><a href="/consumers/theft/">Identity Theft & Fraud</a></span></li>
- <li><span id="consumers_sub7" title="Consumer Financial Privacy"><a href="/consumers/privacy/">Consumer Financial Privacy</a></span></li>
- </ul>
- </div> <!-- close div 2 -->
-
- <!-- industry sub -->
- <div id="bank_sub" class="sub-wrapper"> <!-- div 3 for "Industry" -->
- <ul>
- <li><span id="bank_sub1" title="Bank Data & Statistics"><a href="/bank/statistical/">Bank Data & Statistics</a></span></li>
- <li><span id="bank_sub2" title="Research & Analysis"><a href="/bank/analytical/">Research & Analysis</a></span></li>
- <li><span id="bank_sub3" title="Failed Banks"><a href="/bank/individual/failed/">Failed Banks</a></span></li>
- </ul>
- </div> <!-- close div 3 -->
-
- <!-- regulations sub -->
- <div id="regulations_sub" class="sub-wrapper"> <!-- div 4 for "Regulations" -->
- <ul>
- <li><span id="regulations_sub1" title="Bank Examinations"><a href="/regulations/examinations/">Bank Examinations</a></span></li>
- <li><span id="regulations_sub2" title="Laws & Regulations"><a href="/regulations/laws/">Laws & Regulations</a></span></li>
- <li><span id="regulations_sub3" title="Resources for Bank Officers & Directors"><a href="/regulations/resources/">Resources for Bank Officers & Directors</a></span></li>
- <li><span id="regulations_sub4" title="FDICconnect"><a href="http://www.fdicconnect.gov/">FDIC<em>connect</em></a></span></li>
- <li><span id="regulations_sub5" title="Required Financial Reports"><a href="/regulations/required/">Required Financial Reports</a></span></li>
- <li><span id="regulations_sub6" title="Examiner Training Programs"><a href="/regulations/examiner/">Examiner Training Programs</a></span></li>
- </ul>
- </div> <!-- close div 4 -->
-
- <!-- asset sub -->
- <div id="buying_sub" class="sub-wrapper"> <!-- div 5 for "Asset" -->
- <ul>
- <li><span id="buying_sub1" title="Loan Sales"><a href="/buying/loan/">Loan Sales</a></span></li>
- <li><span id="buying_sub2" title="Real Estate Sales"><a href="/buying/owned/">Real Estate and Property Marketplace</a></span></li>
- <li><span id="buying_sub3" title="Financial Asset Sales"><a href="/buying/financial/">Financial Asset Sales</a></span></li>
- <li><span id="buying_sub4" title="Servicing Sales Announcements"><a href="/buying/servicing/">Servicing Sales Announcements</a></span></li>
- <li><span id="buying_sub5" title="Other Asset Sales"><a href="/buying/otherasset/">Other Asset Sales</a></span></li>
- <li><span id="buying_sub6" title="Historical Sales"><a href="/buying/historical/">Historical Sales</a></span></li>
- </ul>
- </div> <!-- close div 5 -->
-
- <!-- news sub -->
- <div id="news_sub" class="sub-wrapper"> <!-- div 6 for "News" -->
- <ul>
- <li><span id="news_sub1" title="Press Releases"><a href="/news/news/press/2013/">Press Releases</a></span></li>
- <li><span id="news_sub2" title="Online Press Room"><a href="https://fdicsurvey.inquisiteasp.com/fdic/cgi-bin/qwebcorporate.dll?M58TRS">Online Press Room</a></span></li>
- <li><span id="news_sub3" title="Conferences & Events"><a href="/news/conferences/">Conferences & Events</a></span></li>
- <li><span id="news_sub4" title="Financial Institution Letters"><a href="/news/news/financial/2013/">Financial Institution Letters</a></span></li>
-
- <!-- include this lnk for year 2013 and remove 2012 link below <li><span id="news_sub5" title="Special Alerts"><a href="/news/news/SpecialAlert/2013/">Special Alerts</a></span></li>-->
- <li><span id="news_sub5" title="Special Alerts"><a href="/news/news/SpecialAlert/2012/">Special Alerts</a></span></li>
- <li><span id="news_sub6" title="Letters to the Editor/Opinion Editorials"><a href="/news/letters/">Letters to the Editor/Opinion Editorials</a></span></li>
- <li><span id="news_sub7" title="Speeches & Testimony"><a href="/news/news/speeches/">Speeches & Testimony</a></span></li>
- </ul>
- </div> <!-- close div 6 -->
-
- <!-- news sub -->
- <div id="about_sub" class="sub-wrapper"> <!-- div 6 for "News" -->
- <ul>
- <li><span id="about_sub1" title="Mission & Purpose"><a href="/about/index.html#1">Mission & Purpose</a></span></li>
- <li><span id="about_sub2" title="Advisory Committees"><a href="/about/index.html#2">Advisory Committees</a></span></li>
- <li><span id="about_sub3" title="Careers with the FDIC"><a href="/about/index.html#3">Careers with the FDIC</a></span></li>
- <li><span id="about_sub4" title="Management Team"><a href="/about/index.html#4">Management Team</a></span></li>
- <li><span id="about_sub5" title="Plans & Reports"><a href="/about/index.html#5">Plans & Reports</a></span></li>
- <li><span id="about_sub6" title="What We Can Do for You"><a href="/about/index.html#6">What We Can Do for You</a></span></li>
- <li><span id="about_sub7" title="Diversity with the FDIC"><a href="/about/index.html#7">Diversity at the FDIC</a></span></li>
- </ul>
- </div> <!-- close div 6 -->
-
- <!-- **************************************************************** -->
- </div> <!-- close of id - sub-nav -->
- </div> <!-- close of id - sub-nav-container -->
- </div> <!-- end of the header-container -->
-<div id="body">
-<!-- end header -->
-<font face="arial, helvetica, sans-serif" size="2">
-<!-- END HEADER INCLUDE -->
-
-<!-- Instruction: The following meta tags are for the keywords and document author. If desired change "name of the document" owner to the actual name of the owner and change "add keywords here" to a list of keywords separated by a comma. -->
-<meta name="author" content="DRR" />
-<meta http-equiv="keywords" name="keywords" content="banks, financial institutions, failed, failure, closing, deposits, depositors,
-banking services, assuming institution, acquiring institution, claims" />
-
-
-<link rel="stylesheet" type="text/css" media="print" href="http://www.fdic.gov/style_productprint.css" />
-
-<img src="http://www.fdic.gov/images/spacer.gif" width="1" height="2" alt="" border="0" /><br />
-<table width="670" cellspacing="0" cellpadding="0" border="0">
- <tr>
- <td width="1" bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="1" height="1" alt="" border="0" /></td>
- <td width="14" bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="14" height="1" alt="" border="0" /></td>
- <td width="739" bgcolor="#cccccc"><span class="noDisplay"><img src="http://www.fdic.gov/images/spacer.gif" width="739" height="1" alt="" border="0" /></span></td>
- <td width="1" bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="1" height="1" alt="" border="0" /></td>
- </tr>
-
- <tr>
- <td bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="1" height="24" alt="" border="0" /><br /></td>
- <td></td>
- <td width="739">
-
- <!-- BEGIN BREAD CRUMB TRAIL -->
-
- <!-- Instruction: Change the "Tertiary" link text to the correct third-level menu page name and the href value to the appropriate relative path to the third-level menu page. -->
-
- <!-- Instruction: Change the "Product Title" text to the name of the approved product title. -->
-
- <font face="arial, helvetica,sans-serif" size="1"><a href="/index.html">Home</a> > <a href="/bank/index.html">Industry
- Analysis</a> > <a href="/bank/individual/failed/index.html">Failed Banks</a> > Failed
- Bank List</font><br />
-
- <!-- END BREAD CRUMB TRAIL -->
-
- </td>
- <td bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="1" height="1" alt="" border="0" /><br /></td>
- </tr>
-
- <tr>
- <td colspan="4" bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="1" height="1" alt="" border="0" /><br /></td>
- </tr>
-</table>
-
-<table width="640" cellspacing="0" cellpadding="0" border="0">
- <tr>
- <td width="25"><img src="http://www.fdic.gov/images/spacer.gif" width="25" height="1" alt="" border="0" /><br /></td>
- <td colspan="2">
- <br />
-<!-- DRR BEGIN Product Title & Body-->
-<!-- DRR BEGIN Product Title & Body-->
-<table width="100%" cellpadding="0" cellspacing="0" border="0">
-<!-- BEGIN PRODUCT TITLE -->
-<tr>
- <td>
- <!-- Instruction: Change the "Product Title" text to the name of the approved product title. -->
-
- <font face="arial, helvetica, sans-serif" size="4" color="#003366"><strong><a name="top">Failed
- Bank List</a></strong></font>
- <hr size="1" color="#003366" noshade />
-
-
- </td>
-</tr>
-
-<!-- END PRODUCT TITLE -->
-<!-- DOCUMENT BODY BEGINS HERE -->
-<tr>
- <td valign="top">
- <table border="0" cellpadding="0" cellspacing="0" width="900">
-
- <tr>
- <td> <font face="arial, helvetica, sans-serif" size="2">
- <br />The FDIC is often appointed as receiver for failed banks. This page contains useful information for the customers and vendors of these banks. This includes information on the acquiring bank (if applicable), how your accounts and loans are affected, and how vendors can file claims against the receivership.
- <a href="http://www2.fdic.gov/drrip/cs/index.asp">Failed Financial Institution Contact Search</a>
- displays point of contact information related to failed banks.<br /><br />
-
- This list includes banks which have failed since October 1, 2000. To search for banks that failed prior to those on this page, visit this link: <a href="http://www2.fdic.gov/hsob/SelectRpt.asp?EntryTyp=30">Failures and Assistance Transactions
-</a><br /><br />
-
- <!-- <a href="banklist.csv">Open Bank List as CSV file</a> -->
- <a href="banklist.csv">Failed Bank List</a> - CSV file (Updated on Mondays. Also opens in Excel - <a href="http://www.fdic.gov/excel.html">Excel
- Help</a>)
- <br />
- <script type="text/javascript">
- <!--
- document.writeln("<br /><em>Click arrows next to headers to sort in Ascending or Descending order.</em><br />");
-//-->
- </script><br />
- </font>
- </td>
- </tr>
- </table>
- </td>
-</tr>
-
-<tr>
- <td>
- <table cellpadding="0" cellspacing="0" bordercolordark="#003366" bordercolorlight="ebebeb" border="1" id="table" class="sortable">
- <thead>
- <tr bgcolor="#003366">
- <th id="Institution"><h3>Bank Name</h3></th>
- <th class="nosort" id="city" style="padding-left:3px"><h3>City</h3></th>
- <th id="state"><h3>State</h3></th>
- <th id="CERT #" class="nosort" style="padding-left:3px"><h3>CERT #</h3></th>
- <th id="AI" style="padding-left:3px"><h3>Acquiring Institution</h3></th>
- <th id="Closing"><h3>Closing Date</h3></th>
- <th id="Updated"><h3>Updated Date</h3></th>
- </tr>
- </thead>
- <tbody>
-
- <tr>
- <td><a href="douglascb.html">Douglas County Bank</a></td>
- <td headers="city">Douglasville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">21649</td>
- <td headers="AI">Hamilton State Bank</td>
- <td headers="Closing Date">April 26, 2013</td>
- <td headers="Updated">April 30, 2013</td>
-</tr>
- <tr>
- <td><a href="parkway.html">Parkway Bank</a></td>
- <td headers="city">Lenoir</td>
- <td headers="state">NC</td>
- <td headers="CERT #">57158</td>
- <td headers="AI">CertusBank, National Association</td>
- <td headers="Closing Date">April 26, 2013</td>
- <td headers="Updated">April 30, 2013</td>
-</tr>
-<tr>
- <td><a href="chipola.html">Chipola Community Bank</a></td>
- <td headers="city">Marianna</td>
- <td headers="state">FL</td>
- <td headers="CERT #">58034</td>
- <td headers="AI">First Federal Bank of Florida</td>
- <td headers="Closing Date">April 19, 2013</td>
- <td headers="Updated">April 23, 2013</td>
-</tr>
-<tr>
- <td><a href="heritagebank-fl.html">Heritage Bank of North Florida</a></td>
- <td headers="city">Orange Park</td>
- <td headers="state">FL</td>
- <td headers="CERT #">26680</td>
- <td headers="AI">FirstAtlantic Bank</td>
- <td headers="Closing Date">April 19, 2013</td>
- <td headers="Updated">April 23, 2013</td>
-</tr>
-<tr>
- <td><a href="firstfederal-ky.html">First Federal Bank</a></td>
- <td headers="city">Lexington</td>
- <td headers="state">KY</td>
- <td headers="CERT #">29594</td>
- <td headers="AI">Your Community Bank</td>
- <td headers="Closing Date">April 19, 2013</td>
- <td headers="Updated">April 23, 2013</td>
-</tr>
-<tr>
- <td><a href="goldcanyon.html">Gold Canyon Bank</a></td>
- <td headers="city">Gold Canyon</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">58066</td>
- <td headers="AI">First Scottsdale Bank,
-National Association</td>
- <td headers="Closing Date">April 5, 2013</td>
- <td headers="Updated">April 9, 2013</td>
-</tr>
-<tr>
- <td><a href="frontier-ga.html">Frontier Bank</a></td>
- <td headers="city">LaGrange</td>
- <td headers="state">GA</td>
- <td headers="CERT #">16431</td>
- <td headers="AI">HeritageBank of the South</td>
- <td headers="Closing Date">March 8, 2013</td>
- <td headers="Updated">March 26, 2013</td>
-</tr>
-<tr>
- <td><a href="covenant-il.html">Covenant Bank</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">22476</td>
- <td headers="AI">Liberty Bank and Trust Company</td>
- <td headers="Closing Date">February 15, 2013</td>
- <td headers="Updated">March 4, 2013</td>
-</tr>
-<tr>
- <td><a href="1stregents.html">1st Regents Bank</a></td>
- <td headers="city">Andover</td>
- <td headers="state">MN</td>
- <td headers="CERT #">57157</td>
- <td headers="AI">First Minnesota Bank</td>
- <td headers="Closing Date">January 18, 2013</td>
- <td headers="Updated">February 28, 2013</td>
-</tr>
-<tr>
- <td><a href="westside.html">Westside Community Bank</a></td>
- <td headers="city">University Place</td>
- <td headers="state">WA</td>
- <td headers="CERT #">33997</td>
- <td headers="AI">Sunwest Bank</td>
- <td headers="Closing Date">January 11, 2013</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
-<tr>
- <td><a href="cmbkozarks.html">Community Bank of the Ozarks</a></td>
- <td headers="city">Sunrise Beach</td>
- <td headers="state">MO</td>
- <td headers="CERT #">27331</td>
- <td headers="AI">Bank of Sullivan</td>
- <td headers="Closing Date">December 14, 2012</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
-<tr>
- <td><a href="hometown.html">Hometown Community Bank</a></td>
- <td headers="city">Braselton</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57928</td>
- <td headers="AI">CertusBank, National Association</td>
- <td headers="Closing Date">November 16, 2012</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
-<tr>
- <td><a href="cfnb.html">Citizens First National Bank</a></td>
- <td headers="city">Princeton</td>
- <td headers="state">IL</td>
- <td headers="CERT #">3731</td>
- <td headers="AI">Heartland Bank and Trust Company</td>
- <td headers="Closing Date">November 2, 2012</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
- <tr>
- <td><a href="heritage_fl.html">Heritage Bank of Florida</a></td>
- <td headers="city">Lutz</td>
- <td headers="state">FL</td>
- <td headers="CERT #">35009</td>
- <td headers="AI">Centennial Bank</td>
- <td headers="Closing Date">November 2, 2012</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
-<tr>
- <td><a href="novabank.html">NOVA Bank</a></td>
- <td headers="city">Berwyn</td>
- <td headers="state">PA</td>
- <td headers="CERT #">27148</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">October 26, 2012</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
-<tr>
- <td><a href="excelbank.html">Excel Bank</a></td>
- <td headers="city">Sedalia</td>
- <td headers="state">MO</td>
- <td headers="CERT #">19189</td>
- <td headers="AI">Simmons First National Bank</td>
- <td headers="Closing Date">October 19, 2012</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
-<tr>
- <td><a href="firsteastside.html">First East Side Savings Bank</a></td>
- <td headers="city">Tamarac</td>
- <td headers="state">FL</td>
- <td headers="CERT #">28144</td>
- <td headers="AI">Stearns Bank N.A.</td>
- <td headers="Closing Date">October 19, 2012</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
-<tr>
- <td><a href="gulfsouth.html">GulfSouth Private Bank</a></td>
- <td headers="city">Destin</td>
- <td headers="state">FL</td>
- <td headers="CERT #">58073</td>
- <td headers="AI">SmartBank</td>
- <td headers="Closing Date">October 19, 2012</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
-<tr>
- <td><a href="firstunited.html">First United Bank</a></td>
- <td headers="city">Crete</td>
- <td headers="state">IL</td>
- <td headers="CERT #">20685</td>
- <td headers="AI">Old Plank Trail Community Bank, National Association</td>
- <td headers="Closing Date">September 28, 2012</td>
- <td headers="Updated">November 15, 2012</td>
-</tr>
-<tr>
- <td><a href="truman.html">Truman Bank</a></td>
- <td headers="city">St. Louis</td>
- <td headers="state">MO</td>
- <td headers="CERT #">27316</td>
- <td headers="AI">Simmons First National Bank</td>
- <td headers="Closing Date">September 14, 2012</td>
- <td headers="Updated">December 17, 2012</td>
-</tr>
-<tr>
- <td><a href="firstcommbk_mn.html">First Commercial Bank</a></td>
- <td headers="city">Bloomington</td>
- <td headers="state">MN</td>
- <td headers="CERT #">35246</td>
- <td headers="AI">Republic Bank & Trust Company</td>
- <td headers="Closing Date">September 7, 2012</td>
- <td headers="Updated">December 17, 2012</td>
-</tr>
-<tr>
- <td><a href="waukegan.html">Waukegan Savings Bank</a></td>
- <td headers="city">Waukegan</td>
- <td headers="state">IL</td>
- <td headers="CERT #">28243</td>
- <td headers="AI"> First Midwest Bank</td>
- <td headers="Closing Date">August 3, 2012</td>
- <td headers="Updated">October 11, 2012</td>
-</tr>
-<tr>
- <td><a href="jasper.html">Jasper Banking Company</a></td>
- <td headers="city">Jasper</td>
- <td headers="state">GA</td>
- <td headers="CERT #">16240</td>
- <td headers="AI">Stearns Bank N.A.</td>
- <td headers="Closing Date">July 27, 2012</td>
- <td headers="Updated">December 17, 2012</td>
-</tr>
-<tr>
- <td><a href="secondfederal.html">Second Federal Savings and Loan Association of Chicago</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">27986</td>
- <td headers="AI">Hinsdale Bank & Trust Company</td>
- <td headers="Closing Date">July 20, 2012</td>
- <td headers="Updated">January 14, 2013</td>
-</tr>
-<tr>
- <td><a href="heartland.html">Heartland Bank</a></td>
- <td headers="city">Leawood</td>
- <td headers="state">KS</td>
- <td headers="CERT #">1361</td>
- <td headers="AI">Metcalf Bank</td>
- <td headers="Closing Date">July 20, 2012</td>
- <td headers="Updated">December 17, 2012</td>
-</tr>
-<tr>
- <td><a href="cherokee.html">First Cherokee State Bank</a></td>
- <td headers="city">Woodstock</td>
- <td headers="state">GA</td>
- <td headers="CERT #">32711</td>
- <td headers="AI">Community & Southern Bank</td>
- <td headers="Closing Date">July 20, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="georgiatrust.html">Georgia Trust Bank</a></td>
- <td headers="city">Buford</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57847</td>
- <td headers="AI">Community & Southern Bank</td>
- <td headers="Closing Date">July 20, 2012</td>
- <td headers="Updated">December 17, 2012</td>
-</tr>
-<tr>
- <td><a href="royalpalm.html">The Royal Palm Bank of Florida</a></td>
- <td headers="city">Naples</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57096</td>
- <td headers="AI">First National Bank of the Gulf Coast</td>
- <td headers="Closing Date">July 20, 2012</td>
- <td headers="Updated">January 7, 2013</td>
-</tr>
-<tr>
- <td><a href="glasgow.html">Glasgow Savings Bank</a></td>
- <td headers="city">Glasgow</td>
- <td headers="state">MO</td>
- <td headers="CERT #">1056</td>
- <td headers="AI"> Regional Missouri Bank</td>
- <td headers="Closing Date">July 13, 2012</td>
- <td headers="Updated">October 11, 2012</td>
-</tr>
-<tr>
- <td><a href="montgomery.html">Montgomery Bank & Trust</a></td>
- <td headers="city">Ailey</td>
- <td headers="state">GA</td>
- <td headers="CERT #">19498</td>
- <td headers="AI"> Ameris Bank</td>
- <td headers="Closing Date">July 6, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="farmersbank.html">The Farmers Bank of Lynchburg</a></td>
- <td headers="city">Lynchburg</td>
- <td headers="state">TN</td>
- <td headers="CERT #">1690</td>
- <td headers="AI">Clayton Bank and Trust</td>
- <td headers="Closing Date">June 15, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="securityexchange.html">Security Exchange Bank</a></td>
- <td headers="city">Marietta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">35299</td>
- <td headers="AI">Fidelity Bank</td>
- <td headers="Closing Date">June 15, 2012</td>
- <td headers="Updated">October 10, 2012</td>
-</tr>
-<tr>
- <td><a href="putnam.html">Putnam State Bank</a></td>
- <td headers="city">Palatka</td>
- <td headers="state">FL</td>
- <td headers="CERT #">27405</td>
- <td headers="AI">Harbor Community Bank</td>
- <td headers="Closing Date">June 15, 2012</td>
- <td headers="Updated">October 10, 2012</td>
-</tr>
-<tr>
- <td><a href="waccamaw.html">Waccamaw Bank</a></td>
- <td headers="city">Whiteville</td>
- <td headers="state">NC</td>
- <td headers="CERT #">34515</td>
- <td headers="AI">First Community Bank</td>
- <td headers="Closing Date">June 8, 2012</td>
- <td headers="Updated">November 8, 2012</td>
-</tr>
-<tr>
- <td><a href="ftsb.html">Farmers' and Traders' State Bank</a></td>
- <td headers="city">Shabbona</td>
- <td headers="state">IL</td>
- <td headers="CERT #">9257</td>
- <td headers="AI">First State Bank</td>
- <td headers="Closing Date">June 8, 2012</td>
- <td headers="Updated">October 10, 2012</td>
-</tr>
-<tr>
- <td><a href="carolina.html">Carolina Federal Savings Bank</a></td>
- <td headers="city">Charleston</td>
- <td headers="state">SC</td>
- <td headers="CERT #">35372</td>
- <td headers="AI">Bank of North Carolina</td>
- <td headers="Closing Date">June 8, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="firstcapital.html">First Capital Bank</a></td>
- <td headers="city">Kingfisher</td>
- <td headers="state">OK</td>
- <td headers="CERT #">416</td>
- <td headers="AI">F & M Bank</td>
- <td headers="Closing Date">June 8, 2012</td>
- <td headers="Updated">October 10, 2012</td>
-</tr>
-<tr>
- <td><a href="alabamatrust.html">Alabama Trust Bank, National Association</a></td>
- <td headers="city">Sylacauga</td>
- <td headers="state">AL</td>
- <td headers="CERT #">35224</td>
- <td headers="AI">Southern States Bank</td>
- <td headers="Closing Date">May 18, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="securitybank.html">Security Bank, National Association</a></td>
- <td headers="city">North Lauderdale</td>
- <td headers="state">FL</td>
- <td headers="CERT #">23156</td>
- <td headers="AI">Banesco USA</td>
- <td headers="Closing Date">May 4, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="palmdesert.html">Palm Desert National Bank</a></td>
- <td headers="city">Palm Desert</td>
- <td headers="state">CA</td>
- <td headers="CERT #">23632</td>
- <td headers="AI">Pacific Premier Bank</td>
- <td headers="Closing Date">April 27, 2012</td>
- <td headers="Updated">August 31, 2012</td>
-</tr>
-<tr>
- <td><a href="plantation.html">Plantation Federal Bank</a></td>
- <td headers="city">Pawleys Island</td>
- <td headers="state">SC</td>
- <td headers="CERT #">32503</td>
- <td headers="AI">First Federal Bank</td>
- <td headers="Closing Date">April 27, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="interbank.html">Inter Savings Bank, fsb D/B/A InterBank, fsb</a></td>
- <td headers="city">Maple Grove</td>
- <td headers="state">MN</td>
- <td headers="CERT #">31495</td>
- <td headers="AI">Great Southern Bank</td>
- <td headers="Closing Date">April 27, 2012</td>
- <td headers="Updated">October 17, 2012</td>
-</tr>
-<tr>
- <td><a href="harvest.html">HarVest Bank of Maryland</a></td>
- <td headers="city">Gaithersburg</td>
- <td headers="state">MD</td>
- <td headers="CERT #">57766</td>
- <td headers="AI">Sonabank</td>
- <td headers="Closing Date">April 27, 2012</td>
- <td headers="Updated">October 17, 2012</td>
-</tr>
-<tr>
- <td><a href="easternshore.html">Bank of the Eastern Shore</a></td>
- <td headers="city">Cambridge</td>
- <td headers="state">MD</td>
- <td headers="CERT #">26759</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">April 27, 2012</td>
- <td headers="Updated">October 17, 2012</td>
-</tr>
-<tr>
- <td><a href="fortlee.html">Fort Lee Federal Savings Bank, FSB</a></td>
- <td headers="city">Fort Lee</td>
- <td headers="state">NJ</td>
- <td headers="CERT #">35527</td>
- <td headers="AI">Alma Bank</td>
- <td headers="Closing Date">April 20, 2012</td>
- <td headers="Updated">August 31, 2012</td>
-</tr>
-<tr>
- <td><a href="fidelity.html">Fidelity Bank</a></td>
- <td headers="city">Dearborn</td>
- <td headers="state">MI</td>
- <td headers="CERT #">33883</td>
- <td headers="AI">The Huntington National Bank</td>
- <td headers="Closing Date">March 30, 2012</td>
- <td headers="Updated">August 9, 2012</td>
-</tr>
-<tr>
- <td><a href="premier-il.html">Premier Bank</a></td>
- <td headers="city">Wilmette</td>
- <td headers="state">IL</td>
- <td headers="CERT #">35419</td>
- <td headers="AI">International Bank of Chicago</td>
- <td headers="Closing Date">March 23, 2012</td>
- <td headers="Updated">October 17, 2012</td>
-</tr>
-<tr>
- <td><a href="covenant.html">Covenant Bank & Trust</a></td>
- <td headers="city">Rock Spring</td>
- <td headers="state">GA</td>
- <td headers="CERT #">58068</td>
- <td headers="AI">Stearns Bank, N.A.</td>
- <td headers="Closing Date">March 23, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="newcity.html">New City Bank </a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">57597</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">March 9, 2012</td>
- <td headers="Updated">October 29, 2012</td>
-</tr>
-<tr>
- <td><a href="global.html">Global Commerce Bank</a></td>
- <td headers="city">Doraville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34046</td>
- <td headers="AI">Metro City Bank</td>
- <td headers="Closing Date">March 2, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="homesvgs.html">Home Savings of America</a></td>
- <td headers="city">Little Falls</td>
- <td headers="state">MN</td>
- <td headers="CERT #">29178</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">February 24, 2012</td>
- <td headers="Updated">December 17, 2012</td>
-</tr>
-<tr>
- <td><a href="cbg.html">Central Bank of Georgia</a></td>
- <td headers="city">Ellaville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">5687</td>
- <td headers="AI">Ameris Bank</td>
- <td headers="Closing Date">February 24, 2012</td>
- <td headers="Updated">August 9, 2012</td>
-</tr>
-<tr>
- <td><a href="scbbank.html">SCB Bank</a></td>
- <td headers="city">Shelbyville</td>
- <td headers="state">IN</td>
- <td headers="CERT #">29761</td>
- <td headers="AI">First Merchants Bank, National Association</td>
- <td headers="Closing Date">February 10, 2012</td>
- <td headers="Updated">March 25, 2013</td>
-</tr>
-<tr>
- <td><a href="cnbt.html">Charter National Bank and Trust</a></td>
- <td headers="city">Hoffman Estates</td>
- <td headers="state">IL</td>
- <td headers="CERT #">23187</td>
- <td headers="AI">Barrington Bank & Trust
-Company, National Association</td>
- <td headers="Closing Date">February 10, 2012</td>
- <td headers="Updated">March 25, 2013</td>
-</tr>
-<tr>
- <td><a href="bankeast.html">BankEast</a></td>
- <td headers="city">Knoxville</td>
- <td headers="state">TN</td>
- <td headers="CERT #">19869</td>
- <td headers="AI">U.S.Bank National Association </td>
- <td headers="Closing Date">January 27, 2012</td>
- <td headers="Updated">March 8, 2013</td>
-</tr>
-<tr>
- <td><a href="patriot-mn.html">Patriot Bank Minnesota</a></td>
- <td headers="city">Forest Lake</td>
- <td headers="state">MN</td>
- <td headers="CERT #">34823</td>
- <td headers="AI">First Resource Bank</td>
- <td headers="Closing Date">January 27, 2012</td>
- <td headers="Updated">September 12, 2012</td>
-</tr>
-<tr>
- <td><a href="tcb.html">Tennessee Commerce Bank
-</a></td>
- <td headers="city">Franklin</td>
- <td headers="state">TN</td>
- <td headers="CERT #">35296</td>
- <td headers="AI">Republic Bank & Trust Company</td>
- <td headers="Closing Date">January 27, 2012</td>
- <td headers="Updated">November 20, 2012</td>
-</tr>
-<tr>
- <td><a href="fgbtcj.html">First Guaranty Bank and Trust Company of Jacksonville</a></td>
- <td headers="city">Jacksonville</td>
- <td headers="state">FL</td>
- <td headers="CERT #">16579</td>
- <td headers="AI">CenterState Bank of Florida, N.A.</td>
- <td headers="Closing Date">January 27, 2012</td>
- <td headers="Updated">September 12, 2012</td>
-</tr>
-<tr>
- <td><a href="americaneagle.html">American Eagle Savings Bank</a></td>
- <td headers="city">Boothwyn</td>
- <td headers="state">PA</td>
- <td headers="CERT #">31581</td>
- <td headers="AI">Capital Bank, N.A.</td>
- <td headers="Closing Date">January 20, 2012</td>
- <td headers="Updated">January 25, 2013</td>
-</tr>
-<tr>
- <td><a href="firststatebank-ga.html">The First State Bank</a></td>
- <td headers="city">Stockbridge</td>
- <td headers="state">GA</td>
- <td headers="CERT #">19252</td>
- <td headers="AI">Hamilton State Bank</td>
- <td headers="Closing Date">January 20, 2012</td>
- <td headers="Updated">January 25, 2013</td>
-</tr>
-<tr>
- <td><a href="cfsb.html">Central Florida State Bank</a></td>
- <td headers="city">Belleview</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57186</td>
- <td headers="AI">CenterState Bank of Florida, N.A.</td>
- <td headers="Closing Date">January 20, 2012</td>
- <td headers="Updated">January 25, 2013</td>
-</tr>
-<tr>
- <td><a href="westernnatl.html">Western National Bank</a></td>
- <td headers="city">Phoenix</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">57917</td>
- <td headers="AI"> Washington Federal</td>
- <td headers="Closing Date">December 16, 2011</td>
- <td headers="Updated">August 13, 2012</td>
-</tr>
-<tr>
-<td><a href="premier-fl.html">Premier Community Bank of the Emerald Coast</a></td>
- <td headers="city">Crestview</td>
- <td headers="state">FL</td>
- <td headers="CERT #">58343</td>
- <td headers="AI"> Summit Bank</td>
- <td headers="Closing Date">December 16, 2011</td>
- <td headers="Updated">September 12, 2012</td>
-</tr>
-<tr>
- <td><a href="centralprog.html">Central Progressive Bank</a></td>
- <td headers="city">Lacombe</td>
- <td headers="state">LA</td>
- <td headers="CERT #">19657</td>
- <td headers="AI"> First NBC Bank</td>
- <td headers="Closing Date">November 18, 2011</td>
- <td headers="Updated">August 13, 2012</td>
-</tr>
-<tr>
- <td><a href="polkcounty.html">Polk County Bank</a></td>
- <td headers="city">Johnston</td>
- <td headers="state">IA</td>
- <td headers="CERT #">14194</td>
- <td headers="AI">Grinnell State Bank</td>
- <td headers="Closing Date">November 18, 2011</td>
- <td headers="Updated">August 15, 2012</td>
-</tr>
-<tr>
- <td><a href="rockmart.html">Community Bank of Rockmart</a></td>
- <td headers="city">Rockmart</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57860</td>
- <td headers="AI">Century Bank of Georgia</td>
- <td headers="Closing Date">November 10, 2011</td>
- <td headers="Updated">August 13, 2012</td>
-</tr>
-<tr>
- <td><a href="sunfirst.html">SunFirst Bank</a></td>
- <td headers="city">Saint George</td>
- <td headers="state">UT</td>
- <td headers="CERT #">57087</td>
- <td headers="AI">Cache Valley Bank</td>
- <td headers="Closing Date">November 4, 2011</td>
- <td headers="Updated">November 16, 2012</td>
-</tr>
-<tr>
- <td><a href="midcity.html">Mid City Bank, Inc.</a></td>
- <td headers="city">Omaha</td>
- <td headers="state">NE</td>
- <td headers="CERT #">19397</td>
- <td headers="AI">Premier Bank</td>
- <td headers="Closing Date">November 4, 2011</td>
- <td headers="Updated">August 15, 2012</td>
-</tr>
-<tr>
- <td><a href="allamerican.html ">All American Bank</a></td>
- <td headers="city">Des Plaines</td>
- <td headers="state">IL</td>
- <td headers="CERT #">57759</td>
- <td headers="AI">International Bank of Chicago</td>
- <td headers="Closing Date">October 28, 2011</td>
- <td headers="Updated">August 15, 2012</td>
-</tr>
-<tr>
- <td><a href="commbanksco.html">Community Banks of Colorado</a></td>
- <td headers="city">Greenwood Village</td>
- <td headers="state">CO</td>
- <td headers="CERT #">21132</td>
- <td headers="AI">Bank Midwest, N.A.</td>
- <td headers="Closing Date">October 21, 2011</td>
- <td headers="Updated">January 2, 2013</td>
-</tr>
-<tr>
- <td><a href="commcapbk.html">Community Capital Bank</a></td>
- <td headers="city">Jonesboro</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57036</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">October 21, 2011</td>
- <td headers="Updated">November 8, 2012</td>
-</tr>
-<tr>
- <td><a href="decatur.html">Decatur First Bank</a></td>
- <td headers="city">Decatur</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34392</td>
- <td headers="AI">Fidelity Bank</td>
- <td headers="Closing Date">October 21, 2011</td>
- <td headers="Updated">November 8, 2012</td>
-</tr>
-<tr>
- <td><a href="oldharbor.html">Old Harbor Bank</a></td>
- <td headers="city">Clearwater</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57537</td>
- <td headers="AI">1st United Bank</td>
- <td headers="Closing Date">October 21, 2011</td>
- <td headers="Updated">November 8, 2012</td>
-</tr>
-<tr>
- <td><a href="countrybank.html">Country Bank</a></td>
- <td headers="city">Aledo</td>
- <td headers="state">IL</td>
- <td headers="CERT #">35395</td>
- <td headers="AI">Blackhawk Bank & Trust</td>
- <td headers="Closing Date">October 14, 2011</td>
- <td headers="Updated">August 15, 2012</td>
-</tr>
-<tr>
- <td><a href="firststatebank-nj.html">First State Bank</a></td>
- <td headers="city">Cranford</td>
- <td headers="state">NJ</td>
- <td headers="CERT #">58046</td>
- <td headers="AI">Northfield Bank</td>
- <td headers="Closing Date">October 14, 2011</td>
- <td headers="Updated">November 8, 2012</td>
-</tr>
-<tr>
- <td><a href="blueridge.html">Blue Ridge Savings Bank, Inc.</a></td>
- <td headers="city">Asheville</td>
- <td headers="state">NC</td>
- <td headers="CERT #">32347</td>
- <td headers="AI">Bank of North Carolina</td>
- <td headers="Closing Date">October 14, 2011</td>
- <td headers="Updated">November 8, 2012</td>
-</tr>
-<tr>
- <td><a href="piedmont-ga.html">Piedmont Community Bank</a></td>
- <td headers="city">Gray</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57256</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">October 14, 2011</td>
- <td headers="Updated">January 22, 2013</td>
-</tr>
-<tr>
- <td><a href="sunsecurity.html">Sun Security Bank</a></td>
- <td headers="city">Ellington</td>
- <td headers="state">MO</td>
- <td headers="CERT #">20115</td>
- <td headers="AI"> Great Southern Bank </td>
- <td headers="Closing Date">October 7, 2011</td>
- <td headers="Updated">November 7, 2012</td>
-</tr>
-<tr>
- <td><a href="riverbank.html">The RiverBank</a></td>
- <td headers="city">Wyoming</td>
- <td headers="state">MN</td>
- <td headers="CERT #">10216</td>
- <td headers="AI"> Central Bank </td>
- <td headers="Closing Date">October 7, 2011</td>
- <td headers="Updated">November 7, 2012</td>
-</tr>
-<tr>
- <td><a href="firstintlbank.html">First International Bank</a></td>
- <td headers="city">Plano</td>
- <td headers="state">TX</td>
- <td headers="CERT #">33513</td>
- <td headers="AI"> American First National Bank </td>
- <td headers="Closing Date">September 30, 2011</td>
- <td headers="Updated">October 9, 2012</td>
-</tr>
-<tr>
- <td><a href="cbnc.html">Citizens Bank of Northern California</a></td>
- <td headers="city">Nevada City</td>
- <td headers="state">CA</td>
- <td headers="CERT #">33983</td>
- <td headers="AI"> Tri Counties Bank</td>
- <td headers="Closing Date">September 23, 2011</td>
- <td headers="Updated">October 9, 2012</td>
-</tr>
-<tr>
- <td><a href="boc-va.html">Bank of the Commonwealth</a></td>
- <td headers="city">Norfolk</td>
- <td headers="state">VA</td>
- <td headers="CERT #">20408</td>
- <td headers="AI">Southern Bank and Trust Company</td>
- <td headers="Closing Date">September 23, 2011</td>
- <td headers="Updated">October 9, 2012</td>
-</tr>
-<tr>
- <td><a href="fnbf.html">The First National Bank of Florida</a></td>
- <td headers="city">Milton</td>
- <td headers="state">FL</td>
- <td headers="CERT #">25155</td>
- <td headers="AI">CharterBank</td>
- <td headers="Closing Date">September 9, 2011</td>
- <td headers="Updated">September 6, 2012</td>
-</tr>
-<tr>
- <td><a href="creekside.html">CreekSide Bank</a></td>
- <td headers="city">Woodstock</td>
- <td headers="state">GA</td>
- <td headers="CERT #">58226</td>
- <td headers="AI">Georgia Commerce Bank</td>
- <td headers="Closing Date">September 2, 2011</td>
- <td headers="Updated">September 6, 2012</td>
-</tr>
-<tr>
- <td><a href="patriot.html">Patriot Bank of Georgia</a></td>
- <td headers="city">Cumming</td>
- <td headers="state">GA</td>
- <td headers="CERT #">58273</td>
- <td headers="AI">Georgia Commerce Bank</td>
- <td headers="Closing Date">September 2, 2011</td>
- <td headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="firstchoice-il.html">First Choice Bank</a></td>
- <td headers="city">Geneva</td>
- <td headers="state">IL</td>
- <td headers="CERT #">57212</td>
- <td headers="AI">Inland Bank & Trust</td>
- <td headers="Closing Date">August 19, 2011</td>
- <td headers="Updated">August 15, 2012</td>
-</tr>
-<tr>
- <td><a href="firstsouthern-ga.html">First Southern National Bank</a></td>
- <td headers="city">Statesboro</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57239</td>
- <td headers="AI">Heritage Bank of the South</td>
- <td headers="Closing Date">August 19, 2011</td>
- <td headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="lydian.html">Lydian Private Bank</a></td>
- <td headers="city">Palm Beach</td>
- <td headers="state">FL</td>
- <td headers="CERT #">35356</td>
- <td headers="AI">Sabadell United Bank, N.A.</td>
- <td headers="Closing Date">August 19, 2011</td>
- <td headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="publicsvgs.html">Public Savings Bank</a></td>
- <td headers="city">Huntingdon Valley</td>
- <td headers="state">PA</td>
- <td headers="CERT #">34130</td>
- <td headers="AI">Capital Bank, N.A.</td>
- <td headers="Closing Date">August 18, 2011</td>
- <td headers="Updated">August 15, 2012</td>
-</tr>
-<tr>
- <td><a href="fnbo.html">The First National Bank of Olathe</a></td>
- <td headers="city">Olathe</td>
- <td headers="state">KS</td>
- <td headers="CERT #">4744</td>
- <td headers="AI">Enterprise Bank & Trust</td>
- <td headers="Closing Date">August 12, 2011</td>
- <td headers="Updated">August 23, 2012</td>
-</tr>
-<tr>
- <td><a href="whitman.html">Bank of Whitman</a></td>
- <td headers="city">Colfax</td>
- <td headers="state">WA</td>
- <td headers="CERT #">22528</td>
- <td headers="AI">Columbia State Bank</td>
- <td headers="Closing Date">August 5, 2011</td>
- <td headers="Updated">August 16, 2012</td>
-</tr>
-<tr>
- <td><a href="shorewood.html">Bank of Shorewood</a></td>
- <td headers="city">Shorewood</td>
- <td headers="state">IL</td>
- <td headers="CERT #">22637</td>
- <td headers="AI">Heartland Bank and Trust Company</td>
- <td headers="Closing Date">August 5, 2011</td>
- <td headers="Updated">August 16, 2012</td>
-</tr>
-<tr>
- <td><a href="integra.html">Integra Bank National Association</a></td>
- <td headers="city">Evansville</td>
- <td headers="state">IN</td>
- <td headers="CERT #">4392</td>
- <td headers="AI">Old National Bank</td>
- <td headers="Closing Date">July 29, 2011</td>
- <td headers="Updated">August 16, 2012</td>
-</tr>
-<tr>
- <td><a href="bankmeridian.html">BankMeridian, N.A.</a></td>
- <td headers="city">Columbia</td>
- <td headers="state">SC</td>
- <td headers="CERT #">58222</td>
- <td headers="AI">SCBT National Association</td>
- <td headers="Closing Date">July 29, 2011</td>
- <td headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="vbb.html">Virginia Business Bank</a></td>
- <td headers="city">Richmond</td>
- <td headers="state">VA</td>
- <td headers="CERT #">58283</td>
- <td headers="AI">Xenith Bank</td>
- <td headers="Closing Date">July 29, 2011</td>
- <td headers="Updated">October 9, 2012</td>
-</tr>
-<tr>
- <td><a href="bankofchoice.html">Bank of Choice</a></td>
- <td headers="city">Greeley</td>
- <td headers="state">CO</td>
- <td headers="CERT #">2994</td>
- <td headers="AI">Bank Midwest, N.A.</td>
- <td headers="Closing Date">July 22, 2011</td>
- <td headers="Updated">September 12, 2012</td>
-</tr>
-<tr>
- <td><a href="landmark.html">LandMark Bank of Florida</a></td>
- <td headers="city">Sarasota</td>
- <td headers="state">FL</td>
- <td headers="CERT #">35244</td>
- <td headers="AI">American Momentum Bank</td>
- <td headers="Closing Date">July 22, 2011</td>
- <td headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="southshore.html">Southshore Community Bank</a></td>
- <td headers="city">Apollo Beach</td>
- <td headers="state">FL</td>
- <td headers="CERT #">58056</td>
- <td headers="AI">American Momentum Bank</td>
- <td headers="Closing Date">July 22, 2011</td>
- <td headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="summitbank.html">Summit Bank</a></td>
- <td headers="city">Prescott </td>
- <td headers="state">AZ</td>
- <td headers="CERT #">57442 </td>
- <td headers="AI">The Foothills Bank</td>
- <td width="125" headers="Closing Date">July 15, 2011</td>
- <td width="125" headers="Updated">August 16, 2012</td>
-</tr>
-<tr>
- <td><a href="firstpeoples.html">First Peoples Bank</a></td>
- <td headers="city">Port St. Lucie </td>
- <td headers="state">FL</td>
- <td headers="CERT #">34870 </td>
- <td headers="AI">Premier American Bank, N.A.</td>
- <td width="125" headers="Closing Date">July 15, 2011</td>
- <td width="125" headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="hightrust.html">High Trust Bank</a></td>
- <td headers="city">Stockbridge </td>
- <td headers="state">GA</td>
- <td headers="CERT #">19554 </td>
- <td headers="AI">Ameris Bank</td>
- <td width="125" headers="Closing Date">July 15, 2011</td>
- <td width="125" headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="onegeorgia.html">One Georgia Bank</a></td>
- <td headers="city">Atlanta </td>
- <td headers="state">GA</td>
- <td headers="CERT #">58238 </td>
- <td headers="AI">Ameris Bank</td>
- <td width="125" headers="Closing Date">July 15, 2011</td>
- <td width="125" headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="signaturebank.html">Signature Bank</a></td>
- <td headers="city">Windsor </td>
- <td headers="state">CO</td>
- <td headers="CERT #">57835 </td>
- <td headers="AI">Points West Community Bank</td>
- <td width="125" headers="Closing Date">July 8, 2011</td>
- <td width="125" headers="Updated">October 26, 2012</td>
-</tr>
-<tr>
- <td><a href="coloradocapital.html">Colorado Capital Bank</a></td>
- <td headers="city">Castle Rock </td>
- <td headers="state">CO</td>
- <td headers="CERT #">34522</td>
- <td headers="AI">First-Citizens Bank & Trust Company</td>
- <td width="125" headers="Closing Date">July 8, 2011</td>
- <td width="125" headers="Updated">January 15, 2013</td>
-</tr>
-<tr>
- <td><a href="firstchicago.html">First Chicago Bank & Trust</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">27935</td>
- <td headers="AI">Northbrook Bank & Trust Company</td>
- <td width="125" headers="Closing Date">July 8, 2011</td>
- <td width="125" headers="Updated">September 9, 2012</td>
-</tr>
-<tr>
- <td><a href="mountain.html">Mountain Heritage Bank</a></td>
- <td headers="city">Clayton</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57593</td>
- <td headers="AI">First American Bank and Trust Company</td>
- <td width="125" headers="Closing Date">June 24, 2011</td>
- <td width="125" headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="fcbtb.html">First Commercial Bank of Tampa Bay</a></td>
- <td headers="city">Tampa</td>
- <td headers="state">FL</td>
- <td headers="CERT #">27583</td>
- <td headers="AI">Stonegate Bank</td>
- <td width="125" headers="Closing Date">June 17, 2011</td>
- <td width="125" headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="mcintoshstate.html">McIntosh State Bank</a></td>
- <td headers="city">Jackson</td>
- <td headers="state">GA</td>
- <td headers="CERT #">19237</td>
- <td headers="AI">Hamilton State Bank</td>
- <td width="125" headers="Closing Date">June 17, 2011</td>
- <td width="125" headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="atlanticbanktrust.html">Atlantic Bank and Trust</a>
- </td>
- <td headers="city">Charleston</td>
- <td headers="state">SC</td>
- <td headers="CERT #">58420</td>
- <td headers="AI">First Citizens Bank and Trust Company, Inc.</td>
- <td width="125" headers="Closing Date">June 3, 2011</td>
- <td width="125" headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="firstheritage.html">First Heritage Bank</a></td>
- <td headers="city">Snohomish</td>
- <td headers="state">WA</td>
- <td headers="CERT #">23626</td>
- <td headers="AI">Columbia State Bank</td>
- <td width="125" headers="Closing Date">May 27, 2011</td>
- <td width="125" headers="Updated">January 28, 2013</td>
-</tr>
-<tr>
- <td><a href="summit.html">Summit Bank</a></td>
- <td headers="city">Burlington</td>
- <td headers="state">WA</td>
- <td headers="CERT #">513</td>
- <td headers="AI">Columbia State Bank</td>
- <td width="125" headers="Closing Date">May 20, 2011</td>
- <td width="125" headers="Updated">January 22, 2013</td>
-</tr>
-<tr>
- <td><a href="fgbc.html">First Georgia Banking Company</a></td>
- <td headers="city">Franklin</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57647</td>
- <td headers="AI">CertusBank, National Association</td>
- <td width="125" headers="Closing Date">May 20, 2011</td>
- <td width="125" headers="Updated">November 13, 2012</td>
-</tr>
-<tr>
- <td><a href="atlanticsthrn.html">Atlantic Southern Bank</a></td>
- <td headers="city">Macon</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57213</td>
- <td headers="AI">CertusBank, National Association</td>
- <td width="125" headers="Closing Date">May 20, 2011</td>
- <td width="125" headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="coastal_fl.html">Coastal Bank</a></td>
- <td headers="city">Cocoa Beach</td>
- <td headers="state">FL</td>
- <td headers="CERT #">34898</td>
- <td headers="AI">Florida Community Bank, a division of Premier American Bank, N.A.</td>
- <td width="125" headers="Closing Date">May 6, 2011</td>
- <td width="125" headers="Updated">November 30, 2012</td>
-</tr>
-<tr>
- <td><a href="communitycentral.html">Community Central Bank</a></td>
- <td headers="city">Mount Clemens</td>
- <td headers="state">MI</td>
- <td headers="CERT #">34234</td>
- <td headers="AI">Talmer Bank & Trust</td>
- <td headers="Closing Date">April 29, 2011</td>
- <td headers="Updated">August 16, 2012</td>
-</tr>
-<tr>
- <td><a href="parkavenue_ga.html">The Park Avenue Bank</a></td>
- <td headers="city">Valdosta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">19797</td>
- <td headers="AI">Bank of the Ozarks</td>
- <td headers="Closing Date">April 29, 2011</td>
- <td headers="Updated">November 30, 2012</td>
-</tr>
-<tr>
- <td><a href="firstchoice.html">First Choice Community Bank</a></td>
- <td headers="city">Dallas</td>
- <td headers="state">GA</td>
- <td headers="CERT #">58539</td>
- <td headers="AI">Bank of the Ozarks</td>
- <td headers="Closing Date">April 29, 2011</td>
- <td headers="Updated">January 22, 2013</td>
-</tr>
-<tr>
- <td><a href="cortez.html">Cortez Community Bank</a></td>
- <td headers="city">Brooksville</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57625</td>
- <td headers="AI">Florida Community Bank, a division of Premier American Bank, N.A.
- </td>
- <td headers="Closing Date">April 29, 2011</td>
- <td headers="Updated">November 30, 2012</td>
-</tr>
-<tr>
- <td><a href="fnbcf.html">First National Bank of Central Florida</a></td>
- <td headers="city">Winter Park</td>
- <td headers="state">FL</td>
- <td headers="CERT #">26297</td>
- <td headers="AI">Florida Community Bank, a division of Premier American Bank, N.A.</td>
- <td headers="Closing Date">April 29, 2011</td>
- <td headers="Updated">November 30, 2012</td>
-</tr>
-<tr>
- <td><a href="heritage_ms.html">Heritage Banking Group</a></td>
- <td headers="city">Carthage</td>
- <td headers="state">MS</td>
- <td headers="CERT #">14273</td>
- <td headers="AI">Trustmark National Bank</td>
- <td headers="Closing Date">April 15, 2011</td>
- <td headers="Updated">November 30, 2012</td>
-</tr>
-<tr>
- <td><a href="rosemount.html">Rosemount National Bank</a></td>
- <td headers="city">Rosemount</td>
- <td headers="state">MN</td>
- <td headers="CERT #">24099</td>
- <td headers="AI">Central Bank</td>
- <td headers="Closing Date">April 15, 2011</td>
- <td headers="Updated">August 16, 2012</td>
-</tr>
-<tr>
- <td><a href="superior_al.html">Superior Bank</a></td>
- <td headers="city">Birmingham</td>
- <td headers="state">AL</td>
- <td headers="CERT #">17750</td>
- <td headers="AI">Superior Bank, National Association</td>
- <td headers="Closing Date">April 15, 2011</td>
- <td headers="Updated">November 30, 2012</td>
-</tr>
-<tr>
- <td><a href="nexity.html">Nexity Bank</a></td>
- <td headers="city">Birmingham</td>
- <td headers="state">AL</td>
- <td headers="CERT #">19794</td>
- <td headers="AI">AloStar Bank of Commerce</td>
- <td headers="Closing Date">April 15, 2011</td>
- <td headers="Updated">September 4, 2012</td>
-</tr>
-<tr>
- <td><a href="newhorizons.html">New Horizons Bank</a></td>
- <td headers="city">East Ellijay</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57705</td>
- <td headers="AI">Citizens South Bank</td>
- <td headers="Closing Date">April 15, 2011</td>
- <td headers="Updated">August 16, 2012</td>
-</tr>
-<tr>
- <td><a href="bartow.html">Bartow County Bank</a></td>
- <td headers="city">Cartersville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">21495</td>
- <td headers="AI">Hamilton State Bank</td>
- <td headers="Closing Date">April 15, 2011</td>
- <td headers="Updated">January 22, 2013</td>
-</tr>
-<tr>
- <td><a href="nevadacommerce.html">Nevada Commerce Bank</a></td>
- <td headers="city">Las Vegas</td>
- <td headers="state">NV</td>
- <td headers="CERT #">35418</td>
- <td headers="AI">City National Bank</td>
- <td headers="Closing Date">April 8, 2011</td>
- <td headers="Updated">September 9, 2012</td>
-</tr>
-<tr>
- <td><a href="westernsprings.html">Western Springs National Bank and Trust</a></td>
- <td headers="city">Western Springs</td>
- <td headers="state">IL</td>
- <td headers="CERT #">10086</td>
- <td headers="AI">Heartland Bank and Trust Company</td>
- <td headers="Closing Date">April 8, 2011</td>
- <td headers="Updated">January 22, 2013</td>
-</tr>
-<tr>
- <td><a href="bankofcommerce.html">The Bank of Commerce</a></td>
- <td headers="city">Wood Dale</td>
- <td headers="state">IL</td>
- <td headers="CERT #">34292</td>
- <td headers="AI">Advantage National Bank Group</td>
- <td headers="Closing Date">March 25, 2011</td>
- <td headers="Updated">January 22, 2013</td>
-</tr>
-<tr>
- <td><a href="legacy-wi.html">Legacy Bank</a></td>
- <td headers="city">Milwaukee</td>
- <td headers="state">WI</td>
- <td headers="CERT #">34818</td>
- <td headers="AI">Seaway Bank and Trust Company</td>
- <td headers="Closing Date">March 11, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
- <tr>
- <td><a href="firstnatldavis.html">First National Bank of Davis</a></td>
- <td headers="city">Davis</td>
- <td headers="state">OK</td>
- <td headers="CERT #">4077</td>
- <td headers="AI">The Pauls Valley National Bank</td>
- <td headers="Closing Date">March 11, 2011</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="valleycomm.html">Valley Community Bank</a></td>
- <td headers="city">St. Charles</td>
- <td headers="state">IL</td>
- <td headers="CERT #">34187</td>
- <td headers="AI">First State Bank</td>
- <td headers="Closing Date">February 25, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
- <tr>
- <td><a href="sanluistrust.html">San Luis Trust Bank, FSB </a></td>
- <td headers="city">San Luis Obispo</td>
- <td headers="state">CA</td>
- <td headers="CERT #">34783</td>
- <td headers="AI">First California Bank</td>
- <td headers="Closing Date">February 18, 2011</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
-
- <tr>
- <td><a href="charteroak.html">Charter Oak Bank</a></td>
- <td headers="city">Napa</td>
- <td headers="state">CA</td>
- <td headers="CERT #">57855</td>
- <td headers="AI">Bank of Marin</td>
- <td headers="Closing Date">February 18, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
- <tr>
- <td><a href="citizensbk_ga.html">Citizens Bank of Effingham</a></td>
- <td headers="city">Springfield</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34601</td>
- <td headers="AI">Heritage Bank of the South</td>
- <td headers="Closing Date">February 18, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
- <tr>
- <td><a href="habersham.html">Habersham Bank</a></td>
- <td headers="city">Clarkesville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">151</td>
- <td headers="AI">SCBT National Association</td>
- <td headers="Closing Date">February 18, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="canyonstate.html">Canyon National Bank</a></td>
- <td headers="city">Palm Springs</td>
- <td headers="state">CA</td>
- <td headers="CERT #">34692</td>
- <td headers="AI">Pacific Premier Bank</td>
- <td headers="Closing Date">February 11, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
-
-
- <tr>
- <td><a href="badgerstate.html">Badger State Bank</a></td>
- <td headers="city">Cassville</td>
- <td headers="state">WI</td>
- <td headers="CERT #">13272</td>
- <td headers="AI">Royal Bank </td>
- <td headers="Closing Date">February 11, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="peoplesstatebank.html">Peoples State Bank</a></td>
- <td headers="city">Hamtramck</td>
- <td headers="state">MI</td>
- <td headers="CERT #">14939</td>
- <td headers="AI">First Michigan Bank</td>
- <td headers="Closing Date">February 11, 2011</td>
- <td headers="Updated">January 22, 2013</td>
- </tr>
-
-
- <tr>
- <td><a href="sunshinestate.html">Sunshine State Community Bank</a></td>
- <td headers="city">Port Orange</td>
- <td headers="state">FL</td>
- <td headers="CERT #">35478</td>
- <td headers="AI">Premier American Bank, N.A.</td>
- <td headers="Closing Date">February 11, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
- <tr>
- <td><a href="commfirst_il.html">Community First Bank Chicago</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">57948</td>
- <td headers="AI">Northbrook Bank & Trust Company</td>
- <td headers="Closing Date">February 4, 2011</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="northgabank.html">North Georgia Bank</a></td>
- <td headers="city">Watkinsville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">35242</td>
- <td headers="AI">BankSouth</td>
- <td headers="Closing Date">February 4, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
- <tr>
- <td><a href="americantrust.html">American Trust Bank</a></td>
- <td headers="city">Roswell</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57432</td>
- <td headers="AI">Renasant Bank</td>
- <td headers="Closing Date">February 4, 2011</td>
- <td headers="Updated">October 31, 2012</td>
- </tr>
- <tr>
- <td><a href="firstcomm_nm.html">First Community Bank</a></td>
- <td headers="city">Taos</td>
- <td headers="state">NM</td>
- <td headers="CERT #">12261</td>
- <td headers="AI">U.S. Bank, N.A.</td>
- <td headers="Closing Date">January 28, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
- <tr>
- <td><a href="firstier.html">FirsTier Bank</a></td>
- <td headers="city">Louisville</td>
- <td headers="state">CO</td>
- <td headers="CERT #">57646</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">January 28, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
- <tr>
- <td><a href="evergreenstatewi.html">Evergreen State Bank</a></td>
- <td headers="city">Stoughton</td>
- <td headers="state">WI</td>
- <td headers="CERT #">5328</td>
- <td headers="AI">McFarland State Bank</td>
- <td headers="Closing Date">January 28, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="firststatebank_ok.html">The First State Bank</a></td>
- <td headers="city">Camargo</td>
- <td headers="state">OK</td>
- <td headers="CERT #">2303</td>
- <td headers="AI">Bank 7</td>
- <td headers="Closing Date">January 28, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="unitedwestern.html">United Western Bank</a></td>
- <td headers="city">Denver</td>
- <td headers="state">CO</td>
- <td headers="CERT #">31293</td>
- <td headers="AI">First-Citizens Bank & Trust Company</td>
- <td headers="Closing Date">January 21, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="bankofasheville.html">The Bank of Asheville</a></td>
- <td headers="city">Asheville</td>
- <td headers="state">NC</td>
- <td headers="CERT #">34516</td>
- <td headers="AI">First Bank</td>
- <td headers="Closing Date">January 21, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="commsouth.html">CommunitySouth Bank & Trust</a></td>
- <td headers="city">Easley</td>
- <td headers="state">SC</td>
- <td headers="CERT #">57868</td>
- <td headers="AI">CertusBank, National Association</td>
- <td headers="Closing Date">January 21, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
- <tr>
- <td><a href="enterprise.html">Enterprise Banking Company</a></td>
- <td headers="city">McDonough</td>
- <td headers="state">GA</td>
- <td headers="CERT #">19758</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">January 21, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="oglethorpe.html">Oglethorpe Bank</a></td>
- <td headers="city">Brunswick</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57440</td>
- <td headers="AI">Bank of the Ozarks </td>
- <td headers="Closing Date">January 14, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
- <tr>
- <td><a href="legacybank.html">Legacy Bank</a></td>
- <td headers="city">Scottsdale</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">57820</td>
- <td headers="AI">Enterprise Bank & Trust </td>
- <td headers="Closing Date">January 7, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
- <tr>
- <td><a href="firstcommercial.html">First Commercial Bank of Florida</a></td>
- <td headers="city">Orlando</td>
- <td headers="state">FL</td>
- <td headers="CERT #">34965</td>
- <td headers="AI">First Southern Bank</td>
- <td headers="Closing Date">January 7, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
- <tr>
- <td><a href="communitynatl.html">Community National Bank</a></td>
- <td headers="city">Lino Lakes</td>
- <td headers="state">MN</td>
- <td headers="CERT #">23306</td>
- <td headers="AI">Farmers & Merchants Savings Bank</td>
- <td headers="Closing Date">December 17, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="firstsouthern.html">First Southern Bank </a></td>
- <td headers="city">Batesville</td>
- <td headers="state">AR</td>
- <td headers="CERT #">58052</td>
- <td headers="AI">Southern Bank</td>
- <td headers="Closing Date">December 17, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="unitedamericas.html">United Americas Bank, N.A.</a></td>
- <td headers="city">Atlanta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">35065</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">December 17, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="appalachianga.html">Appalachian Community Bank, FSB </a></td>
- <td headers="city">McCaysville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">58495</td>
- <td headers="AI">Peoples Bank of East Tennessee</td>
- <td headers="Closing Date">December 17, 2010</td>
- <td headers="Updated">October 31, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="chestatee.html">Chestatee State Bank</a></td>
- <td headers="city">Dawsonville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34578</td>
- <td headers="AI">Bank of the Ozarks</td>
- <td headers="Closing Date">December 17, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="bankofmiami.html">The Bank of Miami, N.A.</a></td>
- <td headers="city">Coral Gables</td>
- <td headers="state">FL</td>
- <td headers="CERT #">19040</td>
- <td headers="AI">1st United Bank </td>
- <td headers="Closing Date">December 17, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
- <tr>
- <td><a href="earthstar.html">Earthstar Bank</a></td>
- <td headers="city">Southampton</td>
- <td headers="state">PA</td>
- <td headers="CERT #">35561</td>
- <td headers="AI">Polonia Bank</td>
- <td headers="Closing Date">December 10, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="paramount.html">Paramount Bank</a></td>
- <td headers="city">Farmington Hills</td>
- <td headers="state">MI</td>
- <td headers="CERT #">34673</td>
- <td headers="AI">Level One Bank</td>
- <td headers="Closing Date">December 10, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="firstbanking.html">First Banking Center</a></td>
- <td headers="city">Burlington</td>
- <td headers="state">WI</td>
- <td headers="CERT #">5287</td>
- <td headers="AI">First Michigan Bank</td>
- <td headers="Closing Date">November 19, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
-
-
-
- <tr>
- <td><a href="allegbank.html">Allegiance Bank of North America</a></td>
- <td headers="city">Bala Cynwyd</td>
- <td headers="state">PA</td>
- <td headers="CERT #">35078</td>
- <td headers="AI">VIST Bank</td>
- <td headers="Closing Date">November 19, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="gulfstate.html">Gulf State Community Bank</a></td>
- <td headers="city">Carrabelle</td>
- <td headers="state">FL</td>
- <td headers="CERT #">20340</td>
- <td headers="AI">Centennial Bank</td>
- <td headers="Closing Date">November 19, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="copperstar.html">Copper Star Bank</a></td>
- <td headers="city">Scottsdale</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">35463</td>
- <td headers="AI">Stearns Bank, N.A.</td>
- <td headers="Closing Date">November 12, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="darbybank.html">Darby Bank & Trust Co.</a></td>
- <td headers="city">Vidalia</td>
- <td headers="state">GA</td>
- <td headers="CERT #">14580</td>
- <td headers="AI">Ameris Bank</td>
- <td headers="Closing Date">November 12, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="tifton.html">Tifton Banking Company</a></td>
- <td headers="city">Tifton</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57831</td>
- <td headers="AI">Ameris Bank</td>
- <td headers="Closing Date">November 12, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
- <tr>
- <td><a href="firstvietnamese.html">First Vietnamese American Bank</a><br />
- <a href="firstvietnamese_viet.pdf">In Vietnamese</a></td>
- <td headers="city">Westminster</td>
- <td headers="state">CA</td>
- <td headers="CERT #">57885</td>
- <td headers="AI">Grandpoint Bank</td>
- <td headers="Closing Date">November 5, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
- <tr>
- <td><a href="piercecommercial.html">Pierce Commercial Bank</a></td>
- <td headers="city">Tacoma</td>
- <td headers="state">WA</td>
- <td headers="CERT #">34411</td>
- <td headers="AI">Heritage Bank</td>
- <td headers="Closing Date">November 5, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="westerncommercial_ca.html">Western Commercial Bank</a></td>
- <td headers="city">Woodland Hills</td>
- <td headers="state">CA</td>
- <td headers="CERT #">58087</td>
- <td headers="AI">First California Bank</td>
- <td headers="Closing Date">November 5, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
- <tr>
- <td><a href="kbank.html">K Bank</a></td>
- <td headers="city">Randallstown</td>
- <td headers="state">MD</td>
- <td headers="CERT #">31263</td>
- <td headers="AI">Manufacturers and Traders Trust Company (M&T Bank)</td>
- <td headers="Closing Date">November 5, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="firstazfsb.html">First Arizona Savings, A FSB</a></td>
- <td headers="city">Scottsdale</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">32582</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">October 22, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="hillcrest_ks.html">Hillcrest Bank</a></td>
- <td headers="city">Overland Park</td>
- <td headers="state">KS</td>
- <td headers="CERT #">22173</td>
- <td headers="AI">Hillcrest Bank, N.A.</td>
- <td headers="Closing Date">October 22, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="firstsuburban.html">First Suburban National Bank</a></td>
- <td headers="city">Maywood</td>
- <td headers="state">IL</td>
- <td headers="CERT #">16089</td>
- <td headers="AI">Seaway Bank and Trust Company</td>
- <td headers="Closing Date">October 22, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
-
-
-
- <tr>
- <td><a href="fnbbarnesville.html">The First National Bank of Barnesville</a></td>
- <td headers="city">Barnesville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">2119</td>
- <td headers="AI">United Bank</td>
- <td headers="Closing Date">October 22, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
-
- <tr>
- <td><a href="gordon.html">The Gordon Bank</a></td>
- <td headers="city">Gordon</td>
- <td headers="state">GA</td>
- <td headers="CERT #">33904</td>
- <td headers="AI">Morris Bank</td>
- <td headers="Closing Date">October 22, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
-
- <tr>
- <td><a href="progress_fl.html">Progress Bank of Florida</a></td>
- <td headers="city">Tampa</td>
- <td headers="state">FL</td>
- <td headers="CERT #">32251</td>
- <td headers="AI">Bay Cities Bank</td>
- <td headers="Closing Date">October 22, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
-
- <tr>
- <td><a href="firstbankjacksonville.html">First Bank of Jacksonville</a></td>
- <td headers="city">Jacksonville</td>
- <td headers="state">FL</td>
- <td headers="CERT #">27573</td>
- <td headers="AI">Ameris Bank</td>
- <td headers="Closing Date">October 22, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
- <tr>
- <td><a href="premier_mo.html">Premier Bank</a></td>
- <td headers="city">Jefferson City</td>
- <td headers="state">MO</td>
- <td headers="CERT #">34016</td>
- <td headers="AI">Providence Bank</td>
- <td headers="Closing Date">October 15, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="westbridge.html">WestBridge Bank and Trust Company</a></td>
- <td headers="city">Chesterfield</td>
- <td headers="state">MO</td>
- <td headers="CERT #">58205</td>
- <td headers="AI">Midland States Bank</td>
- <td headers="Closing Date">October 15, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="securitysavingsfsb.html">Security Savings Bank, F.S.B.</a></td>
- <td headers="city">Olathe</td>
- <td headers="state">KS</td>
- <td headers="CERT #">30898</td>
- <td headers="AI">Simmons First National Bank</td>
- <td headers="Closing Date">October 15, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="shoreline.html">Shoreline Bank</a></td>
- <td headers="city">Shoreline</td>
- <td headers="state">WA</td>
- <td headers="CERT #">35250</td>
- <td headers="AI">GBC International Bank</td>
- <td headers="Closing Date">October 1, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="wakulla.html">Wakulla Bank</a></td>
- <td headers="city">Crawfordville</td>
- <td headers="state"> FL </td>
- <td headers="CERT #">21777</td>
- <td headers="AI">Centennial Bank</td>
- <td headers="Closing Date">October 1, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
- <tr>
- <td><a href="northcounty.html">North County Bank</a></td>
- <td headers="city">Arlington</td>
- <td headers="state"> WA </td>
- <td headers="CERT #">35053</td>
- <td headers="AI">Whidbey Island Bank</td>
- <td headers="Closing Date">September 24, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="haventrust_fl.html">Haven Trust Bank Florida</a></td>
- <td headers="city">Ponte Vedra Beach</td>
- <td headers="state"> FL </td>
- <td headers="CERT #">58308</td>
- <td headers="AI">First Southern Bank</td>
- <td headers="Closing Date">September 24, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
-
- <tr>
- <td><a href="maritimesavings.html">Maritime Savings Bank</a></td>
- <td headers="city">West Allis</td>
- <td headers="state"> WI </td>
- <td headers="CERT #">28612</td>
- <td headers="AI">North Shore Bank, FSB</td>
- <td headers="Closing Date">September 17, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
-
- <tr>
- <td><a href="bramblesavings.html">Bramble Savings Bank</a></td>
- <td headers="city">Milford</td>
- <td headers="state"> OH </td>
- <td headers="CERT #">27808</td>
- <td headers="AI">Foundation Bank</td>
- <td headers="Closing Date">September 17, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="peoplesbank_ga.html">The Peoples Bank</a></td>
- <td headers="city">Winder</td>
- <td headers="state"> GA </td>
- <td headers="CERT #">182</td>
- <td headers="AI">Community & Southern Bank</td>
- <td headers="Closing Date">September 17, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
-
- <tr>
- <td><a href="firstcommerce_ga.html">First Commerce Community Bank</a></td>
- <td headers="city">Douglasville</td>
- <td headers="state"> GA </td>
- <td headers="CERT #">57448</td>
- <td headers="AI">Community & Southern Bank</td>
- <td headers="Closing Date">September 17, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
-
- <tr>
- <td><a href="ellijay.html">Bank of Ellijay</a></td>
- <td headers="city"> Ellijay </td>
- <td headers="state"> GA </td>
- <td headers="CERT #">58197</td>
- <td headers="AI">Community & Southern Bank</td>
- <td headers="Closing Date">September 17, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
-
-
- <tr>
- <td><a href="isnbank.html">ISN Bank</a></td>
- <td headers="city">Cherry Hill </td>
- <td headers="state"> NJ </td>
- <td headers="CERT #">57107</td>
- <td headers="AI">Customers Bank</td>
- <td headers="Closing Date">September 17, 2010</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-
- <tr>
- <td><a href="horizonfl.html">Horizon Bank</a></td>
- <td headers="city">Bradenton</td>
- <td headers="state"> FL </td>
- <td headers="CERT #">35061</td>
- <td headers="AI">Bank of the Ozarks</td>
- <td headers="Closing Date">September 10, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
-
- <tr>
- <td><a href="sonoma.html">Sonoma Valley Bank</a></td>
- <td headers="city">Sonoma</td>
- <td headers="state"> CA </td>
- <td headers="CERT #">27259</td>
- <td headers="AI">Westamerica Bank</td>
- <td headers="Closing Date">August 20, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="lospadres.html">Los Padres Bank</a></td>
- <td headers="city">Solvang </td>
- <td headers="state">CA</td>
- <td headers="CERT #">32165</td>
- <td headers="AI">Pacific Western Bank</td>
- <td headers="Closing Date">August 20, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
- <tr>
- <td><a href="butte.html">Butte Community Bank</a></td>
- <td headers="city">Chico</td>
- <td headers="state"> CA </td>
- <td headers="CERT #">33219</td>
- <td headers="AI">Rabobank, N.A.</td>
- <td headers="Closing Date">August 20, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
- <tr>
- <td><a href="pacificbk.html">Pacific State Bank</a></td>
- <td headers="city">Stockton</td>
- <td headers="state"> CA </td>
- <td headers="CERT #">27090</td>
- <td headers="AI">Rabobank, N.A.</td>
- <td headers="Closing Date">August 20, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
- <tr>
- <td><a href="shorebank.html">ShoreBank</a></td>
- <td headers="city">Chicago </td>
- <td headers="state">IL</td>
- <td headers="CERT #">15640</td>
- <td headers="AI">Urban Partnership Bank</td>
- <td headers="Closing Date">August 20, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="imperialsvgs.html">Imperial Savings and Loan Association</a></td>
- <td headers="city">Martinsville</td>
- <td headers="state">VA</td>
- <td headers="CERT #">31623</td>
- <td headers="AI">River Community Bank, N.A.</td>
- <td headers="Closing Date">August 20, 2010</td>
- <td headers="Updated">August 24, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="inatbank.html">Independent National Bank</a></td>
- <td headers="city">Ocala</td>
- <td headers="state">FL</td>
- <td headers="CERT #">27344</td>
- <td headers="AI">CenterState Bank of Florida, N.A.</td>
- <td headers="Closing Date">August 20, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
-
- <tr>
- <td><a href="cnbbartow.html">Community National Bank at Bartow</a></td>
- <td headers="city">Bartow</td>
- <td headers="state">FL</td>
- <td headers="CERT #">25266</td>
- <td headers="AI">CenterState Bank of Florida, N.A.</td>
- <td headers="Closing Date">August 20, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
-
-
-
- <tr>
- <td><a href="palosbank.html">Palos Bank and Trust Company</a></td>
- <td headers="city">Palos Heights</td>
- <td headers="state">IL</td>
- <td headers="CERT #">17599</td>
- <td headers="AI">First Midwest Bank</td>
- <td headers="Closing Date">August 13, 2010</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="ravenswood.html">Ravenswood Bank</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">34231</td>
- <td headers="AI">Northbrook Bank & Trust Company</td>
- <td headers="Closing Date">August 6, 2010</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="libertyor.html">LibertyBank</a></td>
- <td headers="city">Eugene</td>
- <td headers="state">OR</td>
- <td headers="CERT #">31964</td>
- <td headers="AI">Home Federal Bank</td>
- <td headers="Closing Date">July 30, 2010</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="cowlitz.html">The Cowlitz Bank</a></td>
- <td headers="city">Longview</td>
- <td headers="state">WA</td>
- <td headers="CERT #">22643</td>
- <td headers="AI">Heritage Bank</td>
- <td headers="Closing Date">July 30, 2010</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="coastal.html">Coastal Community Bank</a></td>
- <td headers="city">Panama City Beach</td>
- <td headers="state">FL</td>
- <td headers="CERT #">9619</td>
- <td headers="AI">Centennial Bank</td>
- <td headers="Closing Date">July 30, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="bayside.html">Bayside Savings Bank</a></td>
- <td headers="city">Port Saint Joe</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57669</td>
- <td headers="AI">Centennial Bank</td>
- <td headers="Closing Date">July 30, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="northwestga.html">Northwest Bank & Trust</a></td>
- <td headers="city">Acworth</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57658</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">July 30, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="homevalleyor.html">Home Valley Bank </a></td>
- <td headers="city">Cave Junction</td>
- <td headers="state">OR</td>
- <td headers="CERT #">23181</td>
- <td headers="AI">South Valley Bank & Trust</td>
- <td headers="Closing Date">July 23, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
- <tr>
- <td><a href="southwestusanv.html">SouthwestUSA Bank </a></td>
- <td headers="city">Las Vegas</td>
- <td headers="state">NV</td>
- <td headers="CERT #">35434</td>
- <td headers="AI">Plaza Bank</td>
- <td headers="Closing Date">July 23, 2010</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="communitysecmn.html">Community Security Bank </a></td>
- <td headers="city">New Prague</td>
- <td headers="state">MN</td>
- <td headers="CERT #">34486</td>
- <td headers="AI">Roundbank</td>
- <td headers="Closing Date">July 23, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
- <tr>
- <td><a href="thunderbankks.html">Thunder Bank </a></td>
- <td headers="city">Sylvan Grove</td>
- <td headers="state">KS</td>
- <td headers="CERT #">10506</td>
- <td headers="AI">The Bennington State Bank</td>
- <td headers="Closing Date">July 23, 2010</td>
- <td headers="Updated">September 13, 2012</td>
- </tr>
- <tr>
- <td><a href="williamsburgsc.html">Williamsburg First National Bank </a></td>
- <td headers="city">Kingstree</td>
- <td headers="state">SC</td>
- <td headers="CERT #">17837</td>
- <td headers="AI">First Citizens Bank and Trust Company, Inc.</td>
- <td headers="Closing Date">July 23, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="crescentga.html">Crescent Bank and Trust Company </a></td>
- <td headers="city">Jasper</td>
- <td headers="state">GA</td>
- <td headers="CERT #">27559</td>
- <td headers="AI">Renasant Bank</td>
- <td headers="Closing Date">July 23, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="sterlingfl.html">Sterling Bank </a></td>
- <td headers="city">Lantana</td>
- <td headers="state">FL</td>
- <td headers="CERT #">32536</td>
- <td headers="AI">IBERIABANK</td>
- <td headers="Closing Date">July 23, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="mainstsvgs.html">Mainstreet Savings Bank, FSB</a></td>
- <td headers="city">Hastings</td>
- <td headers="state">MI</td>
- <td headers="CERT #">28136</td>
- <td headers="AI">Commercial Bank</td>
- <td headers="Closing Date">July 16, 2010</td>
- <td headers="Updated">September 13, 2012</td>
- </tr>
- <tr>
- <td><a href="oldecypress.html">Olde Cypress Community Bank</a></td>
- <td headers="city">Clewiston</td>
- <td headers="state">FL</td>
- <td headers="CERT #">28864</td>
- <td headers="AI">CenterState Bank of Florida, N.A.</td>
- <td headers="Closing Date">July 16, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="turnberry.html">Turnberry Bank</a></td>
- <td headers="city">Aventura</td>
- <td headers="state">FL</td>
- <td headers="CERT #">32280</td>
- <td headers="AI">NAFH National Bank</td>
- <td headers="Closing Date">July 16, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="metrobankfl.html">Metro Bank of Dade County</a></td>
- <td headers="city">Miami</td>
- <td headers="state">FL</td>
- <td headers="CERT #">25172</td>
- <td headers="AI">NAFH National Bank</td>
- <td headers="Closing Date">July 16, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="firstnatlsc.html">First National Bank of the South</a></td>
- <td headers="city">Spartanburg</td>
- <td headers="state">SC</td>
- <td headers="CERT #">35383</td>
- <td headers="AI">NAFH National Bank</td>
- <td headers="Closing Date">July 16, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td height="24"><a href="woodlands.html">Woodlands Bank</a></td>
- <td headers="city">Bluffton</td>
- <td headers="state">SC</td>
- <td headers="CERT #">32571</td>
- <td headers="AI">Bank of the Ozarks</td>
- <td headers="Closing Date">July 16, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="homenatlok.html">Home National Bank</a></td>
- <td headers="city">Blackwell</td>
- <td headers="state">OK</td>
- <td headers="CERT #">11636</td>
- <td headers="AI">RCB Bank</td>
- <td headers="Closing Date">July 9, 2010</td>
- <td headers="Updated">December 10, 2012</td>
- </tr>
- <tr>
- <td><a href="usabankny.html">USA Bank</a></td>
- <td headers="city">Port Chester</td>
- <td headers="state">NY</td>
- <td headers="CERT #">58072</td>
- <td headers="AI">New Century Bank</td>
- <td headers="Closing Date">July 9, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="idealfedsvngsmd.html">Ideal Federal Savings Bank</a></td>
- <td headers="city">Baltimore</td>
- <td headers="state">MD</td>
- <td headers="CERT #">32456</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">July 9, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="baynatlmd.html">Bay National Bank</a></td>
- <td headers="city">Baltimore</td>
- <td headers="state">MD</td>
- <td headers="CERT #">35462</td>
- <td headers="AI">Bay Bank, FSB</td>
- <td headers="Closing Date">July 9, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="highdesertnm.html">High Desert State Bank</a></td>
- <td headers="city">Albuquerque</td>
- <td headers="state">NM</td>
- <td headers="CERT #">35279</td>
- <td headers="AI">First American Bank</td>
- <td headers="Closing Date">June 25, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="firstnatga.html">First National Bank</a></td>
- <td headers="city">Savannah</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34152</td>
- <td headers="AI">The Savannah Bank, N.A.</td>
- <td headers="Closing Date">June 25, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="peninsulafl.html">Peninsula Bank</a></td>
- <td headers="city">Englewood</td>
- <td headers="state">FL</td>
- <td headers="CERT #">26563</td>
- <td headers="AI">Premier American Bank, N.A.</td>
- <td headers="Closing Date">June 25, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="nevsecbank.html">Nevada Security Bank</a></td>
- <td headers="city">Reno</td>
- <td headers="state">NV</td>
- <td headers="CERT #">57110</td>
- <td headers="AI">Umpqua Bank</td>
- <td headers="Closing Date">June 18, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="washfirstintl.html">Washington First International Bank</a></td>
- <td headers="city">Seattle</td>
- <td headers="state">WA</td>
- <td headers="CERT #">32955</td>
- <td headers="AI">East West Bank</td>
- <td headers="Closing Date">June 11, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="tieronebankne.html">TierOne Bank</a></td>
- <td headers="city">Lincoln</td>
- <td headers="state">NE</td>
- <td headers="CERT #">29341</td>
- <td headers="AI">Great Western Bank</td>
- <td headers="Closing Date">June 4, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="arcolail.html">Arcola Homestead Savings Bank</a></td>
- <td headers="city">Arcola</td>
- <td headers="state">IL</td>
- <td headers="CERT #">31813</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">June 4, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="firstnatms.html">First National Bank</a></td>
- <td headers="city">Rosedale </td>
- <td headers="state">MS</td>
- <td headers="CERT #">15814</td>
- <td headers="AI">The Jefferson Bank</td>
- <td headers="Closing Date">June 4, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="swbnevada.html">Sun West Bank</a></td>
- <td headers="city">Las Vegas </td>
- <td headers="state">NV</td>
- <td headers="CERT #">34785</td>
- <td headers="AI">City National Bank</td>
- <td headers="Closing Date">May 28, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="graniteca.html">Granite Community Bank, NA</a></td>
- <td headers="city">Granite Bay </td>
- <td headers="state">CA</td>
- <td headers="CERT #">57315</td>
- <td headers="AI">Tri Counties Bank</td>
- <td headers="Closing Date">May 28, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="bankoffloridatb.html">Bank of Florida - Tampa</a></td>
- <td headers="city">Tampa</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57814</td>
- <td headers="AI">EverBank</td>
- <td headers="Closing Date">May 28, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="bankoffloridasw.html">Bank of Florida - Southwest</a></td>
- <td headers="city">Naples </td>
- <td headers="state">FL</td>
- <td headers="CERT #">35106</td>
- <td headers="AI">EverBank</td>
- <td headers="Closing Date">May 28, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="bankoffloridase.html">Bank of Florida - Southeast</a></td>
- <td headers="city">Fort Lauderdale </td>
- <td headers="state">FL</td>
- <td headers="CERT #">57360</td>
- <td headers="AI">EverBank</td>
- <td headers="Closing Date">May 28, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="pinehurstmn.html">Pinehurst Bank</a></td>
- <td headers="city">Saint Paul </td>
- <td headers="state">MN</td>
- <td headers="CERT #">57735</td>
- <td headers="AI">Coulee Bank</td>
- <td headers="Closing Date">May 21, 2010</td>
- <td headers="Updated">October 26, 2012</td>
- </tr>
- <tr>
- <td><a href="midwestil.html">Midwest Bank and Trust Company</a></td>
- <td headers="city">Elmwood Park </td>
- <td headers="state">IL</td>
- <td headers="CERT #">18117</td>
- <td headers="AI">FirstMerit Bank, N.A.</td>
- <td headers="Closing Date">May 14, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="swcmntymo.html">Southwest Community Bank</a></td>
- <td headers="city">Springfield</td>
- <td headers="state">MO</td>
- <td headers="CERT #">34255</td>
- <td headers="AI">Simmons First National Bank</td>
- <td headers="Closing Date">May 14, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="newlibertymi.html">New Liberty Bank</a></td>
- <td headers="city">Plymouth</td>
- <td headers="state">MI</td>
- <td headers="CERT #">35586</td>
- <td headers="AI">Bank of Ann Arbor</td>
- <td headers="Closing Date">May 14, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="satillacmntyga.html">Satilla Community Bank</a></td>
- <td headers="city">Saint Marys</td>
- <td headers="state">GA</td>
- <td headers="CERT #">35114</td>
- <td headers="AI">Ameris Bank</td>
- <td headers="Closing Date">May 14, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="1stpacific.html">1st Pacific Bank of California</a></td>
- <td headers="city">San Diego</td>
- <td headers="state">CA</td>
- <td headers="CERT #">35517</td>
- <td headers="AI">City National Bank</td>
- <td headers="Closing Date">May 7, 2010</td>
- <td headers="Updated">December 13, 2012</td>
- </tr>
- <tr>
- <td><a href="townebank.html">Towne Bank of Arizona</a></td>
- <td headers="city">Mesa</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">57697</td>
- <td headers="AI">Commerce Bank of Arizona</td>
- <td headers="Closing Date">May 7, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="accessbank.html">Access Bank</a></td>
- <td headers="city">Champlin</td>
- <td headers="state">MN</td>
- <td headers="CERT #">16476</td>
- <td headers="AI">PrinsBank</td>
- <td headers="Closing Date">May 7, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="bonifay.html">The Bank of Bonifay</a></td>
- <td headers="city">Bonifay</td>
- <td headers="state">FL</td>
- <td headers="CERT #">14246</td>
- <td headers="AI">First Federal Bank of Florida</td>
- <td headers="Closing Date">May 7, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="frontier.html">Frontier Bank</a></td>
- <td headers="city">Everett</td>
- <td headers="state">WA</td>
- <td headers="CERT #">22710</td>
- <td headers="AI">Union Bank, N.A.</td>
- <td headers="Closing Date">April 30, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="bc-natl.html">BC National Banks</a></td>
- <td headers="city">Butler</td>
- <td headers="state">MO</td>
- <td headers="CERT #">17792</td>
- <td headers="AI">Community First Bank</td>
- <td headers="Closing Date">April 30, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="champion.html">Champion Bank</a></td>
- <td headers="city">Creve Coeur</td>
- <td headers="state">MO</td>
- <td headers="CERT #">58362</td>
- <td headers="AI">BankLiberty</td>
- <td headers="Closing Date">April 30, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="cfbancorp.html">CF Bancorp</a></td>
- <td headers="city">Port Huron</td>
- <td headers="state">MI</td>
- <td headers="CERT #">30005</td>
- <td headers="AI">First Michigan Bank</td>
- <td headers="Closing Date">April 30, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="westernbank-puertorico.html">Westernbank Puerto Rico</a><br />
- <a href="westernbank-puertorico_spanish.html">En Espanol</a></td>
- <td headers="city">Mayaguez</td>
- <td headers="state">PR</td>
- <td headers="CERT #">31027</td>
- <td headers="AI">Banco Popular de Puerto Rico</td>
- <td headers="Closing Date">April 30, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="r-gpremier-puertorico.html">R-G Premier Bank of Puerto Rico</a><br />
- <a href="r-gpremier-puertorico_spanish.html">En Espanol</a></td>
- <td headers="city">Hato Rey</td>
- <td headers="state">PR</td>
- <td headers="CERT #">32185</td>
- <td headers="AI">Scotiabank de Puerto Rico</td>
- <td headers="Closing Date">April 30, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="eurobank-puertorico.html">Eurobank</a><br />
- <a href="eurobank-puertorico_spanish.html">En Espanol</a></td>
- <td headers="city">San Juan</td>
- <td headers="state">PR</td>
- <td headers="CERT #">27150</td>
- <td headers="AI">Oriental Bank and Trust</td>
- <td headers="Closing Date">April 30, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="wheatland.html">Wheatland Bank</a></td>
- <td headers="city">Naperville</td>
- <td headers="state">IL</td>
- <td headers="CERT #">58429</td>
- <td headers="AI">Wheaton Bank & Trust</td>
- <td headers="Closing Date">April 23, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="peotone.html">Peotone Bank and Trust Company</a></td>
- <td headers="city">Peotone</td>
- <td headers="state">IL</td>
- <td headers="CERT #">10888</td>
- <td headers="AI">First Midwest Bank</td>
- <td headers="Closing Date">April 23, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="lincoln-park.html">Lincoln Park Savings Bank</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">30600</td>
- <td headers="AI">Northbrook Bank & Trust Company</td>
- <td headers="Closing Date">April 23, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="new-century-il.html">New Century Bank</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">34821</td>
- <td headers="AI">MB Financial Bank, N.A.</td>
- <td headers="Closing Date">April 23, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="citizens-bank.html">Citizens Bank and Trust Company of Chicago</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">34658</td>
- <td headers="AI">Republic Bank of Chicago</td>
- <td headers="Closing Date">April 23, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="broadway.html">Broadway Bank</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">22853</td>
- <td headers="AI">MB Financial Bank, N.A.</td>
- <td headers="Closing Date">April 23, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="amcore.html">Amcore Bank, National Association</a></td>
- <td headers="city">Rockford</td>
- <td headers="state">IL</td>
- <td headers="CERT #">3735</td>
- <td headers="AI">Harris N.A.</td>
- <td headers="Closing Date">April 23, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
-
- <tr>
- <td><a href="citybank.html">City Bank</a></td>
- <td headers="city">Lynnwood</td>
- <td headers="state">WA</td>
- <td headers="CERT #">21521</td>
- <td headers="AI">Whidbey Island Bank</td>
- <td headers="Closing Date">April 16, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="tamalpais.html">Tamalpais Bank</a></td>
- <td headers="city">San Rafael</td>
- <td headers="state">CA</td>
- <td headers="CERT #">33493</td>
- <td headers="AI">Union Bank, N.A.</td>
- <td headers="Closing Date">April 16, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="innovative.html">Innovative Bank</a></td>
- <td headers="city">Oakland</td>
- <td headers="state">CA</td>
- <td headers="CERT #">23876</td>
- <td headers="AI">Center Bank</td>
- <td headers="Closing Date">April 16, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="butlerbank.html">Butler Bank</a></td>
- <td headers="city">Lowell</td>
- <td headers="state">MA</td>
- <td headers="CERT #">26619</td>
- <td headers="AI">People's United Bank</td>
- <td headers="Closing Date">April 16, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="riverside-natl.html">Riverside National Bank of Florida</a></td>
- <td headers="city">Fort Pierce</td>
- <td headers="state">FL</td>
- <td headers="CERT #">24067</td>
- <td headers="AI">TD Bank, N.A.</td>
- <td headers="Closing Date">April 16, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="americanfirst.html">AmericanFirst Bank</a></td>
- <td headers="city">Clermont</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57724</td>
- <td headers="AI">TD Bank, N.A.</td>
- <td headers="Closing Date">April 16, 2010</td>
- <td headers="Updated">October 31, 2012</td>
- </tr>
- <tr>
- <td><a href="ffbnf.html">First Federal Bank of North Florida</a></td>
- <td headers="city">Palatka</td>
- <td headers="state">FL </td>
- <td headers="CERT #">28886</td>
- <td headers="AI">TD Bank, N.A.</td>
- <td headers="Closing Date">April 16, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="lakeside-comm.html">Lakeside Community Bank</a></td>
- <td headers="city">Sterling Heights</td>
- <td headers="state">MI</td>
- <td headers="CERT #">34878</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">April 16, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="beachfirst.html">Beach First National Bank</a></td>
- <td headers="city">Myrtle Beach</td>
- <td headers="state">SC</td>
- <td headers="CERT #">34242</td>
- <td headers="AI">Bank of North Carolina</td>
- <td headers="Closing Date">April 9, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="deserthills.html">Desert Hills Bank</a></td>
- <td headers="city">Phoenix</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">57060</td>
- <td headers="AI">New York Community Bank</td>
- <td headers="Closing Date">March 26, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="unity-natl.html">Unity National Bank</a></td>
- <td headers="city">Cartersville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34678</td>
- <td headers="AI">Bank of the Ozarks</td>
- <td headers="Closing Date">March 26, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="key-west.html">Key West Bank</a></td>
- <td headers="city">Key West</td>
- <td headers="state">FL</td>
- <td headers="CERT #">34684</td>
- <td headers="AI">Centennial Bank</td>
- <td headers="Closing Date">March 26, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="mcintosh.html">McIntosh Commercial Bank</a></td>
- <td headers="city">Carrollton</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57399</td>
- <td headers="AI">CharterBank</td>
- <td headers="Closing Date">March 26, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="state-aurora.html">State Bank of Aurora</a></td>
- <td headers="city">Aurora</td>
- <td headers="state">MN</td>
- <td headers="CERT #">8221</td>
- <td headers="AI">Northern State Bank</td>
- <td headers="Closing Date">March 19, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="firstlowndes.html">First Lowndes Bank</a></td>
- <td headers="city">Fort Deposit</td>
- <td headers="state">AL</td>
- <td headers="CERT #">24957</td>
- <td headers="AI">First Citizens Bank</td>
- <td headers="Closing Date">March 19, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="bankofhiawassee.html">Bank of Hiawassee</a></td>
- <td headers="city">Hiawassee</td>
- <td headers="state">GA</td>
- <td headers="CERT #">10054</td>
- <td headers="AI">Citizens South Bank</td>
- <td headers="Closing Date">March 19, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="appalachian.html">Appalachian Community Bank</a></td>
- <td headers="city">Ellijay</td>
- <td headers="state">GA</td>
- <td headers="CERT #">33989</td>
- <td headers="AI">Community & Southern Bank</td>
- <td headers="Closing Date">March 19, 2010</td>
- <td headers="Updated">October 31, 2012</td>
- </tr>
- <tr>
- <td><a href="advanta-ut.html">Advanta Bank Corp.</a></td>
- <td headers="city">Draper</td>
- <td headers="state">UT</td>
- <td headers="CERT #">33535</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">March 19, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="cent-security.html">Century Security Bank</a></td>
- <td headers="city">Duluth</td>
- <td headers="state">GA</td>
- <td headers="CERT #">58104</td>
- <td headers="AI">Bank of Upson</td>
- <td headers="Closing Date">March 19, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="amer-natl-oh.html">American National Bank</a></td>
- <td headers="city">Parma</td>
- <td headers="state">OH</td>
- <td headers="CERT #">18806</td>
- <td headers="AI">The National Bank and Trust Company</td>
- <td headers="Closing Date">March 19, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="statewide.html">Statewide Bank</a></td>
- <td headers="city">Covington</td>
- <td headers="state">LA</td>
- <td headers="CERT #">29561</td>
- <td headers="AI">Home Bank</td>
- <td headers="Closing Date">March 12, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
-<tr>
- <td><a href="oldsouthern.html">Old Southern Bank</a></td>
- <td headers="city">Orlando</td>
- <td headers="state">FL</td>
- <td headers="CERT #">58182</td>
- <td headers="AI">Centennial Bank</td>
- <td headers="Closing Date">March 12, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="parkavenue-ny.html">The Park Avenue Bank</a></td>
- <td headers="city">New York</td>
- <td headers="state">NY</td>
- <td headers="CERT #">27096</td>
- <td headers="AI">Valley National Bank</td>
- <td headers="Closing Date">March 12, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="libertypointe.html">LibertyPointe Bank</a></td>
- <td headers="city">New York</td>
- <td headers="state">NY</td>
- <td headers="CERT #">58071</td>
- <td headers="AI">Valley National Bank</td>
- <td headers="Closing Date">March 11, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="centennial-ut.html">Centennial Bank</a></td>
- <td headers="city">Ogden</td>
- <td headers="state">UT</td>
- <td headers="CERT #">34430</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">March 5, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="waterfield.html">Waterfield Bank</a></td>
- <td headers="city">Germantown</td>
- <td headers="state">MD</td>
- <td headers="CERT #">34976</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">March 5, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="bankofillinois.html">Bank of Illinois</a></td>
- <td headers="city">Normal</td>
- <td headers="state">IL</td>
- <td headers="CERT #">9268</td>
- <td headers="AI">Heartland Bank and Trust Company</td>
- <td headers="Closing Date">March 5, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="sunamerican.html">Sun American Bank</a></td>
- <td headers="city">Boca Raton</td>
- <td headers="state">FL</td>
- <td headers="CERT #">27126</td>
- <td headers="AI">First-Citizens Bank & Trust Company</td>
- <td headers="Closing Date">March 5, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="rainier.html">Rainier Pacific Bank</a></td>
- <td headers="city">Tacoma</td>
- <td headers="state">WA</td>
- <td headers="CERT #">38129</td>
- <td headers="AI">Umpqua Bank</td>
- <td headers="Closing Date">February 26, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="carsonriver.html">Carson River Community Bank</a></td>
- <td headers="city">Carson City</td>
- <td headers="state">NV</td>
- <td headers="CERT #">58352</td>
- <td headers="AI">Heritage Bank of Nevada</td>
- <td headers="Closing Date">February 26, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="lajolla.html">La Jolla Bank, FSB</a></td>
- <td headers="city">La Jolla</td>
- <td headers="state">CA</td>
- <td headers="CERT #">32423</td>
- <td headers="AI">OneWest Bank, FSB</td>
- <td headers="Closing Date">February 19, 2010</td>
- <td headers="Updated">August 24, 2012</td>
- </tr>
- <tr>
- <td><a href="georgewashington.html">George Washington Savings Bank</a></td>
- <td headers="city">Orland Park</td>
- <td headers="state">IL</td>
- <td headers="CERT #">29952</td>
- <td headers="AI">FirstMerit Bank, N.A.</td>
- <td headers="Closing Date">February 19, 2010</td>
- <td headers="Updated">August 24, 2012</td>
- </tr>
- <tr>
- <td><a href="lacoste.html">The La Coste National Bank</a></td>
- <td headers="city">La Coste</td>
- <td headers="state">TX</td>
- <td headers="CERT #">3287</td>
- <td headers="AI">Community National Bank</td>
- <td headers="Closing Date">February 19, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="marco.html">Marco Community Bank</a></td>
- <td headers="city">Marco Island</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57586</td>
- <td headers="AI">Mutual of Omaha Bank</td>
- <td headers="Closing Date">February 19, 2010</td>
- <td headers="Updated">August 24, 2012</td>
- </tr>
- <tr>
- <td><a href="1stamerican.html">1st American State Bank of Minnesota</a></td>
- <td headers="city">Hancock</td>
- <td headers="state">MN</td>
- <td headers="CERT #">15448</td>
- <td headers="AI">Community Development Bank, FSB</td>
- <td headers="Closing Date">February 5, 2010</td>
- <td headers="Updated">August 24, 2012</td>
- </tr>
- <tr>
- <td><a href="americanmarine.html">American Marine Bank</a></td>
- <td headers="city">Bainbridge Island</td>
- <td headers="state">WA</td>
- <td headers="CERT #">16730</td>
- <td headers="AI">Columbia State Bank</td>
- <td headers="Closing Date">January 29, 2010</td>
- <td headers="Updated">August 24, 2012</td>
- </tr>
- <tr>
- <td><a href="firstregional.html">First Regional Bank</a></td>
- <td headers="city">Los Angeles</td>
- <td headers="state">CA</td>
- <td headers="CERT #">23011</td>
- <td headers="AI">First-Citizens Bank & Trust Company</td>
- <td headers="Closing Date">January 29, 2010</td>
- <td headers="Updated">August 24, 2012</td>
- </tr>
- <tr>
- <td><a href="cbt-cornelia.html">Community Bank and Trust</a></td>
- <td headers="city">Cornelia</td>
- <td headers="state">GA</td>
- <td headers="CERT #">5702</td>
- <td headers="AI">SCBT National Association</td>
- <td headers="Closing Date">January 29, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="marshall-mn.html">Marshall Bank, N.A.</a></td>
- <td headers="city">Hallock</td>
- <td headers="state">MN</td>
- <td headers="CERT #">16133</td>
- <td headers="AI">United Valley Bank</td>
- <td headers="Closing Date">January 29, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="floridacommunity.html">Florida Community Bank</a></td>
- <td headers="city">Immokalee</td>
- <td headers="state">FL</td>
- <td headers="CERT #">5672</td>
- <td headers="AI">Premier American Bank, N.A.</td>
- <td headers="Closing Date">January 29, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="firstnational-carrollton.html">First National Bank of Georgia</a></td>
- <td headers="city">Carrollton</td>
- <td headers="state">GA</td>
- <td headers="CERT #">16480</td>
- <td headers="AI">Community & Southern Bank</td>
- <td headers="Closing Date">January 29, 2010</td>
- <td headers="Updated">December 13, 2012</td>
- </tr>
- <tr>
- <td><a href="columbiariver.html">Columbia River Bank</a></td>
- <td headers="city">The Dalles</td>
- <td headers="state">OR</td>
- <td headers="CERT #">22469</td>
- <td headers="AI">Columbia State Bank</td>
- <td headers="Closing Date">January 22, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="evergreen-wa.html">Evergreen Bank</a></td>
- <td headers="city">Seattle</td>
- <td headers="state">WA</td>
- <td headers="CERT #">20501</td>
- <td headers="AI">Umpqua Bank</td>
- <td headers="Closing Date">January 22, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="charter-nm.html">Charter Bank</a></td>
- <td headers="city">Santa Fe</td>
- <td headers="state">NM</td>
- <td headers="CERT #">32498</td>
- <td headers="AI">Charter Bank</td>
- <td headers="Closing Date">January 22, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="leeton.html">Bank of Leeton</a></td>
- <td headers="city">Leeton</td>
- <td headers="state">MO</td>
- <td headers="CERT #">8265</td>
- <td headers="AI">Sunflower Bank, N.A.</td>
- <td headers="Closing Date">January 22, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="premieramerican.html">Premier American Bank</a></td>
- <td headers="city">Miami</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57147</td>
- <td headers="AI">Premier American Bank, N.A.</td>
- <td headers="Closing Date">January 22, 2010</td>
- <td headers="Updated">December 13, 2012</td>
- </tr>
- <tr>
- <td><a href="barnes.html">Barnes Banking Company</a></td>
- <td headers="city">Kaysville</td>
- <td headers="state">UT</td>
- <td headers="CERT #">1252</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">January 15, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="ststephen.html">St. Stephen State Bank</a></td>
- <td headers="city">St. Stephen</td>
- <td headers="state">MN</td>
- <td headers="CERT #">17522</td>
- <td headers="AI">First State Bank of St. Joseph</td>
- <td headers="Closing Date">January 15, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="towncommunity.html">Town Community
- Bank & Trust</a></td>
- <td headers="city">Antioch</td>
- <td headers="state">IL</td>
- <td headers="CERT #">34705</td>
- <td headers="AI">First American Bank</td>
- <td headers="Closing Date">January 15, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="horizon-wa.html">Horizon Bank</a></td>
- <td headers="city">Bellingham</td>
- <td headers="state">WA</td>
- <td headers="CERT #">22977</td>
- <td headers="AI">Washington Federal Savings and Loan Association</td>
- <td headers="Closing Date">January 8, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="firstfederal-ca.html">First Federal Bank of California, F.S.B.</a></td>
- <td headers="city">Santa Monica</td>
- <td headers="state">CA</td>
- <td headers="CERT #">28536</td>
- <td headers="AI">OneWest Bank, FSB</td>
- <td headers="Closing Date">December 18, 2009</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="imperialcapital.html">Imperial Capital Bank</a></td>
- <td headers="city">La Jolla</td>
- <td headers="state">CA</td>
- <td headers="CERT #">26348</td>
- <td headers="AI">City National Bank</td>
- <td headers="Closing Date">December 18, 2009</td>
- <td headers="Updated">September 5, 2012</td>
- </tr>
- <tr>
- <td><a href="ibb.html">Independent Bankers' Bank</a></td>
- <td headers="city">Springfield</td>
- <td headers="state">IL</td>
- <td headers="CERT #">26820</td>
- <td headers="AI">The Independent BankersBank (TIB)</td>
- <td headers="Closing Date">December 18, 2009</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="newsouth.html">New South Federal Savings Bank</a></td>
- <td headers="city">Irondale</td>
- <td headers="state">AL</td>
- <td headers="CERT #">32276</td>
- <td headers="AI">Beal Bank</td>
- <td headers="Closing Date">December 18, 2009</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="citizensstate-mi.html">Citizens State Bank</a></td>
- <td headers="city">New Baltimore</td>
- <td headers="state">MI</td>
- <td headers="CERT #">1006</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">December 18, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="peoplesfirst-fl.html">Peoples First Community Bank</a></td>
- <td headers="city">Panama City</td>
- <td headers="state">FL</td>
- <td headers="CERT #">32167</td>
- <td headers="AI">Hancock Bank</td>
- <td headers="Closing Date">December 18, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="rockbridge.html">RockBridge Commercial Bank</a></td>
- <td headers="city">Atlanta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">58315</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">December 18, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="solutions.html">SolutionsBank</a></td>
- <td headers="city">Overland Park</td>
- <td headers="state">KS</td>
- <td headers="CERT #">4731</td>
- <td headers="AI">Arvest Bank</td>
- <td headers="Closing Date">December 11, 2009</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="valleycapital.html">Valley Capital Bank, N.A.</a></td>
- <td headers="city">Mesa</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">58399</td>
- <td headers="AI">Enterprise Bank & Trust</td>
- <td headers="Closing Date">December 11, 2009</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="republicfederal.html">Republic Federal Bank, N.A.</a></td>
- <td headers="city">Miami</td>
- <td headers="state">FL</td>
- <td headers="CERT #">22846</td>
- <td headers="AI">1st United Bank</td>
- <td headers="Closing Date">December 11, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="atlantic-va.html">Greater Atlantic Bank</a></td>
- <td headers="city">Reston</td>
- <td headers="state">VA</td>
- <td headers="CERT #">32583</td>
- <td headers="AI">Sonabank</td>
- <td headers="Closing Date">December 4, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="benchmark-il.html">Benchmark Bank</a></td>
- <td headers="city">Aurora</td>
- <td headers="state">IL</td>
- <td headers="CERT #">10440</td>
- <td headers="AI">MB Financial Bank, N.A.</td>
- <td headers="Closing Date">December 4, 2009</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="amtrust.html">AmTrust Bank</a></td>
- <td headers="city">Cleveland</td>
- <td headers="state">OH</td>
- <td headers="CERT #">29776</td>
- <td headers="AI">New York Community Bank</td>
- <td headers="Closing Date">December 4, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
-<tr>
- <td><a href="tattnall.html">The Tattnall Bank</a></td>
- <td headers="city">Reidsville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">12080</td>
- <td headers="AI">Heritage Bank of the South</td>
- <td headers="Closing Date">December 4, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="firstsecurity.html">First Security National Bank</a></td>
- <td headers="city">Norcross</td>
- <td headers="state">GA</td>
- <td headers="CERT #">26290</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">December 4, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
-<tr>
- <td><a href="buckheadcommunity.html">The Buckhead Community Bank</a></td>
- <td headers="city">Atlanta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34663</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">December 4, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="commercesw-fl.html">Commerce Bank of Southwest Florida</a></td>
- <td headers="city">Fort Myers</td>
- <td headers="state">FL</td>
- <td headers="CERT #">58016</td>
- <td headers="AI">Central Bank</td>
- <td headers="Closing Date">November 20, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="pacificcoastnatl.html">Pacific Coast National Bank</a></td>
- <td headers="city">San Clemente</td>
- <td headers="state">CA</td>
- <td headers="CERT #">57914</td>
- <td headers="AI">Sunwest Bank</td>
- <td headers="Closing Date">November 13, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="orion-fl.html">Orion Bank</a></td>
- <td headers="city">Naples</td>
- <td headers="state">FL</td>
- <td headers="CERT #">22427</td>
- <td headers="AI">IBERIABANK</td>
- <td headers="Closing Date">November 13, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
-<tr>
- <td><a href="centuryfsb.html">Century Bank,
- F.S.B.</a></td>
- <td headers="city">Sarasota</td>
- <td headers="state">FL</td>
- <td headers="CERT #">32267</td>
- <td headers="AI">IBERIABANK</td>
- <td headers="Closing Date">November 13, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-
- <tr>
- <td><a href="ucb.html">United Commercial Bank</a></td>
- <td headers="city">San Francisco</td>
- <td headers="state">CA</td>
- <td headers="CERT #">32469</td>
- <td headers="AI">East West Bank</td>
- <td headers="Closing Date">November 6, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="gateway-mo.html">Gateway Bank of St. Louis</a></td>
- <td headers="city">St. Louis</td>
- <td headers="state">MO</td>
- <td headers="CERT #">19450</td>
- <td headers="AI">Central Bank of Kansas City</td>
- <td headers="Closing Date">November 6, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="prosperan.html">Prosperan Bank</a></td>
- <td headers="city">Oakdale</td>
- <td headers="state">MN</td>
- <td headers="CERT #">35074</td>
- <td headers="AI">Alerus Financial, N.A.</td>
- <td headers="Closing Date">November 6, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="homefsb-mi.html">Home Federal Savings Bank</a></td>
- <td headers="city">Detroit</td>
- <td headers="state">MI</td>
- <td headers="CERT #">30329</td>
- <td headers="AI">Liberty Bank and Trust Company</td>
- <td headers="Closing Date">November 6, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="unitedsecurity-ga.html">United Security Bank</a></td>
- <td headers="city">Sparta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">22286</td>
- <td headers="AI">Ameris Bank</td>
- <td headers="Closing Date">November 6, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="northhouston-tx.html">North Houston Bank</a></td>
- <td headers="city">Houston</td>
- <td headers="state">TX</td>
- <td headers="CERT #">18776</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-<tr>
- <td><a href="madisonville-tx.html">Madisonville State Bank</a></td>
- <td headers="city">Madisonville</td>
- <td headers="state">TX</td>
- <td headers="CERT #">33782</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-<tr>
- <td><a href="citizens-teague.html">Citizens National Bank</a></td>
- <td headers="city">Teague</td>
- <td headers="state">TX</td>
- <td headers="CERT #">25222</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-<tr>
- <td><a href="park-il.html">Park National Bank</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">11677</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-<tr>
- <td><a href="pacificnational-ca.html">Pacific National Bank</a></td>
- <td headers="city">San Francisco</td>
- <td headers="state">CA</td>
- <td headers="CERT #">30006</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-<tr>
- <td><a href="calnational.html">California National Bank</a></td>
- <td headers="city">Los Angeles</td>
- <td headers="state">CA</td>
- <td headers="CERT #">34659</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">September 5, 2012</td>
- </tr>
-<tr>
- <td><a href="sandiegonational.html">San Diego National Bank</a></td>
- <td headers="city">San Diego</td>
- <td headers="state">CA</td>
- <td headers="CERT #">23594</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-<tr>
- <td><a href="community-lemont.html">Community Bank of Lemont</a></td>
- <td headers="city">Lemont</td>
- <td headers="state">IL</td>
- <td headers="CERT #">35291</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
-<tr>
- <td><a href="bankusa-az.html">Bank USA, N.A</a>.</td>
- <td headers="city">Phoenix</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">32218</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="firstdupage.html">First DuPage Bank</a></td>
- <td headers="city">Westmont</td>
- <td headers="state">IL</td>
- <td headers="CERT #">35038</td>
- <td headers="AI">First Midwest Bank</td>
- <td headers="Closing Date">October 23, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="riverview-mn.html">Riverview Community Bank</a></td>
- <td headers="city">Otsego</td>
- <td headers="state">MN</td>
- <td headers="CERT #">57525</td>
- <td headers="AI">Central Bank</td>
- <td headers="Closing Date">October 23, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="elmwood.html">Bank of Elmwood</a></td>
- <td headers="city">Racine</td>
- <td headers="state">WI</td>
- <td headers="CERT #">18321</td>
- <td headers="AI">Tri City National Bank</td>
- <td headers="Closing Date">October 23, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-
- <tr>
- <td><a href="flagship.html">Flagship National Bank</a></td>
- <td headers="city">Bradenton</td>
- <td headers="state">FL</td>
- <td headers="CERT #">35044</td>
- <td headers="AI">First Federal Bank of Florida</td>
- <td headers="Closing Date">October 23, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="hillcrest-fl.html">Hillcrest Bank Florida</a></td>
- <td headers="city">Naples</td>
- <td headers="state">FL</td>
- <td headers="CERT #">58336</td>
- <td headers="AI">Stonegate Bank</td>
- <td headers="Closing Date">October 23, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="americanunited.html">American United Bank</a></td>
- <td headers="city">Lawrenceville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57794</td>
- <td headers="AI">Ameris Bank</td>
- <td headers="Closing Date">October 23, 2009</td>
- <td headers="Updated">September 5, 2012</td>
- </tr>
- <tr>
- <td><a href="partners-fl.html">Partners Bank</a></td>
- <td headers="city">Naples</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57959</td>
- <td headers="AI">Stonegate Bank</td>
- <td headers="Closing Date">October 23, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="sanjoaquin.html">San Joaquin Bank</a></td>
- <td headers="city">Bakersfield</td>
- <td headers="state">CA</td>
- <td headers="CERT #">23266</td>
- <td headers="AI">Citizens Business Bank</td>
- <td headers="Closing Date">October 16, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="scnb-co.html">Southern Colorado National Bank</a></td>
- <td headers="city">Pueblo</td>
- <td headers="state">CO</td>
- <td headers="CERT #">57263</td>
- <td headers="AI">Legacy Bank</td>
- <td headers="Closing Date">October 2, 2009</td>
- <td headers="Updated">September 5, 2012</td>
- </tr>
- <tr>
- <td><a href="jennings-mn.html">Jennings State Bank</a></td>
- <td headers="city">Spring Grove</td>
- <td headers="state">MN</td>
- <td headers="CERT #">11416</td>
- <td headers="AI">Central Bank</td>
- <td headers="Closing Date">October 2, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="warren-mi.html">Warren Bank</a></td>
- <td headers="city">Warren</td>
- <td headers="state">MI</td>
- <td headers="CERT #">34824</td>
- <td headers="AI">The Huntington National Bank</td>
- <td headers="Closing Date">October 2, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="georgian.html">Georgian Bank</a></td>
- <td headers="city">Atlanta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57151</td>
- <td headers="AI">First Citizens Bank and Trust Company, Inc.</td>
- <td headers="Closing Date">September 25, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="irwin-ky.html">Irwin Union Bank, F.S.B.</a></td>
- <td headers="city">Louisville</td>
- <td headers="state">KY</td>
- <td headers="CERT #">57068</td>
- <td headers="AI">First Financial Bank, N.A.</td>
- <td headers="Closing Date">September 18, 2009</td>
- <td headers="Updated">September 5, 2012</td>
- </tr>
- <tr>
- <td><a href="irwin-in.html">Irwin Union Bank and Trust Company</a></td>
- <td headers="city">Columbus</td>
- <td headers="state">IN</td>
- <td headers="CERT #">10100</td>
- <td headers="AI">First Financial Bank, N.A.</td>
- <td headers="Closing Date">September 18, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="venture-wa.html">Venture Bank</a></td>
- <td headers="city">Lacey</td>
- <td headers="state">WA</td>
- <td headers="CERT #">22868</td>
- <td headers="AI">First-Citizens Bank & Trust Company</td>
- <td headers="Closing Date">September 11, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="brickwell-mn.html">Brickwell Community Bank</a></td>
- <td headers="city">Woodbury</td>
- <td headers="state">MN</td>
- <td headers="CERT #">57736</td>
- <td headers="AI">CorTrust Bank N.A.</td>
- <td headers="Closing Date">September 11, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="corus.html">Corus Bank, N.A.</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">13693</td>
- <td headers="AI">MB Financial Bank, N.A.</td>
- <td headers="Closing Date">September 11, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="firststate-az.html">First State Bank</a></td>
- <td headers="city">Flagstaff</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">34875</td>
- <td headers="AI">Sunwest Bank</td>
- <td headers="Closing Date">September 4, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="platinum-il.html">Platinum Community Bank</a></td>
- <td headers="city">Rolling Meadows</td>
- <td headers="state">IL</td>
- <td headers="CERT #">35030</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">September 4, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="vantus.html">Vantus Bank</a></td>
- <td headers="city">Sioux City</td>
- <td headers="state">IA</td>
- <td headers="CERT #">27732</td>
- <td headers="AI">Great Southern Bank</td>
- <td headers="Closing Date">September 4, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="inbank.html">InBank</a></td>
- <td headers="city">Oak Forest</td>
- <td headers="state">IL</td>
- <td headers="CERT #">20203</td>
- <td headers="AI">MB Financial Bank, N.A.</td>
- <td headers="Closing Date">September 4, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="firstbankkc-mo.html">First Bank of Kansas City</a></td>
- <td headers="city">Kansas City</td>
- <td headers="state">MO</td>
- <td headers="CERT #">25231</td>
- <td headers="AI">Great American Bank</td>
- <td headers="Closing Date">September 4, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="affinity-ca.html">Affinity Bank</a></td>
- <td headers="city">Ventura</td>
- <td headers="state">CA</td>
- <td headers="CERT #">27197</td>
- <td headers="AI">Pacific Western Bank</td>
- <td headers="Closing Date">August 28, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="mainstreet-mn.html">Mainstreet Bank</a></td>
- <td headers="city">Forest Lake</td>
- <td headers="state">MN</td>
- <td headers="CERT #">1909</td>
- <td headers="AI">Central Bank</td>
- <td headers="Closing Date">August 28, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="bradford-md.html">Bradford Bank</a></td>
- <td headers="city">Baltimore</td>
- <td headers="state">MD</td>
- <td headers="CERT #">28312</td>
- <td headers="AI">Manufacturers and Traders Trust Company (M&T Bank)</td>
- <td headers="Closing Date">August 28, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="guaranty-tx.html">Guaranty Bank</a></td>
- <td headers="city">Austin</td>
- <td headers="state">TX</td>
- <td headers="CERT #">32618</td>
- <td headers="AI">BBVA Compass</td>
- <td headers="Closing Date">August 21, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="capitalsouth.html">CapitalSouth Bank</a></td>
- <td headers="city">Birmingham </td>
- <td headers="state">AL</td>
- <td headers="CERT #">22130</td>
- <td headers="AI">IBERIABANK</td>
- <td headers="Closing Date">August 21, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="coweta.html">First Coweta Bank</a> </td>
- <td headers="city">Newnan</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57702</td>
- <td headers="AI">United Bank</td>
- <td headers="Closing Date">August 21, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="ebank.html">ebank</a></td>
- <td headers="city">Atlanta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34682</td>
- <td headers="AI">Stearns Bank, N.A.</td>
- <td headers="Closing Date">August 21, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="community-nv.html">Community Bank of Nevada</a></td>
- <td headers="city">Las Vegas</td>
- <td headers="state">NV</td>
- <td headers="CERT #">34043</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">August 14, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="community-az.html">Community Bank of Arizona</a></td>
- <td headers="city">Phoenix</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">57645</td>
- <td headers="AI">MidFirst Bank</td>
- <td headers="Closing Date">August 14, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="union-az.html">Union Bank, National Association</a></td>
- <td headers="city">Gilbert</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">34485</td>
- <td headers="AI">MidFirst Bank</td>
- <td headers="Closing Date">August 14, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="colonial-al.html">Colonial Bank</a></td>
- <td headers="city">Montgomery</td>
- <td headers="state">AL</td>
- <td headers="CERT #">9609</td>
- <td headers="AI">Branch Banking & Trust Company, (BB&T) </td>
- <td headers="Closing Date">August 14, 2009</td>
- <td headers="Updated">September 5, 2012</td>
- </tr>
- <tr>
- <td><a href="dwelling.html">Dwelling House Savings and Loan Association</a></td>
- <td headers="city">Pittsburgh</td>
- <td headers="state">PA</td>
- <td headers="CERT #">31559</td>
- <td headers="AI">PNC Bank, N.A.</td>
- <td headers="Closing Date">August 14, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="community-prineville.html">Community First Bank</a></td>
- <td headers="city">Prineville</td>
- <td headers="state">OR</td>
- <td headers="CERT #">23268</td>
- <td headers="AI">Home Federal Bank</td>
- <td headers="Closing Date">August 7, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="community-venice.html">Community National Bank of Sarasota County</a></td>
- <td headers="city">Venice</td>
- <td headers="state">FL</td>
- <td headers="CERT #">27183</td>
- <td headers="AI">Stearns Bank, N.A.</td>
- <td headers="Closing Date">August 7, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="fsb-sarasota.html">First State Bank</a></td>
- <td headers="city">Sarasota</td>
- <td headers="state">FL</td>
- <td headers="CERT #">27364</td>
- <td headers="AI">Stearns Bank, N.A.</td>
- <td headers="Closing Date">August 7, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="mutual-harvey.html">Mutual Bank</a></td>
- <td headers="city">Harvey</td>
- <td headers="state">IL</td>
- <td headers="CERT #">18659</td>
- <td headers="AI">United Central Bank</td>
- <td headers="Closing Date">July 31, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="americano.html">First BankAmericano</a></td>
- <td headers="city">Elizabeth</td>
- <td headers="state">NJ</td>
- <td headers="CERT #">34270</td>
- <td headers="AI">Crown Bank</td>
- <td headers="Closing Date">July 31, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="peoplescommunity-oh.html">Peoples Community Bank</a></td>
- <td headers="city">West Chester</td>
- <td headers="state">OH</td>
- <td headers="CERT #">32288</td>
- <td headers="AI">First Financial Bank, N.A.</td>
- <td headers="Closing Date">July 31, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="integrity-fl.html">Integrity Bank</a></td>
- <td headers="city">Jupiter</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57604</td>
- <td headers="AI">Stonegate Bank</td>
- <td headers="Closing Date">July 31, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="fsb-altus.html">First State Bank of Altus</a></td>
- <td headers="city">Altus</td>
- <td headers="state">OK</td>
- <td headers="CERT #">9873</td>
- <td headers="AI">Herring Bank</td>
- <td headers="Closing Date">July 31, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="sb-jones.html">Security Bank of Jones County</a></td>
- <td headers="city">Gray</td>
- <td headers="state">GA</td>
- <td headers="CERT #">8486</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">July 24, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="sb-houston.html">Security Bank of Houston County</a></td>
- <td headers="city">Perry</td>
- <td headers="state">GA</td>
- <td headers="CERT #">27048</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">July 24, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="sb-bibb.html">Security Bank of Bibb County</a></td>
- <td headers="city">Macon</td>
- <td headers="state">GA</td>
- <td headers="CERT #">27367</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">July 24, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="sb-metro.html">Security Bank of North Metro</a></td>
- <td headers="city">Woodstock</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57105</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">July 24, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="sb-fulton.html">Security Bank of North Fulton</a></td>
- <td headers="city">Alpharetta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57430</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">July 24, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="sb-gwinnett.html">Security Bank of Gwinnett County</a></td>
- <td headers="city">Suwanee</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57346</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">July 24, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="waterford.html">Waterford Village Bank</a></td>
- <td headers="city">Williamsville</td>
- <td headers="state">NY</td>
- <td headers="CERT #">58065</td>
- <td headers="AI">Evans Bank, N.A.</td>
- <td headers="Closing Date">July 24, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="temecula.html">Temecula Valley Bank</a></td>
- <td headers="city">Temecula</td>
- <td headers="state">CA</td>
- <td headers="CERT #">34341</td>
- <td headers="AI">First-Citizens Bank & Trust Company</td>
- <td headers="Closing Date">July 17, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="vineyard.html">Vineyard Bank</a></td>
- <td headers="city">Rancho Cucamonga</td>
- <td headers="state">CA</td>
- <td headers="CERT #">23556</td>
- <td headers="AI">California Bank & Trust</td>
- <td headers="Closing Date">July 17, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="bankfirst.html">BankFirst</a></td>
- <td headers="city">Sioux Falls</td>
- <td headers="state">SD</td>
- <td headers="CERT #">34103</td>
- <td headers="AI">Alerus Financial, N.A.</td>
- <td headers="Closing Date">July 17, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="piedmont.html">First Piedmont Bank</a></td>
- <td headers="city">Winder</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34594</td>
- <td headers="AI">First American Bank and Trust Company</td>
- <td headers="Closing Date">July 17, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="wyoming.html">Bank of Wyoming</a></td>
- <td headers="city">Thermopolis</td>
- <td headers="state">WY</td>
- <td headers="CERT #">22754</td>
- <td headers="AI">Central Bank & Trust</td>
- <td headers="Closing Date">July 10, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="founders.html">Founders Bank</a></td>
- <td headers="city">Worth</td>
- <td headers="state">IL</td>
- <td headers="CERT #">18390</td>
- <td headers="AI">The PrivateBank and Trust Company</td>
- <td headers="Closing Date">July 2, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="millennium.html">Millennium State Bank of Texas</a></td>
- <td headers="city">Dallas</td>
- <td headers="state">TX</td>
- <td headers="CERT #">57667</td>
- <td headers="AI">State Bank of Texas</td>
- <td headers="Closing Date">July 2, 2009</td>
- <td headers="Updated">October 26, 2012</td>
- </tr>
- <tr>
- <td><a href="danville.html">First National Bank of Danville</a></td>
- <td headers="city">Danville</td>
- <td headers="state">IL</td>
- <td headers="CERT #">3644</td>
- <td headers="AI">First Financial Bank, N.A.</td>
- <td headers="Closing Date">July 2, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="elizabeth.html">Elizabeth State Bank</a></td>
- <td headers="city">Elizabeth</td>
- <td headers="state">IL</td>
- <td headers="CERT #">9262</td>
- <td headers="AI">Galena State Bank and Trust Company</td>
- <td headers="Closing Date">July 2, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="rockriver.html">Rock River Bank</a></td>
- <td headers="city">Oregon</td>
- <td headers="state">IL</td>
- <td headers="CERT #">15302</td>
- <td headers="AI">The Harvard State Bank</td>
- <td headers="Closing Date">July 2, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="winchester.html">First State Bank of Winchester</a></td>
- <td headers="city">Winchester</td>
- <td headers="state">IL</td>
- <td headers="CERT #">11710</td>
- <td headers="AI">The First National Bank of Beardstown</td>
- <td headers="Closing Date">July 2, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="warner.html">John Warner Bank</a></td>
- <td headers="city">Clinton</td>
- <td headers="state">IL</td>
- <td headers="CERT #">12093</td>
- <td headers="AI">State Bank of Lincoln</td>
- <td headers="Closing Date">July 2, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="mirae.html">Mirae Bank</a></td>
- <td headers="city">Los Angeles</td>
- <td headers="state">CA</td>
- <td headers="CERT #">57332</td>
- <td headers="AI">Wilshire State Bank</td>
- <td headers="Closing Date">June 26, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="metropacific.html">MetroPacific Bank</a></td>
- <td headers="city">Irvine</td>
- <td headers="state">CA</td>
- <td headers="CERT #">57893</td>
- <td headers="AI">Sunwest Bank</td>
- <td headers="Closing Date">June 26, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="horizon.html">Horizon Bank</a></td>
- <td headers="city">Pine City</td>
- <td headers="state">MN</td>
- <td headers="CERT #">9744</td>
- <td headers="AI">Stearns Bank, N.A.</td>
- <td headers="Closing Date">June 26, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="neighbor.html">Neighborhood Community Bank</a></td>
- <td headers="city">Newnan</td>
- <td headers="state">GA</td>
- <td headers="CERT #">35285</td>
- <td headers="AI">CharterBank</td>
- <td headers="Closing Date">June 26, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="communityga.html">Community Bank of West Georgia</a></td>
- <td headers="city">Villa Rica</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57436</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">June 26, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="anthony.html">First National Bank of Anthony</a></td>
- <td headers="city">Anthony</td>
- <td headers="state">KS</td>
- <td headers="CERT #">4614</td>
- <td headers="AI">Bank of Kansas</td>
- <td headers="Closing Date">June 19, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="cooperative.html">Cooperative Bank</a></td>
- <td headers="city">Wilmington</td>
- <td headers="state">NC</td>
- <td headers="CERT #">27837</td>
- <td headers="AI">First Bank</td>
- <td headers="Closing Date">June 19, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="scb.html">Southern Community Bank</a></td>
- <td headers="city">Fayetteville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">35251</td>
- <td headers="AI">United Community Bank</td>
- <td headers="Closing Date">June 19, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="lincolnwood.html">Bank of Lincolnwood</a></td>
- <td headers="city">Lincolnwood</td>
- <td headers="state">IL</td>
- <td headers="CERT #">17309</td>
- <td headers="AI">Republic Bank of Chicago</td>
- <td headers="Closing Date">June 5, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="citizensnational.html">Citizens National Bank</a></td>
- <td headers="city">Macomb</td>
- <td headers="state">IL</td>
- <td headers="CERT #">5757</td>
- <td headers="AI">Morton Community Bank</td>
- <td headers="Closing Date">May 22, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="strategiccapital.html">Strategic Capital Bank</a></td>
- <td headers="city">Champaign</td>
- <td headers="state">IL</td>
- <td headers="CERT #">35175</td>
- <td headers="AI">Midland States Bank</td>
- <td headers="Closing Date">May 22, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="bankunited.html">BankUnited, FSB</a></td>
- <td headers="city">Coral Gables</td>
- <td headers="state">FL</td>
- <td headers="CERT #">32247</td>
- <td headers="AI">BankUnited</td>
- <td headers="Closing Date">May 21, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="westsound.html">Westsound Bank</a></td>
- <td headers="city">Bremerton</td>
- <td headers="state">WA</td>
- <td headers="CERT #">34843</td>
- <td headers="AI">Kitsap Bank</td>
- <td headers="Closing Date">May 8, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="americawest.html">America West Bank</a></td>
- <td headers="city">Layton</td>
- <td headers="state">UT</td>
- <td headers="CERT #">35461</td>
- <td headers="AI">Cache Valley Bank</td>
- <td headers="Closing Date">May 1, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="citizens.html">Citizens Community Bank</a></td>
- <td headers="city">Ridgewood</td>
- <td headers="state">NJ</td>
- <td headers="CERT #">57563</td>
- <td headers="AI">North Jersey Community Bank</td>
- <td headers="Closing Date">May 1, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="silverton.html">Silverton Bank, NA</a></td>
- <td headers="city">Atlanta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">26535</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">May 1, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="firstbankidaho.html">First Bank of Idaho</a></td>
- <td headers="city">Ketchum</td>
- <td headers="state">ID</td>
- <td headers="CERT #">34396</td>
- <td headers="AI">U.S. Bank, N.A.</td>
- <td headers="Closing Date">April 24, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="beverlyhills.html">First Bank of Beverly Hills</a></td>
- <td headers="city">Calabasas</td>
- <td headers="state">CA</td>
- <td headers="CERT #">32069</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">April 24, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="michiganheritage.html">Michigan Heritage Bank</a></td>
- <td headers="city">Farmington Hills</td>
- <td headers="state">MI</td>
- <td headers="CERT #">34369</td>
- <td headers="AI">Level One Bank</td>
- <td headers="Closing Date">April 24, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="amsouthern.html">American Southern Bank</a></td>
- <td headers="city">Kennesaw</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57943</td>
- <td headers="AI">Bank of North Georgia</td>
- <td headers="Closing Date">April 24, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="greatbasin.html">Great Basin Bank of Nevada</a></td>
- <td headers="city">Elko</td>
- <td headers="state">NV</td>
- <td headers="CERT #">33824</td>
- <td headers="AI">Nevada State Bank</td>
- <td headers="Closing Date">April 17, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="amsterling.html">American Sterling Bank</a></td>
- <td headers="city">Sugar Creek</td>
- <td headers="state">MO</td>
- <td headers="CERT #">8266</td>
- <td headers="AI">Metcalf Bank</td>
- <td headers="Closing Date">April 17, 2009</td>
- <td headers="Updated">August 31, 2012</td>
- </tr>
- <tr>
- <td><a href="newfrontier.html">New Frontier Bank</a></td>
- <td headers="city">Greeley</td>
- <td headers="state">CO</td>
- <td headers="CERT #">34881</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">April 10, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="capefear.html">Cape Fear Bank</a></td>
- <td headers="city">Wilmington</td>
- <td headers="state">NC</td>
- <td headers="CERT #">34639</td>
- <td headers="AI">First Federal Savings and Loan Association</td>
- <td headers="Closing Date">April 10, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="omni.html">Omni National Bank</a></td>
- <td headers="city">Atlanta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">22238</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">March 27, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="teambank.html">TeamBank, NA</a></td>
- <td headers="city">Paola</td>
- <td headers="state">KS</td>
- <td headers="CERT #">4754</td>
- <td headers="AI">Great Southern Bank</td>
- <td headers="Closing Date">March 20, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="coloradonational.html">Colorado National Bank</a></td>
- <td headers="city">Colorado Springs</td>
- <td headers="state">CO</td>
- <td headers="CERT #">18896</td>
- <td headers="AI">Herring Bank</td>
- <td headers="Closing Date">March 20, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="firstcity.html">FirstCity Bank</a></td>
- <td headers="city">Stockbridge</td>
- <td headers="state">GA</td>
- <td headers="CERT #">18243</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">March 20, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="freedomga.html">Freedom Bank of Georgia</a></td>
- <td headers="city">Commerce</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57558</td>
- <td headers="AI">Northeast Georgia Bank</td>
- <td headers="Closing Date">March 6, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="securitysavings.html">Security Savings Bank</a></td>
- <td headers="city">Henderson</td>
- <td headers="state">NV</td>
- <td headers="CERT #">34820</td>
- <td headers="AI">Bank of Nevada</td>
- <td headers="Closing Date">February 27, 2009</td>
- <td headers="Updated">September 7, 2012</td>
- </tr>
- <tr>
- <td><a href="heritagebank.html">Heritage Community Bank</a></td>
- <td headers="city">Glenwood</td>
- <td headers="state">IL</td>
- <td headers="CERT #">20078</td>
- <td headers="AI">MB Financial Bank, N.A.</td>
- <td headers="Closing Date">February 27, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="silverfalls.html">Silver Falls Bank</a></td>
- <td headers="city">Silverton</td>
- <td headers="state">OR</td>
- <td headers="CERT #">35399</td>
- <td headers="AI">Citizens Bank</td>
- <td headers="Closing Date">February 20, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="pinnacle.html">Pinnacle Bank of Oregon</a></td>
- <td headers="city">Beaverton</td>
- <td headers="state">OR</td>
- <td headers="CERT #">57342</td>
- <td headers="AI">Washington Trust Bank of Spokane</td>
- <td headers="Closing Date">February 13, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="cornbelt.html">Corn Belt Bank & Trust Co.</a></td>
- <td headers="city">Pittsfield</td>
- <td headers="state">IL</td>
- <td headers="CERT #">16500</td>
- <td headers="AI">The Carlinville National Bank</td>
- <td headers="Closing Date">February 13, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="riverside.html">Riverside Bank of the Gulf Coast</a></td>
- <td headers="city">Cape Coral</td>
- <td headers="state">FL</td>
- <td headers="CERT #">34563</td>
- <td headers="AI">TIB Bank</td>
- <td headers="Closing Date">February 13, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="sherman.html">Sherman County Bank</a></td>
- <td headers="city">Loup City</td>
- <td headers="state">NE</td>
- <td headers="CERT #">5431</td>
- <td headers="AI">Heritage Bank</td>
- <td headers="Closing Date">February 13, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="county.html">County Bank</a></td>
- <td headers="city">Merced</td>
- <td headers="state">CA</td>
- <td headers="CERT #">22574</td>
- <td headers="AI">Westamerica Bank</td>
- <td headers="Closing Date">February 6, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="alliance.html">Alliance Bank</a></td>
- <td headers="city">Culver City</td>
- <td headers="state">CA</td>
- <td headers="CERT #"> 23124</td>
- <td headers="AI">California Bank & Trust</td>
- <td headers="Closing Date">February 6, 2009</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="firstbank.html">FirstBank Financial Services</a></td>
- <td headers="city">McDonough</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57017</td>
- <td headers="AI">Regions Bank</td>
- <td headers="Closing Date">February 6, 2009</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="ocala.html">Ocala National Bank</a></td>
- <td headers="city">Ocala</td>
- <td headers="state">FL</td>
- <td headers="CERT #">26538</td>
- <td headers="AI">CenterState Bank of Florida, N.A.</td>
- <td headers="Closing Date">January 30, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="suburban.html">Suburban FSB</a></td>
- <td headers="city">Crofton</td>
- <td headers="state">MD</td>
- <td headers="CERT #">30763</td>
- <td headers="AI">Bank of Essex</td>
- <td headers="Closing Date">January 30, 2009</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="magnet.html">MagnetBank</a></td>
- <td headers="city">Salt Lake City</td>
- <td headers="state">UT</td>
- <td headers="CERT #">58001</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">January 30, 2009</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="centennial.html">1st Centennial Bank</a></td>
- <td headers="city">Redlands</td>
- <td headers="state">CA</td>
- <td headers="CERT #">33025</td>
- <td headers="AI">First California Bank</td>
- <td headers="Closing Date">January 23, 2009</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="clark.html">Bank of Clark County</a></td>
- <td headers="city">Vancouver</td>
- <td headers="state">WA</td>
- <td headers="CERT #">34959</td>
- <td headers="AI">Umpqua Bank</td>
- <td headers="Closing Date">January 16, 2009</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="commerce.html">National Bank of Commerce</a></td>
- <td headers="city">Berkeley</td>
- <td headers="state">IL</td>
- <td headers="CERT #">19733</td>
- <td headers="AI">Republic Bank of Chicago</td>
- <td headers="Closing Date">January 16, 2009</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="sanderson.html">Sanderson State Bank</a><br />
- <a href="sanderson_spanish.html">En Espanol</a></td>
- <td headers="city">Sanderson</td>
- <td headers="state">TX</td>
- <td headers="CERT #">11568</td>
- <td headers="AI">The Pecos County State Bank</td>
- <td headers="Closing Date">December 12, 2008</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="haventrust.html">Haven Trust Bank</a></td>
- <td headers="city">Duluth</td>
- <td headers="state">GA</td>
- <td headers="CERT #">35379</td>
- <td headers="AI">Branch Banking & Trust Company, (BB&T) </td>
- <td headers="Closing Date">December 12, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="firstga.html">First Georgia Community Bank</a></td>
- <td headers="city">Jackson</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34301</td>
- <td headers="AI">United Bank</td>
- <td headers="Closing Date">December 5, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="pff.html">PFF Bank & Trust </a></td>
- <td headers="city">Pomona</td>
- <td headers="state">CA</td>
- <td headers="CERT #">28344</td>
- <td headers="AI">U.S. Bank, N.A.</td>
- <td headers="Closing Date">November 21, 2008</td>
- <td headers="Updated">January 4, 2013</td>
- </tr>
- <tr>
- <td><a href="downey.html">Downey Savings & Loan</a></td>
- <td headers="city">Newport Beach</td>
- <td headers="state">CA</td>
- <td headers="CERT #">30968</td>
- <td headers="AI">U.S. Bank, N.A.</td>
- <td headers="Closing Date">November 21, 2008</td>
- <td headers="Updated">January 4, 2013</td>
- </tr>
- <tr>
- <td><a href="community.html">Community Bank</a></td>
- <td headers="city">Loganville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">16490</td>
- <td headers="AI">Bank of Essex</td>
- <td headers="Closing Date">November 21, 2008</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="securitypacific.html">Security Pacific Bank</a></td>
- <td headers="city">Los Angeles</td>
- <td headers="state">CA</td>
- <td headers="CERT #">23595</td>
- <td headers="AI">Pacific Western Bank</td>
- <td headers="Closing Date">November 7, 2008</td>
- <td headers="Updated">August 28, 2012</td>
- </tr>
- <tr>
- <td><a href="franklinbank.html">Franklin Bank, SSB</a></td>
- <td headers="city">Houston</td>
- <td headers="state">TX</td>
- <td headers="CERT #">26870</td>
- <td headers="AI">Prosperity Bank</td>
- <td headers="Closing Date">November 7, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="freedom.html">Freedom Bank</a></td>
- <td headers="city">Bradenton</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57930</td>
- <td headers="AI">Fifth Third Bank</td>
- <td headers="Closing Date">October 31, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="alpha.html">Alpha Bank & Trust</a></td>
- <td headers="city">Alpharetta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">58241</td>
- <td headers="AI">Stearns Bank, N.A.</td>
- <td headers="Closing Date">October 24, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="meridian.html">Meridian Bank</a></td>
- <td headers="city">Eldred</td>
- <td headers="state">IL</td>
- <td headers="CERT #">13789</td>
- <td headers="AI">National Bank</td>
- <td headers="Closing Date">October 10, 2008</td>
- <td headers="Updated">May 31, 2012</td>
- </tr>
- <tr>
- <td><a href="mainstreet.html">Main Street Bank</a></td>
- <td headers="city">Northville</td>
- <td headers="state">MI</td>
- <td headers="CERT #">57654</td>
- <td headers="AI">Monroe Bank & Trust</td>
- <td headers="Closing Date">October 10, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="wamu.html">Washington Mutual Bank <br />
- (Including its subsidiary Washington Mutual Bank FSB)</a></td>
- <td headers="city">Henderson</td>
- <td headers="state">NV</td>
- <td headers="CERT #">32633</td>
- <td headers="AI">JP Morgan Chase Bank</td>
- <td headers="Closing Date">September 25, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <!-- <tr>
- <td width="210"><a href="wamu.html">Washington Mutual Bank FSB</a></td>
- <td headers="city" width="126">Park City</td>
- <td headers="state" width="44">UT</td>
- <td headers="CERT #" width="61">33891</td>
- <td headers="Closing Date" width="117">September 25, 2008</td>
- <td headers="Updated" width="129">November 23, 2009</td>
- </tr> -->
- <tr>
- <td><a href="ameribank.html">Ameribank</a></td>
- <td headers="city">Northfork</td>
- <td headers="state">WV</td>
- <td headers="CERT #">6782</td>
- <td headers="AI">The Citizens Savings Bank<br /><br />Pioneer Community Bank, Inc.</td>
- <td headers="Closing Date">September 19, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="silverstate.html">Silver State Bank</a><br />
- <a href="silverstatesp.html">En Espanol </a></td>
- <td headers="city">Henderson</td>
- <td headers="state">NV</td>
- <td headers="CERT #">34194</td>
- <td headers="AI">Nevada State Bank</td>
- <td headers="Closing Date">September 5, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="integrity.html">Integrity Bank</a></td>
- <td headers="city">Alpharetta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">35469</td>
- <td headers="AI">Regions Bank</td>
- <td headers="Closing Date">August 29, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="columbian.html">Columbian Bank & Trust</a></td>
- <td headers="city">Topeka</td>
- <td headers="state">KS</td>
- <td headers="CERT #">22728</td>
- <td headers="AI">Citizens Bank & Trust</td>
- <td headers="Closing Date">August 22, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="firstprioritybank.html">First Priority Bank</a></td>
- <td headers="city">Bradenton</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57523</td>
- <td headers="AI">SunTrust Bank</td>
- <td headers="Closing Date">August 1, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="heritage.html">First Heritage Bank, NA</a></td>
- <td headers="city">Newport Beach</td>
- <td headers="state">CA</td>
- <td headers="CERT #">57961</td>
- <td headers="AI">Mutual of Omaha Bank</td>
- <td headers="Closing Date">July 25, 2008</td>
- <td headers="Updated">August 28, 2012</td>
- </tr>
- <tr>
- <td><a href="fnbnv.html">First National Bank of Nevada</a></td>
- <td headers="city">Reno</td>
- <td headers="state">NV</td>
- <td headers="CERT #">27011</td>
- <td headers="AI">Mutual of Omaha Bank</td>
- <td headers="Closing Date">July 25, 2008</td>
- <td headers="Updated">August 28, 2012</td>
- </tr>
- <tr>
- <td><a href="IndyMac.html">IndyMac Bank</a></td>
- <td headers="city">Pasadena</td>
- <td headers="state">CA</td>
- <td headers="CERT #">29730</td>
- <td headers="AI">OneWest Bank, FSB</td>
- <td headers="Closing Date">July 11, 2008</td>
- <td headers="Updated">August 28, 2012</td>
- </tr>
- <tr>
- <td><a href="first_integrity_bank.html">First Integrity Bank, NA</a></td>
- <td headers="city">Staples</td>
- <td headers="state">MN</td>
- <td headers="CERT #">12736</td>
- <td headers="AI">First International Bank and Trust</td>
- <td headers="Closing Date">May 30, 2008</td>
- <td headers="Updated">August 28, 2012</td>
- </tr><tr>
- <td><a href="anb.html">ANB Financial, NA</a></td>
- <td headers="city">Bentonville</td>
- <td headers="state">AR</td>
- <td headers="CERT #">33901</td>
- <td headers="AI">Pulaski Bank and Trust Company</td>
- <td headers="Closing Date">May 9, 2008</td>
- <td headers="Updated">August 28, 2012</td>
- </tr><tr>
- <td><a href="Hume.html">Hume Bank</a></td>
- <td headers="city">Hume</td>
- <td headers="state">MO</td>
- <td headers="CERT #">1971</td>
- <td headers="AI">Security Bank</td>
- <td headers="Closing Date">March 7, 2008</td>
- <td headers="Updated">August 28, 2012</td>
- </tr>
- <tr>
- <td><a href="Douglass.html">Douglass National Bank</a></td>
- <td headers="city">Kansas City</td>
- <td headers="state">MO</td>
- <td headers="CERT #">24660</td>
- <td headers="AI">Liberty Bank and Trust Company</td>
- <td headers="Closing Date">January 25, 2008</td>
- <td headers="Updated">October 26, 2012</td>
- </tr>
- <tr>
- <td><a href="MiamiValley.html">Miami Valley Bank</a></td>
- <td headers="city">Lakeview</td>
- <td headers="state">OH</td>
- <td headers="CERT #">16848</td>
- <td headers="AI">The Citizens Banking Company</td>
- <td headers="Closing Date">October 4, 2007</td>
- <td headers="Updated">August 28, 2012</td>
- </tr>
- <tr>
- <td><a href="NetBank.html">NetBank</a></td>
- <td headers="city">Alpharetta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">32575</td>
- <td headers="AI">ING DIRECT</td>
- <td headers="Closing Date">September 28, 2007</td>
- <td headers="Updated">August 28, 2012</td>
- </tr>
- <tr>
- <td><a href="MetropolitanSB.html">Metropolitan Savings Bank</a></td>
- <td headers="city">Pittsburgh</td>
- <td headers="state">PA</td>
- <td headers="CERT #">35353</td>
- <td headers="AI">Allegheny Valley Bank of Pittsburgh</td>
- <td headers="Closing Date">February 2, 2007</td>
- <td headers="Updated">October 27, 2010</td>
- </tr>
- <tr>
- <td><a href="ephraim.html">Bank of Ephraim</a></td>
- <td headers="city">Ephraim</td>
- <td headers="state">UT</td>
- <td headers="CERT #">1249</td>
- <td headers="AI">Far West Bank</td>
- <td headers="Closing Date">June 25, 2004</td>
- <td headers="Updated">April 9, 2008</td>
- </tr>
- <tr>
- <td><a href="reliance.html">Reliance Bank</a></td>
- <td headers="city">White Plains</td>
- <td headers="state">NY</td>
- <td headers="CERT #">26778</td>
- <td headers="AI">Union State Bank</td>
- <td headers="Closing Date">March 19, 2004</td>
- <td headers="Updated">April 9, 2008</td>
- </tr>
- <tr>
- <td><a href="gnb.html">Guaranty National Bank of Tallahassee</a></td>
- <td headers="city">Tallahassee</td>
- <td headers="state">FL</td>
- <td headers="CERT #">26838</td>
- <td headers="AI">Hancock Bank of Florida</td>
- <td headers="Closing Date">March 12, 2004</td>
- <td headers="Updated">June 5, 2012</td>
- </tr>
- <tr>
- <td><a href="dollar.html">Dollar Savings Bank</a></td>
- <td headers="city">Newark</td>
- <td headers="state">NJ</td>
- <td headers="CERT #">31330</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">February 14, 2004</td>
- <td headers="Updated">April 9, 2008</td>
- </tr>
- <tr>
- <td><a href="pulaski.html">Pulaski Savings Bank</a></td>
- <td headers="city">Philadelphia</td>
- <td headers="state">PA</td>
- <td headers="CERT #">27203</td>
- <td headers="AI">Earthstar Bank</td>
- <td headers="Closing Date">November 14, 2003</td>
- <td headers="Updated">July 22, 2005</td>
- </tr>
- <tr>
- <td><a href="blanchardville.html">First National Bank of Blanchardville</a></td>
- <td headers="city">Blanchardville</td>
- <td headers="state">WI</td>
- <td headers="CERT #">11639</td>
- <td headers="AI">The Park Bank</td>
- <td headers="Closing Date">May 9, 2003</td>
- <td headers="Updated">June 5, 2012</td>
- </tr>
- <tr>
- <td><a href="spbank.html">Southern Pacific Bank</a></td>
- <td headers="city">Torrance</td>
- <td headers="state">CA</td>
- <td headers="CERT #">27094</td>
- <td headers="AI">Beal Bank</td>
- <td headers="Closing Date">February 7, 2003</td>
- <td headers="Updated">October 20, 2008</td>
- </tr>
- <tr>
- <td><a href="farmers.html">Farmers Bank of Cheneyville</a></td>
- <td headers="city">Cheneyville</td>
- <td headers="state">LA</td>
- <td headers="CERT #">16445</td>
- <td headers="AI">Sabine State Bank & Trust</td>
- <td headers="Closing Date">December 17, 2002</td>
- <td headers="Updated">October 20, 2004</td>
- </tr>
- <tr>
- <td><a href="bankofalamo.html">Bank of Alamo</a></td>
- <td headers="city">Alamo</td>
- <td headers="state">TN</td>
- <td headers="CERT #">9961</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">November 8, 2002</td>
- <td headers="Updated">March 18, 2005</td>
- </tr>
- <tr>
- <td><a href="amtrade.html">AmTrade International Bank</a><br /><a href="amtrade-spanish.html">En Espanol </a></td>
- <td headers="city">Atlanta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">33784</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">September 30, 2002</td>
- <td headers="Updated">September 11, 2006</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="universal.html">Universal Federal Savings Bank</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">29355</td>
- <td headers="AI">Chicago Community Bank</td>
- <td headers="Closing Date">June 27, 2002</td>
- <td headers="Updated">April 9, 2008</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="cbc.html">Connecticut Bank of Commerce</a></td>
- <td headers="city">Stamford</td>
- <td headers="state">CT</td>
- <td headers="CERT #">19183</td>
- <td headers="AI">Hudson United Bank</td>
- <td headers="Closing Date">June 26, 2002</td>
- <td headers="Updated">February 14, 2012</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="newcentury.html">New Century Bank</a></td>
- <td headers="city">Shelby Township</td>
- <td headers="state">MI</td>
- <td headers="CERT #">34979</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">March 28, 2002</td>
- <td headers="Updated">March 18, 2005</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="netfirst.html">Net 1st National Bank</a></td>
- <td headers="city">Boca Raton</td>
- <td headers="state">FL</td>
- <td headers="CERT #">26652</td>
- <td headers="AI">Bank Leumi USA</td>
- <td headers="Closing Date">March 1, 2002</td>
- <td headers="Updated">April 9, 2008</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="nextbank.html">NextBank, NA</a></td>
- <td headers="city">Phoenix</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">22314</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">February 7, 2002</td>
- <td headers="Updated">August 27, 2010</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="Oakwood.html">Oakwood Deposit Bank Co.</a></td>
- <td headers="city">Oakwood</td>
- <td headers="state">OH</td>
- <td headers="CERT #">8966</td>
- <td headers="AI">The State Bank & Trust Company</td>
- <td headers="Closing Date">February 1, 2002</td>
- <td headers="Updated">October 25, 2012</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="sierrablanca.html">Bank of Sierra Blanca</a></td>
- <td headers="city">Sierra Blanca</td>
- <td headers="state">TX</td>
- <td headers="CERT #">22002</td>
- <td headers="AI">The Security State Bank of Pecos</td>
- <td headers="Closing Date">January 18, 2002</td>
- <td headers="Updated">November 6, 2003</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="hamilton.html">Hamilton Bank, NA</a><br />
- <a href="hamilton-spanish.html">En Espanol</a></td>
- <td headers="city">Miami</td>
- <td headers="state">FL</td>
- <td headers="CERT #">24382</td>
- <td headers="AI">Israel Discount Bank of New York</td>
- <td headers="Closing Date">January 11, 2002</td>
- <td headers="Updated">June 5, 2012</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="sinclair.html">Sinclair National Bank</a></td>
- <td headers="city">Gravette</td>
- <td headers="state">AR</td>
- <td headers="CERT #">34248</td>
- <td headers="AI">Delta Trust & Bank</td>
- <td headers="Closing Date">September 7, 2001</td>
- <td headers="Updated">February 10, 2004</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="superior.html">Superior Bank, FSB</a></td>
- <td headers="city">Hinsdale</td>
- <td headers="state">IL</td>
- <td headers="CERT #">32646</td>
- <td headers="AI">Superior Federal, FSB</td>
- <td headers="Closing Date">July 27, 2001</td>
- <td headers="Updated">June 5, 2012</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="Malta.html">Malta National Bank</a></td>
- <td headers="city">Malta</td>
- <td headers="state">OH</td>
- <td headers="CERT #">6629</td>
- <td headers="AI">North Valley Bank</td>
- <td headers="Closing Date">May 3, 2001</td>
- <td headers="Updated">November 18, 2002</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="firstalliance.html">First Alliance Bank & Trust Co.</a></td>
- <td headers="city">Manchester</td>
- <td headers="state">NH</td>
- <td headers="CERT #">34264</td>
- <td headers="AI">Southern New Hampshire Bank & Trust</td>
- <td headers="Closing Date">February 2, 2001</td>
- <td headers="Updated">February 18, 2003</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="nsb.html">National State Bank of Metropolis</a></td>
- <td headers="city">Metropolis</td>
- <td headers="state">IL</td>
- <td headers="CERT #">3815</td>
- <td headers="AI">Banterra Bank of Marion</td>
- <td headers="Closing Date">December 14, 2000</td>
- <td headers="Updated">March 17, 2005</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="boh.html">Bank of Honolulu</a></td>
- <td headers="city">Honolulu</td>
- <td headers="state">HI</td>
- <td headers="CERT #">21029</td>
- <td headers="AI">Bank of the Orient</td>
- <td headers="Closing Date">October 13, 2000</td>
- <td headers="Updated">March 17, 2005</td>
- </tr>
- </tbody>
-</table>
- <!--
-<script language="javascript">
-
-document.writeln("<div id=\"controls\">");
-document.writeln("<div id=\"perpage\">");
-document.writeln("<select onchange=\"sorter.size(this.value)\">");
-document.writeln("<option value=\"5\">5</option>");
-document.writeln("<option value=\"10\" >10</option>");
-document.writeln("<option value=\"20\"selected=\"selected\">20</option>");
-document.writeln("<option value=\"50\">50</option>");
-document.writeln("<option value=\"100\">100</option>");
-document.writeln("<option value=\"150\">150</option>");
-document.writeln("</select>");
-document.writeln(" Entries Per Page");
-document.writeln("</div>");
-document.writeln("<div id=\"navigation\">");
-document.writeln("<img src=\"images/first.gif\" width=\"16\" height=\"16\" alt=\"First Page\" onclick=\"sorter.move(-1,true)\" />");
-document.writeln("<img src=\"images/previous.gif\" width=\"16\" height=\"16\" alt=\"Previous Page\" onclick=\"sorter.move(-1)\" />");
-document.writeln("<img src=\"images/next.gif\" width=\"16\" height=\"16\" alt=\"Next Page\" onclick=\"sorter.move(1)\" />");
-document.writeln("<img src=\"images/last.gif\" width=\"16\" height=\"16\" alt=\"Last Page\" onclick=\"sorter.move(1,true)\" />");
-document.writeln("</div>");
-document.writeln("<div id=\"text\">Displaying Page ");
-document.writeln("<span id=\"currentpage\">");
-document.writeln("</span>");
-document.writeln(" of ");
-document.writeln("<span id=\"pagelimit\">");
-document.writeln("</span>");
-document.writeln("</div>");
-document.writeln("</div>");
-
- </script>
--->
-
+ </fieldset>
+ </form>
+ </div>
+ </div>
+ <!-- close right side -->
+ <a id="responsive_header-fdic_logo" href="/" title="FDIC Homepage">FDIC Homepage</a>
+ <h1>Federal Deposit<br>Insurance Corporation</h1>
+ <h2>Each depositor insured to at least $250,000 per insured bank</h2>
+ <div class="clear"></div>
+ <nav>
+ <div id="responsive_header_nav">
+ <div id="responsive_header-topnav">
+ <div id="responsive_header-topnav-downarrow" onclick="show_rwdnav(this)"></div>
+ <ul id="responsive_header-topnav-list">
+ <li id="responsive_header-topnav-home" title="Home" onmouseover="show_responsive_header_subnav(this)"><a href="/">Home</a></li>
+ <li id="responsive_header-topnav-deposit" title="Deposit Insurance" onmouseover="show_responsive_header_subnav(this)"><a href="/deposit/">Deposit Insurance</a></li>
+ <li id="responsive_header-topnav-consumers" title="Consumer Protection" onmouseover="show_responsive_header_subnav(this)"><a href="/consumers/">Consumer Protection</a></li>
+ <li id="responsive_header-topnav-bank" title="Industry Analysis" onmouseover="show_responsive_header_subnav(this)"><a href="/bank/">Industry Analysis</a></li>
+ <li id="responsive_header-topnav-regulations" title="Regulations & Examinations" onmouseover="show_responsive_header_subnav(this)"><a href="/regulations/">Regulations & Examinations</a></li>
+ <li id="responsive_header-topnav-buying" title="Asset Sales" onmouseover="show_responsive_header_subnav(this)"><a href="/buying/">Asset Sales</a></li>
+ <li id="responsive_header-topnav-news" title="News & Events" onmouseover="show_responsive_header_subnav(this)"><a href="/news/">News & Events</a></li>
+ <li id="responsive_header-topnav-about" title="About FDIC" onmouseover="show_responsive_header_subnav(this)"><a href="/about/">About FDIC</a></li>
+ </ul>
+ <div class="clear"></div>
+ </div>
+ <div id="responsive_header-topnav_subnav">
+ <div id="responsive_header-topnav_subnav-downarrow" onclick="show_rwdnav(this)"></div>
+ <ul id="responsive_header-topnav-home_subnav"><li><a> </a></li></ul>
+ <ul id="responsive_header-topnav-deposit_subnav">
+ <li title="BankFind"><a href="http://research.fdic.gov/bankfind/">BankFind</a></li>
+ <li title="Are My Deposits Insured?"><a href="/deposit/deposits/">Are My Deposits Insured?</a></li>
+ <li title="Uninsured Investments"><a href="/deposit/investments/">Uninsured Investments</a></li>
+ <li title="The Deposit Insurance Fund"><a href="/deposit/insurance/index.html">The Deposit Insurance Fund</a></li>
+ <li title="International Deposit Insurance"><a href="/deposit/deposits/international/">International Deposit Insurance</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-consumers_subnav">
+ <li title="Consumer News & Information"><a href="/consumers/consumer/">Consumer News & Information</a></li>
+ <li title="Loans & Mortgages"><a href="/consumers/loans/">Loans & Mortgages</a></li>
+ <li title="Banking & Your Money"><a href="/consumers/banking/">Banking & Your Money</a></li>
+ <li title="Financial Education & Literacy"><a href="/consumers/education/">Financial Education & Literacy</a></li>
+ <li title="Community Affairs"><a href="/consumers/community/">Community Affairs</a></li>
+ <li title="Identity Theft & Fraud"><a href="/consumers/theft/">Identity Theft & Fraud</a></li>
+ <li title="Consumer Financial Privacy"><a href="/consumers/privacy/">Consumer Financial Privacy</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-bank_subnav">
+ <li title="Bank Data & Statistics"><a href="/bank/statistical/">Bank Data & Statistics</a></li>
+ <li title="Research & Analysis"><a href="/bank/analytical/">Research & Analysis</a></li>
+ <li title="Failed Banks"><a href="/bank/individual/failed/">Failed Banks</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-regulations_subnav">
+ <li title="Bank Examinations"><a href="/regulations/examinations/">Bank Examinations</a></li>
+ <li title="Laws & Regulations"><a href="/regulations/laws/">Laws & Regulations</a></li>
+ <li title="Resources for Bank Officers & Directors"><a href="/regulations/resources/">Resources for Bank Officers & Directors</a></li>
+ <li title="FDICconnect"><a href="http://www.fdicconnect.gov/">FDIC<em>connect</em></a></li>
+ <li title="Required Financial Reports"><a href="/regulations/required/">Required Financial Reports</a></li>
+ <li title="Examiner Training Programs"><a href="/regulations/examiner/">Examiner Training Programs</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-buying_subnav">
+ <li title="Loan Sales"><a href="/buying/loan/">Loan Sales</a></li>
+ <li title="Real Estate Sales"><a href="/buying/owned/">Real Estate and Property Marketplace</a></li>
+ <li title="Financial Asset Sales"><a href="/buying/financial/">Financial Asset Sales</a></li>
+ <li title="Servicing Sales Announcements"><a href="/buying/servicing/">Servicing Sales Announcements</a></li>
+ <li title="Other Asset Sales"><a href="/buying/otherasset/">Other Asset Sales</a></li>
+ <li title="Historical Sales"><a href="/buying/historical/">Historical Sales</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-news_subnav">
+ <li title="Press Releases"><a href="/news/news/press/2013/">Press Releases</a></li>
+ <li title="Online Press Room"><a href="https://fdicsurvey.inquisiteasp.com/fdic/cgi-bin/qwebcorporate.dll?M58TRS">Online Press Room</a></li>
+ <li title="Conferences & Events"><a href="/news/conferences/">Conferences & Events</a></li>
+ <li title="Financial Institution Letters"><a href="/news/news/financial/2013/">Financial Institution Letters</a></li>
+ <li title="Special Alerts"><a href="/news/news/SpecialAlert/2012/">Special Alerts</a></li>
+ <li title="Letters to the Editor/Opinion Editorials"><a href="/news/letters/">Letters to the Editor/Opinion Editorials</a></li>
+ <li title="Speeches & Testimony"><a href="/news/news/speeches/chairman/">Speeches & Testimony</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-about_subnav">
+ <li title="Mission & Purpose"><a href="/about/index.html#1">Mission & Purpose</a></span></li>
+ <li title="Advisory Committees"><a href="/about/index.html#2">Advisory Committees</a></span></li>
+ <li title="Careers with the FDIC"><a href="/about/index.html#3">Careers with the FDIC</a></span></li>
+ <li title="Management Team"><a href="/about/index.html#4">Management Team</a></span></li>
+ <li title="Plans & Reports"><a href="/about/index.html#5">Plans & Reports</a></span></li>
+ <li title="What We Can Do for You"><a href="/about/index.html#6">What We Can Do for You</a></span></li>
+ <li title="Diversity at the FDIC"><a href="/about/index.html#7">Diversity at the FDIC</a></span></li>
+ </ul>
+ </div><!-- Close subnav -->
+ <div class="clear"></div>
+ </div>
+ </nav>
+</div>
+</header>
+<a id="after_header" name="after_header"></a>
<script type="text/javascript">
-var TINY={};
+prepare_responsive_header_nav();
+</script>
+<!-- END of Header -->
-function T$(i){return document.getElementById(i)}
-function T$$(e,p){return p.getElementsByTagName(e)}
+<div id="breadcrumbs"><a href="/">Home</a> > <a href="/bank/">Industry Analysis</a> > <a href="/bank/individual/failed/">Failed Banks</a> > Failed Bank List</div>
-TINY.table=function(){
- function sorter(n){this.n=n; this.pagesize=20; this.paginate=0}
- sorter.prototype.init=function(e,f){
- var t=ge(e), i=0; this.e=e; this.l=t.r.length; t.a=[];
- t.h=T$$('thead',T$(e))[0].rows[0]; t.w=t.h.cells.length;
- for(i;i<t.w;i++){
- var c=t.h.cells[i];
- if(c.className!='nosort'){
- c.className=this.head; c.onclick=new Function(this.n+'.wk(this.cellIndex)')
- }
- }
- for(i=0;i<this.l;i++){t.a[i]={}}
- if(f!=null){var a=new Function(this.n+'.wk('+f+')'); a()}
- if(this.paginate){this.g=1; this.pages()}
- };
- sorter.prototype.wk=function(y){
- var t=ge(this.e), x=t.h.cells[y], i=0;
- for(i;i<this.l;i++){
- t.a[i].o=i; var v=t.r[i].cells[y]; t.r[i].style.display='';
- while(v.hasChildNodes()){v=v.firstChild}
- t.a[i].v=v.nodeValue?v.nodeValue:''
- }
- for(i=0;i<t.w;i++){var c=t.h.cells[i]; if(c.className!='nosort'){c.className=this.head}}
-
-
- if(t.p==y)
- {
- t.a.reverse();
- x.className=t.d?this.asc:this.desc;
- t.d=t.d?0:1
- }
-
- else
- {
- t.p = y;
- t.a.sort(cp);
- t.d = 0;
- x.className = this.asc;
- }
-
-
-
-
- var n=document.createElement('tbody');
- for(i=0;i<this.l;i++){
- var r=t.r[t.a[i].o].cloneNode(true); n.appendChild(r);
- r.className=i%2==0?this.even:this.odd; var cells=T$$('td',r);
- for(var z=0;z<t.w;z++){cells[z].className=y==z?i%2==0?this.evensel:this.oddsel:''}
- }
- t.replaceChild(n,t.b); if(this.paginate){this.size(this.pagesize)}
- };
- sorter.prototype.page=function(s){
- var t=ge(this.e), i=0, l=s+parseInt(this.pagesize);
- if(this.currentid&&this.limitid){T$(this.currentid).innerHTML=this.g}
- for(i;i<this.l;i++){t.r[i].style.display=i>=s&&i<l?'':'none'}
- };
- sorter.prototype.move=function(d,m){
- var s=d==1?(m?this.d:this.g+1):(m?1:this.g-1);
- if(s<=this.d&&s>0){this.g=s; this.page((s-1)*this.pagesize)}
- };
- sorter.prototype.size=function(s){
- this.pagesize=s; this.g=1; this.pages(); this.page(0);
- if(this.currentid&&this.limitid){T$(this.limitid).innerHTML=this.d}
- };
- sorter.prototype.pages=function(){this.d=Math.ceil(this.l/this.pagesize)};
- function ge(e){var t=T$(e); t.b=T$$('tbody',t)[0]; t.r=t.b.rows; return t};
- function cp(f,c){
- var g,h; f=g=f.v.toLowerCase(), c=h=c.v.toLowerCase();
- var i=parseFloat(f.replace(/(\$|\,)/g,'')), n=parseFloat(c.replace(/(\$|\,)/g,''));
- if(!isNaN(i)&&!isNaN(n)){g=i,h=n}
- i=Date.parse(f); n=Date.parse(c);
- if(!isNaN(i)&&!isNaN(n))
- {
- g=i;
- h=n;
-
- }
-
- /**** This string returns the sort by ASCENDING Order *****/
- //return g>h?1:(g<h?-1:0)
-
-
- /**** This string returns the sort by DESCENDING Order *****/
- return g<h?1:(g>h?-1:0)
-
- };
- return{sorter:sorter}
-}();
+<div id="content" class="failed_bank_list">
- </script>
-<script type="text/javascript">
- var sorter = new TINY.table.sorter("sorter");
- sorter.head = "head";
- sorter.asc = "asc";
- sorter.desc = "desc";
- sorter.even = "evenrow";
- sorter.odd = "oddrow";
- sorter.evensel = "evenselected";
- sorter.oddsel = "oddselected";
- sorter.paginate = false;
- sorter.currentid = "currentpage";
- sorter.limitid = "pagelimit";
- sorter.init("table",5);
- </script>
-</td></tr>
-</table>
+ <h1 class="page_title">Failed Bank List</h1>
-<!-- DRR END Product Title & Body-->
- <br />
- <br />
+ <p>The FDIC is often appointed as receiver for failed banks. This page contains useful information for the customers and vendors of these banks. This includes information on the acquiring bank (if applicable), how your accounts and loans are affected, and how vendors can file claims against the receivership. <a href="http://www2.fdic.gov/drrip/cs/index.asp">Failed Financial Institution Contact Search</a> displays point of contact information related to failed banks.</p>
+ <p>This list includes banks which have failed since October 1, 2000. To search for banks that failed prior to those on this page, visit this link: <a href="http://www2.fdic.gov/hsob/SelectRpt.asp?EntryTyp=30">Failures and Assistance Transactions</a></p>
+ <p><a href="banklist.csv">Failed Bank List</a> - CSV file (Updated on Mondays. Also opens in Excel - <a href="/excel.html">Excel Help</a>)</p>
+
+ <p class="small_screen_warning">Due to the small screen size some information is no longer visible.<br>Full information available when viewed on a larger screen.</p>
+ <script type="text/javascript">
+ <!--
+ document.writeln("<p><em>Click arrows next to headers to sort in Ascending or Descending order.</em></p>");
+ //-->
+ </script>
- </td>
- </tr>
-
- <!-- begin: last updated date and contact information -->
- <tr>
- <td width="25"><img src="http://www.fdic.gov/images/spacer.gif" width="25" height="1" alt="" border="0" /><br /></td>
- <td>
-
- <!-- Instruction: change "mm/dd/yyyy" to the date the document was created or last modfied -->
-
- <font face="arial, helvetica, sans-serif" size="1" color="#000066">Last Updated
- 04/30/2013</font></td>
- <td align="right"><font face="arial, helvetica, sans-serif" size="1" color="#000066">
-
-<!-- Instruction: change the link text and href value of "Insert_Content_Email_Address@fdic.gov" to the fdic.gov e-mail address of the document's point of contact -->
-
-<a HREF="mailto:cservicefdicdal@fdic.gov">cservicefdicdal@fdic.gov</a></font></td>
- </tr>
- <!-- end: last updated date and contact information -->
-</table>
-<!-- BEGIN FOOTER INCLUDE -->
-<!-- Instruction: The following statement is the footer include statement. Do not revise this code. -->
-<br />
-</font><!-- Ends Opening Font Tag -->
-<!-- begin footer -->
-<!-- Last Updated Date: 1-18-2011 Time: 2:24PM Version: 1.4 -->
-</div><!-- ends body tag -->
-<!-- begin footer -->
- <div id="footer-container">
- <div>
- <ul id="footer-top">
- <li><a href="/" title="Home">Home</a> </li>
- <li>|</li>
- <li><a href="/about/contact/ask/" title="Contact Us">Contact Us</a></li>
- <li>|</li>
- <li><a href="/search/" title="Search">Search</a></li>
- <li>|</li>
- <li><a href="/help/" title="Help">Help</a></li>
- <li>|</li>
- <li><a href="/sitemap/" title="SiteMap">SiteMap</a></li>
- <li>|</li>
- <li><a href="/regulations/laws/forms/" title="Forms">Forms</a></li>
- <li>|</li>
- <li><a href="/quicklinks/spanish.html" title="En Español">En Español</a></li>
-
- </ul>
- </div>
-
- <div>
- <ul id="footer-middle">
- <li><a href="/about/policies/" title="Website Policies">Website Policies</a></li>
- <li>|</li>
- <li><a href="/about/privacy/policy/index.html" title="Privacy Policy">Privacy Policy</a></li>
- <li>|</li>
- <li><a href="/plainlanguage/index.html" title="Privacy Policy">Plain Writing Act of 2010 </a></li>
- <li>|</li>
- <li><a href="http://www.usa.gov/" title="USA.gov">USA.gov</a></li>
- <li>|</li>
- <li><a href="http://www.fdicoig.gov/" title="FDIC Office of Inspector General">FDIC Office of Inspector General</a></li>
- </ul>
- </div>
+ <div id="table_wrapper">
+ <table id="table" class="sortable">
+ <thead>
+ <tr>
+ <th id="institution" scope="col">Bank Name</th>
+ <th id="city" class="nosort" scope="col">City</th>
+ <th id="state" scope="col">ST</th>
+ <th id="cert" class="nosort" scope="col">CERT</th>
+ <th id="ai" scope="col">Acquiring Institution</th>
+ <th id="closing" scope="col">Closing Date</th>
+ <th id="updated" scope="col">Updated Date</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <td class="institution"><a href="kenosha.html">Banks of Wisconsin d/b/a Bank of Kenosha</a></td>
+ <td class="city">Kenosha</td>
+ <td class="state">WI</td>
+ <td class="cert">35386</td>
+ <td class="ai">North Shore Bank, FSB</td>
+ <td class="closing">May 31, 2013</td>
+ <td class="updated">May 31, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="centralaz.html">Central Arizona Bank</a></td>
+ <td class="city">Scottsdale</td>
+ <td class="state">AZ</td>
+ <td class="cert">34527</td>
+ <td class="ai">Western State Bank</td>
+ <td class="closing">May 14, 2013</td>
+ <td class="updated">May 20, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sunrisebank.html">Sunrise Bank</a></td>
+ <td class="city">Valdosta</td>
+ <td class="state">GA</td>
+ <td class="cert">58185</td>
+ <td class="ai">Synovus Bank</td>
+ <td class="closing">May 10, 2013</td>
+ <td class="updated">May 21, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pisgahcommbk.html">Pisgah Community Bank</a></td>
+ <td class="city">Asheville</td>
+ <td class="state">NC</td>
+ <td class="cert">58701</td>
+ <td class="ai">Capital Bank, N.A.</td>
+ <td class="closing">May 10, 2013</td>
+ <td class="updated">May 14, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="douglascb.html">Douglas County Bank</a></td>
+ <td class="city">Douglasville</td>
+ <td class="state">GA</td>
+ <td class="cert">21649</td>
+ <td class="ai">Hamilton State Bank</td>
+ <td class="closing">April 26, 2013</td>
+ <td class="updated">May 16, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="parkway.html">Parkway Bank</a></td>
+ <td class="city">Lenoir</td>
+ <td class="state">NC</td>
+ <td class="cert">57158</td>
+ <td class="ai">CertusBank, National Association</td>
+ <td class="closing">April 26, 2013</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="chipola.html">Chipola Community Bank</a></td>
+ <td class="city">Marianna</td>
+ <td class="state">FL</td>
+ <td class="cert">58034</td>
+ <td class="ai">First Federal Bank of Florida</td>
+ <td class="closing">April 19, 2013</td>
+ <td class="updated">May 16, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heritagebank-fl.html">Heritage Bank of North Florida</a></td>
+ <td class="city">Orange Park</td>
+ <td class="state">FL</td>
+ <td class="cert">26680</td>
+ <td class="ai">FirstAtlantic Bank</td>
+ <td class="closing">April 19, 2013</td>
+ <td class="updated">May 16, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstfederal-ky.html">First Federal Bank</a></td>
+ <td class="city">Lexington</td>
+ <td class="state">KY</td>
+ <td class="cert">29594</td>
+ <td class="ai">Your Community Bank</td>
+ <td class="closing">April 19, 2013</td>
+ <td class="updated">April 23, 2013</td>
+ </tr>
+ <td class="institution"><a href="goldcanyon.html">Gold Canyon Bank</a></td>
+ <td class="city">Gold Canyon</td>
+ <td class="state">AZ</td>
+ <td class="cert">58066</td>
+ <td class="ai">First Scottsdale Bank, National Association</td>
+ <td class="closing">April 5, 2013</td>
+ <td class="updated">April 9, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="frontier-ga.html">Frontier Bank</a></td>
+ <td class="city">LaGrange</td>
+ <td class="state">GA</td>
+ <td class="cert">16431</td>
+ <td class="ai">HeritageBank of the South</td>
+ <td class="closing">March 8, 2013</td>
+ <td class="updated">March 26, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="covenant-il.html">Covenant Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">22476</td>
+ <td class="ai">Liberty Bank and Trust Company</td>
+ <td class="closing">February 15, 2013</td>
+ <td class="updated">March 4, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="1stregents.html">1st Regents Bank</a></td>
+ <td class="city">Andover</td>
+ <td class="state">MN</td>
+ <td class="cert">57157</td>
+ <td class="ai">First Minnesota Bank</td>
+ <td class="closing">January 18, 2013</td>
+ <td class="updated">February 28, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westside.html">Westside Community Bank</a></td>
+ <td class="city">University Place</td>
+ <td class="state">WA</td>
+ <td class="cert">33997</td>
+ <td class="ai">Sunwest Bank</td>
+ <td class="closing">January 11, 2013</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cmbkozarks.html">Community Bank of the Ozarks</a></td>
+ <td class="city">Sunrise Beach</td>
+ <td class="state">MO</td>
+ <td class="cert">27331</td>
+ <td class="ai">Bank of Sullivan</td>
+ <td class="closing">December 14, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="hometown.html">Hometown Community Bank</a></td>
+ <td class="city">Braselton</td>
+ <td class="state">GA</td>
+ <td class="cert">57928</td>
+ <td class="ai">CertusBank, National Association</td>
+ <td class="closing">November 16, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cfnb.html">Citizens First National Bank</a></td>
+ <td class="city">Princeton</td>
+ <td class="state">IL</td>
+ <td class="cert">3731</td>
+ <td class="ai">Heartland Bank and Trust Company</td>
+ <td class="closing">November 2, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heritage_fl.html">Heritage Bank of Florida</a></td>
+ <td class="city">Lutz</td>
+ <td class="state">FL</td>
+ <td class="cert">35009</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">November 2, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="novabank.html">NOVA Bank</a></td>
+ <td class="city">Berwyn</td>
+ <td class="state">PA</td>
+ <td class="cert">27148</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">October 26, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="excelbank.html">Excel Bank</a></td>
+ <td class="city">Sedalia</td>
+ <td class="state">MO</td>
+ <td class="cert">19189</td>
+ <td class="ai">Simmons First National Bank</td>
+ <td class="closing">October 19, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firsteastside.html">First East Side Savings Bank</a></td>
+ <td class="city">Tamarac</td>
+ <td class="state">FL</td>
+ <td class="cert">28144</td>
+ <td class="ai">Stearns Bank N.A.</td>
+ <td class="closing">October 19, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="gulfsouth.html">GulfSouth Private Bank</a></td>
+ <td class="city">Destin</td>
+ <td class="state">FL</td>
+ <td class="cert">58073</td>
+ <td class="ai">SmartBank</td>
+ <td class="closing">October 19, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstunited.html">First United Bank</a></td>
+ <td class="city">Crete</td>
+ <td class="state">IL</td>
+ <td class="cert">20685</td>
+ <td class="ai">Old Plank Trail Community Bank, National Association</td>
+ <td class="closing">September 28, 2012</td>
+ <td class="updated">November 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="truman.html">Truman Bank</a></td>
+ <td class="city">St. Louis</td>
+ <td class="state">MO</td>
+ <td class="cert">27316</td>
+ <td class="ai">Simmons First National Bank</td>
+ <td class="closing">September 14, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcommbk_mn.html">First Commercial Bank</a></td>
+ <td class="city">Bloomington</td>
+ <td class="state">MN</td>
+ <td class="cert">35246</td>
+ <td class="ai">Republic Bank & Trust Company</td>
+ <td class="closing">September 7, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="waukegan.html">Waukegan Savings Bank</a></td>
+ <td class="city">Waukegan</td>
+ <td class="state">IL</td>
+ <td class="cert">28243</td>
+ <td class="ai">First Midwest Bank</td>
+ <td class="closing">August 3, 2012</td>
+ <td class="updated">October 11, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="jasper.html">Jasper Banking Company</a></td>
+ <td class="city">Jasper</td>
+ <td class="state">GA</td>
+ <td class="cert">16240</td>
+ <td class="ai">Stearns Bank N.A.</td>
+ <td class="closing">July 27, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="secondfederal.html">Second Federal Savings and Loan Association of Chicago</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">27986</td>
+ <td class="ai">Hinsdale Bank & Trust Company</td>
+ <td class="closing">July 20, 2012</td>
+ <td class="updated">January 14, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heartland.html">Heartland Bank</a></td>
+ <td class="city">Leawood</td>
+ <td class="state">KS</td>
+ <td class="cert">1361</td>
+ <td class="ai">Metcalf Bank</td>
+ <td class="closing">July 20, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cherokee.html">First Cherokee State Bank</a></td>
+ <td class="city">Woodstock</td>
+ <td class="state">GA</td>
+ <td class="cert">32711</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">July 20, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="georgiatrust.html">Georgia Trust Bank</a></td>
+ <td class="city">Buford</td>
+ <td class="state">GA</td>
+ <td class="cert">57847</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">July 20, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="royalpalm.html">The Royal Palm Bank of Florida</a></td>
+ <td class="city">Naples</td>
+ <td class="state">FL</td>
+ <td class="cert">57096</td>
+ <td class="ai">First National Bank of the Gulf Coast</td>
+ <td class="closing">July 20, 2012</td>
+ <td class="updated">January 7, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="glasgow.html">Glasgow Savings Bank</a></td>
+ <td class="city">Glasgow</td>
+ <td class="state">MO</td>
+ <td class="cert">1056</td>
+ <td class="ai">Regional Missouri Bank</td>
+ <td class="closing">July 13, 2012</td>
+ <td class="updated">October 11, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="montgomery.html">Montgomery Bank & Trust</a></td>
+ <td class="city">Ailey</td>
+ <td class="state">GA</td>
+ <td class="cert">19498</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">July 6, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="farmersbank.html">The Farmers Bank of Lynchburg</a></td>
+ <td class="city">Lynchburg</td>
+ <td class="state">TN</td>
+ <td class="cert">1690</td>
+ <td class="ai">Clayton Bank and Trust</td>
+ <td class="closing">June 15, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="securityexchange.html">Security Exchange Bank</a></td>
+ <td class="city">Marietta</td>
+ <td class="state">GA</td>
+ <td class="cert">35299</td>
+ <td class="ai">Fidelity Bank</td>
+ <td class="closing">June 15, 2012</td>
+ <td class="updated">October 10, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="putnam.html">Putnam State Bank</a></td>
+ <td class="city">Palatka</td>
+ <td class="state">FL</td>
+ <td class="cert">27405</td>
+ <td class="ai">Harbor Community Bank</td>
+ <td class="closing">June 15, 2012</td>
+ <td class="updated">October 10, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="waccamaw.html">Waccamaw Bank</a></td>
+ <td class="city">Whiteville</td>
+ <td class="state">NC</td>
+ <td class="cert">34515</td>
+ <td class="ai">First Community Bank</td>
+ <td class="closing">June 8, 2012</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ftsb.html">Farmers' and Traders' State Bank</a></td>
+ <td class="city">Shabbona</td>
+ <td class="state">IL</td>
+ <td class="cert">9257</td>
+ <td class="ai">First State Bank</td>
+ <td class="closing">June 8, 2012</td>
+ <td class="updated">October 10, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="carolina.html">Carolina Federal Savings Bank</a></td>
+ <td class="city">Charleston</td>
+ <td class="state">SC</td>
+ <td class="cert">35372</td>
+ <td class="ai">Bank of North Carolina</td>
+ <td class="closing">June 8, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcapital.html">First Capital Bank</a></td>
+ <td class="city">Kingfisher</td>
+ <td class="state">OK</td>
+ <td class="cert">416</td>
+ <td class="ai">F & M Bank</td>
+ <td class="closing">June 8, 2012</td>
+ <td class="updated">October 10, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="alabamatrust.html">Alabama Trust Bank, National Association</a></td>
+ <td class="city">Sylacauga</td>
+ <td class="state">AL</td>
+ <td class="cert">35224</td>
+ <td class="ai">Southern States Bank</td>
+ <td class="closing">May 18, 2012</td>
+ <td class="updated">May 20, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="securitybank.html">Security Bank, National Association</a></td>
+ <td class="city">North Lauderdale</td>
+ <td class="state">FL</td>
+ <td class="cert">23156</td>
+ <td class="ai">Banesco USA</td>
+ <td class="closing">May 4, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="palmdesert.html">Palm Desert National Bank</a></td>
+ <td class="city">Palm Desert</td>
+ <td class="state">CA</td>
+ <td class="cert">23632</td>
+ <td class="ai">Pacific Premier Bank</td>
+ <td class="closing">April 27, 2012</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="plantation.html">Plantation Federal Bank</a></td>
+ <td class="city">Pawleys Island</td>
+ <td class="state">SC</td>
+ <td class="cert">32503</td>
+ <td class="ai">First Federal Bank</td>
+ <td class="closing">April 27, 2012</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="interbank.html">Inter Savings Bank, fsb D/B/A InterBank, fsb</a></td>
+ <td class="city">Maple Grove</td>
+ <td class="state">MN</td>
+ <td class="cert">31495</td>
+ <td class="ai">Great Southern Bank</td>
+ <td class="closing">April 27, 2012</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="harvest.html">HarVest Bank of Maryland</a></td>
+ <td class="city">Gaithersburg</td>
+ <td class="state">MD</td>
+ <td class="cert">57766</td>
+ <td class="ai">Sonabank</td>
+ <td class="closing">April 27, 2012</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="easternshore.html">Bank of the Eastern Shore</a></td>
+ <td class="city">Cambridge</td>
+ <td class="state">MD</td>
+ <td class="cert">26759</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">April 27, 2012</td>
+ <td class="updated">October 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fortlee.html">Fort Lee Federal Savings Bank, FSB</a></td>
+ <td class="city">Fort Lee</td>
+ <td class="state">NJ</td>
+ <td class="cert">35527</td>
+ <td class="ai">Alma Bank</td>
+ <td class="closing">April 20, 2012</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fidelity.html">Fidelity Bank</a></td>
+ <td class="city">Dearborn</td>
+ <td class="state">MI</td>
+ <td class="cert">33883</td>
+ <td class="ai">The Huntington National Bank</td>
+ <td class="closing">March 30, 2012</td>
+ <td class="updated">May 16, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="premier-il.html">Premier Bank</a></td>
+ <td class="city">Wilmette</td>
+ <td class="state">IL</td>
+ <td class="cert">35419</td>
+ <td class="ai">International Bank of Chicago</td>
+ <td class="closing">March 23, 2012</td>
+ <td class="updated">October 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="covenant.html">Covenant Bank & Trust</a></td>
+ <td class="city">Rock Spring</td>
+ <td class="state">GA</td>
+ <td class="cert">58068</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">March 23, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newcity.html">New City Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">57597</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 9, 2012</td>
+ <td class="updated">October 29, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="global.html">Global Commerce Bank</a></td>
+ <td class="city">Doraville</td>
+ <td class="state">GA</td>
+ <td class="cert">34046</td>
+ <td class="ai">Metro City Bank</td>
+ <td class="closing">March 2, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="homesvgs.html">Home Savings of America</a></td>
+ <td class="city">Little Falls</td>
+ <td class="state">MN</td>
+ <td class="cert">29178</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">February 24, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cbg.html">Central Bank of Georgia</a></td>
+ <td class="city">Ellaville</td>
+ <td class="state">GA</td>
+ <td class="cert">5687</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">February 24, 2012</td>
+ <td class="updated">August 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="scbbank.html">SCB Bank</a></td>
+ <td class="city">Shelbyville</td>
+ <td class="state">IN</td>
+ <td class="cert">29761</td>
+ <td class="ai">First Merchants Bank, National Association</td>
+ <td class="closing">February 10, 2012</td>
+ <td class="updated">March 25, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cnbt.html">Charter National Bank and Trust</a></td>
+ <td class="city">Hoffman Estates</td>
+ <td class="state">IL</td>
+ <td class="cert">23187</td>
+ <td class="ai">Barrington Bank & Trust Company, National Association</td>
+ <td class="closing">February 10, 2012</td>
+ <td class="updated">March 25, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankeast.html">BankEast</a></td>
+ <td class="city">Knoxville</td>
+ <td class="state">TN</td>
+ <td class="cert">19869</td>
+ <td class="ai">U.S.Bank National Association</td>
+ <td class="closing">January 27, 2012</td>
+ <td class="updated">March 8, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="patriot-mn.html">Patriot Bank Minnesota</a></td>
+ <td class="city">Forest Lake</td>
+ <td class="state">MN</td>
+ <td class="cert">34823</td>
+ <td class="ai">First Resource Bank</td>
+ <td class="closing">January 27, 2012</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="tcb.html">Tennessee Commerce Bank</a></td>
+ <td class="city">Franklin</td>
+ <td class="state">TN</td>
+ <td class="cert">35296</td>
+ <td class="ai">Republic Bank & Trust Company</td>
+ <td class="closing">January 27, 2012</td>
+ <td class="updated">November 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fgbtcj.html">First Guaranty Bank and Trust Company of Jacksonville</a></td>
+ <td class="city">Jacksonville</td>
+ <td class="state">FL</td>
+ <td class="cert">16579</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">January 27, 2012</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americaneagle.html">American Eagle Savings Bank</a></td>
+ <td class="city">Boothwyn</td>
+ <td class="state">PA</td>
+ <td class="cert">31581</td>
+ <td class="ai">Capital Bank, N.A.</td>
+ <td class="closing">January 20, 2012</td>
+ <td class="updated">January 25, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firststatebank-ga.html">The First State Bank</a></td>
+ <td class="city">Stockbridge</td>
+ <td class="state">GA</td>
+ <td class="cert">19252</td>
+ <td class="ai">Hamilton State Bank</td>
+ <td class="closing">January 20, 2012</td>
+ <td class="updated">January 25, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cfsb.html">Central Florida State Bank</a></td>
+ <td class="city">Belleview</td>
+ <td class="state">FL</td>
+ <td class="cert">57186</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">January 20, 2012</td>
+ <td class="updated">January 25, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westernnatl.html">Western National Bank</a></td>
+ <td class="city">Phoenix</td>
+ <td class="state">AZ</td>
+ <td class="cert">57917</td>
+ <td class="ai">Washington Federal</td>
+ <td class="closing">December 16, 2011</td>
+ <td class="updated">August 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="premier-fl.html">Premier Community Bank of the Emerald Coast</a></td>
+ <td class="city">Crestview</td>
+ <td class="state">FL</td>
+ <td class="cert">58343</td>
+ <td class="ai">Summit Bank</td>
+ <td class="closing">December 16, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="centralprog.html">Central Progressive Bank</a></td>
+ <td class="city">Lacombe</td>
+ <td class="state">LA</td>
+ <td class="cert">19657</td>
+ <td class="ai">First NBC Bank</td>
+ <td class="closing">November 18, 2011</td>
+ <td class="updated">August 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="polkcounty.html">Polk County Bank</a></td>
+ <td class="city">Johnston</td>
+ <td class="state">IA</td>
+ <td class="cert">14194</td>
+ <td class="ai">Grinnell State Bank</td>
+ <td class="closing">November 18, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="rockmart.html">Community Bank of Rockmart</a></td>
+ <td class="city">Rockmart</td>
+ <td class="state">GA</td>
+ <td class="cert">57860</td>
+ <td class="ai">Century Bank of Georgia</td>
+ <td class="closing">November 10, 2011</td>
+ <td class="updated">August 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sunfirst.html">SunFirst Bank</a></td>
+ <td class="city">Saint George</td>
+ <td class="state">UT</td>
+ <td class="cert">57087</td>
+ <td class="ai">Cache Valley Bank</td>
+ <td class="closing">November 4, 2011</td>
+ <td class="updated">November 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="midcity.html">Mid City Bank, Inc.</a></td>
+ <td class="city">Omaha</td>
+ <td class="state">NE</td>
+ <td class="cert">19397</td>
+ <td class="ai">Premier Bank</td>
+ <td class="closing">November 4, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="allamerican.html ">All American Bank</a></td>
+ <td class="city">Des Plaines</td>
+ <td class="state">IL</td>
+ <td class="cert">57759</td>
+ <td class="ai">International Bank of Chicago</td>
+ <td class="closing">October 28, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commbanksco.html">Community Banks of Colorado</a></td>
+ <td class="city">Greenwood Village</td>
+ <td class="state">CO</td>
+ <td class="cert">21132</td>
+ <td class="ai">Bank Midwest, N.A.</td>
+ <td class="closing">October 21, 2011</td>
+ <td class="updated">January 2, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commcapbk.html">Community Capital Bank</a></td>
+ <td class="city">Jonesboro</td>
+ <td class="state">GA</td>
+ <td class="cert">57036</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">October 21, 2011</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="decatur.html">Decatur First Bank</a></td>
+ <td class="city">Decatur</td>
+ <td class="state">GA</td>
+ <td class="cert">34392</td>
+ <td class="ai">Fidelity Bank</td>
+ <td class="closing">October 21, 2011</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="oldharbor.html">Old Harbor Bank</a></td>
+ <td class="city">Clearwater</td>
+ <td class="state">FL</td>
+ <td class="cert">57537</td>
+ <td class="ai">1st United Bank</td>
+ <td class="closing">October 21, 2011</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="countrybank.html">Country Bank</a></td>
+ <td class="city">Aledo</td>
+ <td class="state">IL</td>
+ <td class="cert">35395</td>
+ <td class="ai">Blackhawk Bank & Trust</td>
+ <td class="closing">October 14, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firststatebank-nj.html">First State Bank</a></td>
+ <td class="city">Cranford</td>
+ <td class="state">NJ</td>
+ <td class="cert">58046</td>
+ <td class="ai">Northfield Bank</td>
+ <td class="closing">October 14, 2011</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="blueridge.html">Blue Ridge Savings Bank, Inc.</a></td>
+ <td class="city">Asheville</td>
+ <td class="state">NC</td>
+ <td class="cert">32347</td>
+ <td class="ai">Bank of North Carolina</td>
+ <td class="closing">October 14, 2011</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="piedmont-ga.html">Piedmont Community Bank</a></td>
+ <td class="city">Gray</td>
+ <td class="state">GA</td>
+ <td class="cert">57256</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">October 14, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sunsecurity.html">Sun Security Bank</a></td>
+ <td class="city">Ellington</td>
+ <td class="state">MO</td>
+ <td class="cert">20115</td>
+ <td class="ai">Great Southern Bank</td>
+ <td class="closing">October 7, 2011</td>
+ <td class="updated">November 7, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="riverbank.html">The RiverBank</a></td>
+ <td class="city">Wyoming</td>
+ <td class="state">MN</td>
+ <td class="cert">10216</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">October 7, 2011</td>
+ <td class="updated">November 7, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstintlbank.html">First International Bank</a></td>
+ <td class="city">Plano</td>
+ <td class="state">TX</td>
+ <td class="cert">33513</td>
+ <td class="ai">American First National Bank</td>
+ <td class="closing">September 30, 2011</td>
+ <td class="updated">October 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cbnc.html">Citizens Bank of Northern California</a></td>
+ <td class="city">Nevada City</td>
+ <td class="state">CA</td>
+ <td class="cert">33983</td>
+ <td class="ai">Tri Counties Bank</td>
+ <td class="closing">September 23, 2011</td>
+ <td class="updated">October 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="boc-va.html">Bank of the Commonwealth</a></td>
+ <td class="city">Norfolk</td>
+ <td class="state">VA</td>
+ <td class="cert">20408</td>
+ <td class="ai">Southern Bank and Trust Company</td>
+ <td class="closing">September 23, 2011</td>
+ <td class="updated">October 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fnbf.html">The First National Bank of Florida</a></td>
+ <td class="city">Milton</td>
+ <td class="state">FL</td>
+ <td class="cert">25155</td>
+ <td class="ai">CharterBank</td>
+ <td class="closing">September 9, 2011</td>
+ <td class="updated">September 6, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="creekside.html">CreekSide Bank</a></td>
+ <td class="city">Woodstock</td>
+ <td class="state">GA</td>
+ <td class="cert">58226</td>
+ <td class="ai">Georgia Commerce Bank</td>
+ <td class="closing">September 2, 2011</td>
+ <td class="updated">September 6, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="patriot.html">Patriot Bank of Georgia</a></td>
+ <td class="city">Cumming</td>
+ <td class="state">GA</td>
+ <td class="cert">58273</td>
+ <td class="ai">Georgia Commerce Bank</td>
+ <td class="closing">September 2, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstchoice-il.html">First Choice Bank</a></td>
+ <td class="city">Geneva</td>
+ <td class="state">IL</td>
+ <td class="cert">57212</td>
+ <td class="ai">Inland Bank & Trust</td>
+ <td class="closing">August 19, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstsouthern-ga.html">First Southern National Bank</a></td>
+ <td class="city">Statesboro</td>
+ <td class="state">GA</td>
+ <td class="cert">57239</td>
+ <td class="ai">Heritage Bank of the South</td>
+ <td class="closing">August 19, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lydian.html">Lydian Private Bank</a></td>
+ <td class="city">Palm Beach</td>
+ <td class="state">FL</td>
+ <td class="cert">35356</td>
+ <td class="ai">Sabadell United Bank, N.A.</td>
+ <td class="closing">August 19, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="publicsvgs.html">Public Savings Bank</a></td>
+ <td class="city">Huntingdon Valley</td>
+ <td class="state">PA</td>
+ <td class="cert">34130</td>
+ <td class="ai">Capital Bank, N.A.</td>
+ <td class="closing">August 18, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fnbo.html">The First National Bank of Olathe</a></td>
+ <td class="city">Olathe</td>
+ <td class="state">KS</td>
+ <td class="cert">4744</td>
+ <td class="ai">Enterprise Bank & Trust</td>
+ <td class="closing">August 12, 2011</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="whitman.html">Bank of Whitman</a></td>
+ <td class="city">Colfax</td>
+ <td class="state">WA</td>
+ <td class="cert">22528</td>
+ <td class="ai">Columbia State Bank</td>
+ <td class="closing">August 5, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="shorewood.html">Bank of Shorewood</a></td>
+ <td class="city">Shorewood</td>
+ <td class="state">IL</td>
+ <td class="cert">22637</td>
+ <td class="ai">Heartland Bank and Trust Company</td>
+ <td class="closing">August 5, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="integra.html">Integra Bank National Association</a></td>
+ <td class="city">Evansville</td>
+ <td class="state">IN</td>
+ <td class="cert">4392</td>
+ <td class="ai">Old National Bank</td>
+ <td class="closing">July 29, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankmeridian.html">BankMeridian, N.A.</a></td>
+ <td class="city">Columbia</td>
+ <td class="state">SC</td>
+ <td class="cert">58222</td>
+ <td class="ai">SCBT National Association</td>
+ <td class="closing">July 29, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="vbb.html">Virginia Business Bank</a></td>
+ <td class="city">Richmond</td>
+ <td class="state">VA</td>
+ <td class="cert">58283</td>
+ <td class="ai">Xenith Bank</td>
+ <td class="closing">July 29, 2011</td>
+ <td class="updated">October 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofchoice.html">Bank of Choice</a></td>
+ <td class="city">Greeley</td>
+ <td class="state">CO</td>
+ <td class="cert">2994</td>
+ <td class="ai">Bank Midwest, N.A.</td>
+ <td class="closing">July 22, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="landmark.html">LandMark Bank of Florida</a></td>
+ <td class="city">Sarasota</td>
+ <td class="state">FL</td>
+ <td class="cert">35244</td>
+ <td class="ai">American Momentum Bank</td>
+ <td class="closing">July 22, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="southshore.html">Southshore Community Bank</a></td>
+ <td class="city">Apollo Beach</td>
+ <td class="state">FL</td>
+ <td class="cert">58056</td>
+ <td class="ai">American Momentum Bank</td>
+ <td class="closing">July 22, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="summitbank.html">Summit Bank</a></td>
+ <td class="city">Prescott</td>
+ <td class="state">AZ</td>
+ <td class="cert">57442</td>
+ <td class="ai">The Foothills Bank</td>
+ <td class="closing">July 15, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstpeoples.html">First Peoples Bank</a></td>
+ <td class="city">Port St. Lucie</td>
+ <td class="state">FL</td>
+ <td class="cert">34870</td>
+ <td class="ai">Premier American Bank, N.A.</td>
+ <td class="closing">July 15, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="hightrust.html">High Trust Bank</a></td>
+ <td class="city">Stockbridge</td>
+ <td class="state">GA</td>
+ <td class="cert">19554</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">July 15, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="onegeorgia.html">One Georgia Bank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">58238</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">July 15, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="signaturebank.html">Signature Bank</a></td>
+ <td class="city">Windsor</td>
+ <td class="state">CO</td>
+ <td class="cert">57835</td>
+ <td class="ai">Points West Community Bank</td>
+ <td class="closing">July 8, 2011</td>
+ <td class="updated">October 26, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="coloradocapital.html">Colorado Capital Bank</a></td>
+ <td class="city">Castle Rock</td>
+ <td class="state">CO</td>
+ <td class="cert">34522</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">July 8, 2011</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstchicago.html">First Chicago Bank & Trust</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">27935</td>
+ <td class="ai">Northbrook Bank & Trust Company</td>
+ <td class="closing">July 8, 2011</td>
+ <td class="updated">September 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mountain.html">Mountain Heritage Bank</a></td>
+ <td class="city">Clayton</td>
+ <td class="state">GA</td>
+ <td class="cert">57593</td>
+ <td class="ai">First American Bank and Trust Company</td>
+ <td class="closing">June 24, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fcbtb.html">First Commercial Bank of Tampa Bay</a></td>
+ <td class="city">Tampa</td>
+ <td class="state">FL</td>
+ <td class="cert">27583</td>
+ <td class="ai">Stonegate Bank</td>
+ <td class="closing">June 17, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mcintoshstate.html">McIntosh State Bank</a></td>
+ <td class="city">Jackson</td>
+ <td class="state">GA</td>
+ <td class="cert">19237</td>
+ <td class="ai">Hamilton State Bank</td>
+ <td class="closing">June 17, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="atlanticbanktrust.html">Atlantic Bank and Trust</a></td>
+ <td class="city">Charleston</td>
+ <td class="state">SC</td>
+ <td class="cert">58420</td>
+ <td class="ai">First Citizens Bank and Trust Company, Inc.</td>
+ <td class="closing">June 3, 2011</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstheritage.html">First Heritage Bank</a></td>
+ <td class="city">Snohomish</td>
+ <td class="state">WA</td>
+ <td class="cert">23626</td>
+ <td class="ai">Columbia State Bank</td>
+ <td class="closing">May 27, 2011</td>
+ <td class="updated">January 28, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="summit.html">Summit Bank</a></td>
+ <td class="city">Burlington</td>
+ <td class="state">WA</td>
+ <td class="cert">513</td>
+ <td class="ai">Columbia State Bank</td>
+ <td class="closing">May 20, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fgbc.html">First Georgia Banking Company</a></td>
+ <td class="city">Franklin</td>
+ <td class="state">GA</td>
+ <td class="cert">57647</td>
+ <td class="ai">CertusBank, National Association</td>
+ <td class="closing">May 20, 2011</td>
+ <td class="updated">November 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="atlanticsthrn.html">Atlantic Southern Bank</a></td>
+ <td class="city">Macon</td>
+ <td class="state">GA</td>
+ <td class="cert">57213</td>
+ <td class="ai">CertusBank, National Association</td>
+ <td class="closing">May 20, 2011</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="coastal_fl.html">Coastal Bank</a></td>
+ <td class="city">Cocoa Beach</td>
+ <td class="state">FL</td>
+ <td class="cert">34898</td>
+ <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td>
+ <td class="closing">May 6, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="communitycentral.html">Community Central Bank</a></td>
+ <td class="city">Mount Clemens</td>
+ <td class="state">MI</td>
+ <td class="cert">34234</td>
+ <td class="ai">Talmer Bank & Trust</td>
+ <td class="closing">April 29, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="parkavenue_ga.html">The Park Avenue Bank</a></td>
+ <td class="city">Valdosta</td>
+ <td class="state">GA</td>
+ <td class="cert">19797</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">April 29, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstchoice.html">First Choice Community Bank</a></td>
+ <td class="city">Dallas</td>
+ <td class="state">GA</td>
+ <td class="cert">58539</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">April 29, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cortez.html">Cortez Community Bank</a></td>
+ <td class="city">Brooksville</td>
+ <td class="state">FL</td>
+ <td class="cert">57625</td>
+ <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td>
+ <td class="closing">April 29, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fnbcf.html">First National Bank of Central Florida</a></td>
+ <td class="city">Winter Park</td>
+ <td class="state">FL</td>
+ <td class="cert">26297</td>
+ <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td>
+ <td class="closing">April 29, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heritage_ms.html">Heritage Banking Group</a></td>
+ <td class="city">Carthage</td>
+ <td class="state">MS</td>
+ <td class="cert">14273</td>
+ <td class="ai">Trustmark National Bank</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="rosemount.html">Rosemount National Bank</a></td>
+ <td class="city">Rosemount</td>
+ <td class="state">MN</td>
+ <td class="cert">24099</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="superior_al.html">Superior Bank</a></td>
+ <td class="city">Birmingham</td>
+ <td class="state">AL</td>
+ <td class="cert">17750</td>
+ <td class="ai">Superior Bank, National Association</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="nexity.html">Nexity Bank</a></td>
+ <td class="city">Birmingham</td>
+ <td class="state">AL</td>
+ <td class="cert">19794</td>
+ <td class="ai">AloStar Bank of Commerce</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newhorizons.html">New Horizons Bank</a></td>
+ <td class="city">East Ellijay</td>
+ <td class="state">GA</td>
+ <td class="cert">57705</td>
+ <td class="ai">Citizens South Bank</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bartow.html">Bartow County Bank</a></td>
+ <td class="city">Cartersville</td>
+ <td class="state">GA</td>
+ <td class="cert">21495</td>
+ <td class="ai">Hamilton State Bank</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="nevadacommerce.html">Nevada Commerce Bank</a></td>
+ <td class="city">Las Vegas</td>
+ <td class="state">NV</td>
+ <td class="cert">35418</td>
+ <td class="ai">City National Bank</td>
+ <td class="closing">April 8, 2011</td>
+ <td class="updated">September 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westernsprings.html">Western Springs National Bank and Trust</a></td>
+ <td class="city">Western Springs</td>
+ <td class="state">IL</td>
+ <td class="cert">10086</td>
+ <td class="ai">Heartland Bank and Trust Company</td>
+ <td class="closing">April 8, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofcommerce.html">The Bank of Commerce</a></td>
+ <td class="city">Wood Dale</td>
+ <td class="state">IL</td>
+ <td class="cert">34292</td>
+ <td class="ai">Advantage National Bank Group</td>
+ <td class="closing">March 25, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="legacy-wi.html">Legacy Bank</a></td>
+ <td class="city">Milwaukee</td>
+ <td class="state">WI</td>
+ <td class="cert">34818</td>
+ <td class="ai">Seaway Bank and Trust Company</td>
+ <td class="closing">March 11, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstnatldavis.html">First National Bank of Davis</a></td>
+ <td class="city">Davis</td>
+ <td class="state">OK</td>
+ <td class="cert">4077</td>
+ <td class="ai">The Pauls Valley National Bank</td>
+ <td class="closing">March 11, 2011</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="valleycomm.html">Valley Community Bank</a></td>
+ <td class="city">St. Charles</td>
+ <td class="state">IL</td>
+ <td class="cert">34187</td>
+ <td class="ai">First State Bank</td>
+ <td class="closing">February 25, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sanluistrust.html">San Luis Trust Bank, FSB</a></td>
+ <td class="city">San Luis Obispo</td>
+ <td class="state">CA</td>
+ <td class="cert">34783</td>
+ <td class="ai">First California Bank</td>
+ <td class="closing">February 18, 2011</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="charteroak.html">Charter Oak Bank</a></td>
+ <td class="city">Napa</td>
+ <td class="state">CA</td>
+ <td class="cert">57855</td>
+ <td class="ai">Bank of Marin</td>
+ <td class="closing">February 18, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizensbk_ga.html">Citizens Bank of Effingham</a></td>
+ <td class="city">Springfield</td>
+ <td class="state">GA</td>
+ <td class="cert">34601</td>
+ <td class="ai">Heritage Bank of the South</td>
+ <td class="closing">February 18, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="habersham.html">Habersham Bank</a></td>
+ <td class="city">Clarkesville</td>
+ <td class="state">GA</td>
+ <td class="cert">151</td>
+ <td class="ai">SCBT National Association</td>
+ <td class="closing">February 18, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="canyonstate.html">Canyon National Bank</a></td>
+ <td class="city">Palm Springs</td>
+ <td class="state">CA</td>
+ <td class="cert">34692</td>
+ <td class="ai">Pacific Premier Bank</td>
+ <td class="closing">February 11, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="badgerstate.html">Badger State Bank</a></td>
+ <td class="city">Cassville</td>
+ <td class="state">WI</td>
+ <td class="cert">13272</td>
+ <td class="ai">Royal Bank</td>
+ <td class="closing">February 11, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peoplesstatebank.html">Peoples State Bank</a></td>
+ <td class="city">Hamtramck</td>
+ <td class="state">MI</td>
+ <td class="cert">14939</td>
+ <td class="ai">First Michigan Bank</td>
+ <td class="closing">February 11, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sunshinestate.html">Sunshine State Community Bank</a></td>
+ <td class="city">Port Orange</td>
+ <td class="state">FL</td>
+ <td class="cert">35478</td>
+ <td class="ai">Premier American Bank, N.A.</td>
+ <td class="closing">February 11, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commfirst_il.html">Community First Bank Chicago</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">57948</td>
+ <td class="ai">Northbrook Bank & Trust Company</td>
+ <td class="closing">February 4, 2011</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="northgabank.html">North Georgia Bank</a></td>
+ <td class="city">Watkinsville</td>
+ <td class="state">GA</td>
+ <td class="cert">35242</td>
+ <td class="ai">BankSouth</td>
+ <td class="closing">February 4, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americantrust.html">American Trust Bank</a></td>
+ <td class="city">Roswell</td>
+ <td class="state">GA</td>
+ <td class="cert">57432</td>
+ <td class="ai">Renasant Bank</td>
+ <td class="closing">February 4, 2011</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcomm_nm.html">First Community Bank</a></td>
+ <td class="city">Taos</td>
+ <td class="state">NM</td>
+ <td class="cert">12261</td>
+ <td class="ai">U.S. Bank, N.A.</td>
+ <td class="closing">January 28, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstier.html">FirsTier Bank</a></td>
+ <td class="city">Louisville</td>
+ <td class="state">CO</td>
+ <td class="cert">57646</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">January 28, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="evergreenstatewi.html">Evergreen State Bank</a></td>
+ <td class="city">Stoughton</td>
+ <td class="state">WI</td>
+ <td class="cert">5328</td>
+ <td class="ai">McFarland State Bank</td>
+ <td class="closing">January 28, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firststatebank_ok.html">The First State Bank</a></td>
+ <td class="city">Camargo</td>
+ <td class="state">OK</td>
+ <td class="cert">2303</td>
+ <td class="ai">Bank 7</td>
+ <td class="closing">January 28, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="unitedwestern.html">United Western Bank</a></td>
+ <td class="city">Denver</td>
+ <td class="state">CO</td>
+ <td class="cert">31293</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">January 21, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofasheville.html">The Bank of Asheville</a></td>
+ <td class="city">Asheville</td>
+ <td class="state">NC</td>
+ <td class="cert">34516</td>
+ <td class="ai">First Bank</td>
+ <td class="closing">January 21, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commsouth.html">CommunitySouth Bank & Trust</a></td>
+ <td class="city">Easley</td>
+ <td class="state">SC</td>
+ <td class="cert">57868</td>
+ <td class="ai">CertusBank, National Association</td>
+ <td class="closing">January 21, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="enterprise.html">Enterprise Banking Company</a></td>
+ <td class="city">McDonough</td>
+ <td class="state">GA</td>
+ <td class="cert">19758</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">January 21, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="oglethorpe.html">Oglethorpe Bank</a></td>
+ <td class="city">Brunswick</td>
+ <td class="state">GA</td>
+ <td class="cert">57440</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">January 14, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="legacybank.html">Legacy Bank</a></td>
+ <td class="city">Scottsdale</td>
+ <td class="state">AZ</td>
+ <td class="cert">57820</td>
+ <td class="ai">Enterprise Bank & Trust</td>
+ <td class="closing">January 7, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcommercial.html">First Commercial Bank of Florida</a></td>
+ <td class="city">Orlando</td>
+ <td class="state">FL</td>
+ <td class="cert">34965</td>
+ <td class="ai">First Southern Bank</td>
+ <td class="closing">January 7, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="communitynatl.html">Community National Bank</a></td>
+ <td class="city">Lino Lakes</td>
+ <td class="state">MN</td>
+ <td class="cert">23306</td>
+ <td class="ai">Farmers & Merchants Savings Bank</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstsouthern.html">First Southern Bank</a></td>
+ <td class="city">Batesville</td>
+ <td class="state">AR</td>
+ <td class="cert">58052</td>
+ <td class="ai">Southern Bank</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="unitedamericas.html">United Americas Bank, N.A.</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">35065</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="appalachianga.html">Appalachian Community Bank, FSB</a></td>
+ <td class="city">McCaysville</td>
+ <td class="state">GA</td>
+ <td class="cert">58495</td>
+ <td class="ai">Peoples Bank of East Tennessee</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="chestatee.html">Chestatee State Bank</a></td>
+ <td class="city">Dawsonville</td>
+ <td class="state">GA</td>
+ <td class="cert">34578</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofmiami.html">The Bank of Miami,N.A.</a></td>
+ <td class="city">Coral Gables</td>
+ <td class="state">FL</td>
+ <td class="cert">19040</td>
+ <td class="ai">1st United Bank</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="earthstar.html">Earthstar Bank</a></td>
+ <td class="city">Southampton</td>
+ <td class="state">PA</td>
+ <td class="cert">35561</td>
+ <td class="ai">Polonia Bank</td>
+ <td class="closing">December 10, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="paramount.html">Paramount Bank</a></td>
+ <td class="city">Farmington Hills</td>
+ <td class="state">MI</td>
+ <td class="cert">34673</td>
+ <td class="ai">Level One Bank</td>
+ <td class="closing">December 10, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstbanking.html">First Banking Center</a></td>
+ <td class="city">Burlington</td>
+ <td class="state">WI</td>
+ <td class="cert">5287</td>
+ <td class="ai">First Michigan Bank</td>
+ <td class="closing">November 19, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="allegbank.html">Allegiance Bank of North America</a></td>
+ <td class="city">Bala Cynwyd</td>
+ <td class="state">PA</td>
+ <td class="cert">35078</td>
+ <td class="ai">VIST Bank</td>
+ <td class="closing">November 19, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="gulfstate.html">Gulf State Community Bank</a></td>
+ <td class="city">Carrabelle</td>
+ <td class="state">FL</td>
+ <td class="cert">20340</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">November 19, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="copperstar.html">Copper Star Bank</a></td>
+ <td class="city">Scottsdale</td>
+ <td class="state">AZ</td>
+ <td class="cert">35463</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">November 12, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="darbybank.html">Darby Bank & Trust Co.</a></td>
+ <td class="city">Vidalia</td>
+ <td class="state">GA</td>
+ <td class="cert">14580</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">November 12, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="tifton.html">Tifton Banking Company</a></td>
+ <td class="city">Tifton</td>
+ <td class="state">GA</td>
+ <td class="cert">57831</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">November 12, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstvietnamese.html">First Vietnamese American Bank</a><br><a href="firstvietnamese_viet.pdf">In Vietnamese</a></td>
+ <td class="city">Westminster</td>
+ <td class="state">CA</td>
+ <td class="cert">57885</td>
+ <td class="ai">Grandpoint Bank</td>
+ <td class="closing">November 5, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="piercecommercial.html">Pierce Commercial Bank</a></td>
+ <td class="city">Tacoma</td>
+ <td class="state">WA</td>
+ <td class="cert">34411</td>
+ <td class="ai">Heritage Bank</td>
+ <td class="closing">November 5, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westerncommercial_ca.html">Western Commercial Bank</a></td>
+ <td class="city">Woodland Hills</td>
+ <td class="state">CA</td>
+ <td class="cert">58087</td>
+ <td class="ai">First California Bank</td>
+ <td class="closing">November 5, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="kbank.html">K Bank</a></td>
+ <td class="city">Randallstown</td>
+ <td class="state">MD</td>
+ <td class="cert">31263</td>
+ <td class="ai">Manufacturers and Traders Trust Company (M&T Bank)</td>
+ <td class="closing">November 5, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstazfsb.html">First Arizona Savings, A FSB</a></td>
+ <td class="city">Scottsdale</td>
+ <td class="state">AZ</td>
+ <td class="cert">32582</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="hillcrest_ks.html">Hillcrest Bank</a></td>
+ <td class="city">Overland Park</td>
+ <td class="state">KS</td>
+ <td class="cert">22173</td>
+ <td class="ai">Hillcrest Bank, N.A.</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstsuburban.html">First Suburban National Bank</a></td>
+ <td class="city">Maywood</td>
+ <td class="state">IL</td>
+ <td class="cert">16089</td>
+ <td class="ai">Seaway Bank and Trust Company</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fnbbarnesville.html">The First National Bank of Barnesville</a></td>
+ <td class="city">Barnesville</td>
+ <td class="state">GA</td>
+ <td class="cert">2119</td>
+ <td class="ai">United Bank</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="gordon.html">The Gordon Bank</a></td>
+ <td class="city">Gordon</td>
+ <td class="state">GA</td>
+ <td class="cert">33904</td>
+ <td class="ai">Morris Bank</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="progress_fl.html">Progress Bank of Florida</a></td>
+ <td class="city">Tampa</td>
+ <td class="state">FL</td>
+ <td class="cert">32251</td>
+ <td class="ai">Bay Cities Bank</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstbankjacksonville.html">First Bank of Jacksonville</a></td>
+ <td class="city">Jacksonville</td>
+ <td class="state">FL</td>
+ <td class="cert">27573</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="premier_mo.html">Premier Bank</a></td>
+ <td class="city">Jefferson City</td>
+ <td class="state">MO</td>
+ <td class="cert">34016</td>
+ <td class="ai">Providence Bank</td>
+ <td class="closing">October 15, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westbridge.html">WestBridge Bank and Trust Company</a></td>
+ <td class="city">Chesterfield</td>
+ <td class="state">MO</td>
+ <td class="cert">58205</td>
+ <td class="ai">Midland States Bank</td>
+ <td class="closing">October 15, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="securitysavingsfsb.html">Security Savings Bank, F.S.B.</a></td>
+ <td class="city">Olathe</td>
+ <td class="state">KS</td>
+ <td class="cert">30898</td>
+ <td class="ai">Simmons First National Bank</td>
+ <td class="closing">October 15, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="shoreline.html">Shoreline Bank</a></td>
+ <td class="city">Shoreline</td>
+ <td class="state">WA</td>
+ <td class="cert">35250</td>
+ <td class="ai">GBC International Bank</td>
+ <td class="closing">October 1, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="wakulla.html">Wakulla Bank</a></td>
+ <td class="city">Crawfordville</td>
+ <td class="state">FL</td>
+ <td class="cert">21777</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">October 1, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="northcounty.html">North County Bank</a></td>
+ <td class="city">Arlington</td>
+ <td class="state">WA</td>
+ <td class="cert">35053</td>
+ <td class="ai">Whidbey Island Bank</td>
+ <td class="closing">September 24, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="haventrust_fl.html">Haven Trust Bank Florida</a></td>
+ <td class="city">Ponte Vedra Beach</td>
+ <td class="state">FL</td>
+ <td class="cert">58308</td>
+ <td class="ai">First Southern Bank</td>
+ <td class="closing">September 24, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="maritimesavings.html">Maritime Savings Bank</a></td>
+ <td class="city">West Allis</td>
+ <td class="state">WI</td>
+ <td class="cert">28612</td>
+ <td class="ai">North Shore Bank, FSB</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bramblesavings.html">Bramble Savings Bank</a></td>
+ <td class="city">Milford</td>
+ <td class="state">OH</td>
+ <td class="cert">27808</td>
+ <td class="ai">Foundation Bank</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peoplesbank_ga.html">The Peoples Bank</a></td>
+ <td class="city">Winder</td>
+ <td class="state">GA</td>
+ <td class="cert">182</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcommerce_ga.html">First Commerce Community Bank</a></td>
+ <td class="city">Douglasville</td>
+ <td class="state">GA</td>
+ <td class="cert">57448</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ellijay.html">Bank of Ellijay</a></td>
+ <td class="city">Ellijay</td>
+ <td class="state">GA</td>
+ <td class="cert">58197</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="isnbank.html">ISN Bank</a></td>
+ <td class="city">Cherry Hill</td>
+ <td class="state">NJ</td>
+ <td class="cert">57107</td>
+ <td class="ai">Customers Bank</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="horizonfl.html">Horizon Bank</a></td>
+ <td class="city">Bradenton</td>
+ <td class="state">FL</td>
+ <td class="cert">35061</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">September 10, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sonoma.html">Sonoma Valley Bank</a></td>
+ <td class="city">Sonoma</td>
+ <td class="state">CA</td>
+ <td class="cert">27259</td>
+ <td class="ai">Westamerica Bank</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lospadres.html">Los Padres Bank</a></td>
+ <td class="city">Solvang</td>
+ <td class="state">CA</td>
+ <td class="cert">32165</td>
+ <td class="ai">Pacific Western Bank</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="butte.html">Butte Community Bank</a></td>
+ <td class="city">Chico</td>
+ <td class="state">CA</td>
+ <td class="cert">33219</td>
+ <td class="ai">Rabobank, N.A.</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pacificbk.html">Pacific State Bank</a></td>
+ <td class="city">Stockton</td>
+ <td class="state">CA</td>
+ <td class="cert">27090</td>
+ <td class="ai">Rabobank, N.A.</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="shorebank.html">ShoreBank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">15640</td>
+ <td class="ai">Urban Partnership Bank</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">May 16, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="imperialsvgs.html">Imperial Savings and Loan Association</a></td>
+ <td class="city">Martinsville</td>
+ <td class="state">VA</td>
+ <td class="cert">31623</td>
+ <td class="ai">River Community Bank, N.A.</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="inatbank.html">Independent National Bank</a></td>
+ <td class="city">Ocala</td>
+ <td class="state">FL</td>
+ <td class="cert">27344</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cnbbartow.html">Community National Bank at Bartow</a></td>
+ <td class="city">Bartow</td>
+ <td class="state">FL</td>
+ <td class="cert">25266</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="palosbank.html">Palos Bank and Trust Company</a></td>
+ <td class="city">Palos Heights</td>
+ <td class="state">IL</td>
+ <td class="cert">17599</td>
+ <td class="ai">First Midwest Bank</td>
+ <td class="closing">August 13, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ravenswood.html">Ravenswood Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">34231</td>
+ <td class="ai">Northbrook Bank & Trust Company</td>
+ <td class="closing">August 6, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="libertyor.html">LibertyBank</a></td>
+ <td class="city">Eugene</td>
+ <td class="state">OR</td>
+ <td class="cert">31964</td>
+ <td class="ai">Home Federal Bank</td>
+ <td class="closing">July 30, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cowlitz.html">The Cowlitz Bank</a></td>
+ <td class="city">Longview</td>
+ <td class="state">WA</td>
+ <td class="cert">22643</td>
+ <td class="ai">Heritage Bank</td>
+ <td class="closing">July 30, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="coastal.html">Coastal Community Bank</a></td>
+ <td class="city">Panama City Beach</td>
+ <td class="state">FL</td>
+ <td class="cert">9619</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">July 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bayside.html">Bayside Savings Bank</a></td>
+ <td class="city">Port Saint Joe</td>
+ <td class="state">FL</td>
+ <td class="cert">57669</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">July 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="northwestga.html">Northwest Bank & Trust</a></td>
+ <td class="city">Acworth</td>
+ <td class="state">GA</td>
+ <td class="cert">57658</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="homevalleyor.html">Home Valley Bank</a></td>
+ <td class="city">Cave Junction</td>
+ <td class="state">OR</td>
+ <td class="cert">23181</td>
+ <td class="ai">South Valley Bank & Trust</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="southwestusanv.html">SouthwestUSA Bank</a></td>
+ <td class="city">Las Vegas</td>
+ <td class="state">NV</td>
+ <td class="cert">35434</td>
+ <td class="ai">Plaza Bank</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="communitysecmn.html">Community Security Bank</a></td>
+ <td class="city">New Prague</td>
+ <td class="state">MN</td>
+ <td class="cert">34486</td>
+ <td class="ai">Roundbank</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="thunderbankks.html">Thunder Bank</a></td>
+ <td class="city">Sylvan Grove</td>
+ <td class="state">KS</td>
+ <td class="cert">10506</td>
+ <td class="ai">The Bennington State Bank</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">September 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="williamsburgsc.html">Williamsburg First National Bank</a></td>
+ <td class="city">Kingstree</td>
+ <td class="state">SC</td>
+ <td class="cert">17837</td>
+ <td class="ai">First Citizens Bank and Trust Company, Inc.</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="crescentga.html">Crescent Bank and Trust Company</a></td>
+ <td class="city">Jasper</td>
+ <td class="state">GA</td>
+ <td class="cert">27559</td>
+ <td class="ai">Renasant Bank</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sterlingfl.html">Sterling Bank</a></td>
+ <td class="city">Lantana</td>
+ <td class="state">FL</td>
+ <td class="cert">32536</td>
+ <td class="ai">IBERIABANK</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mainstsvgs.html">Mainstreet Savings Bank, FSB</a></td>
+ <td class="city">Hastings</td>
+ <td class="state">MI</td>
+ <td class="cert">28136</td>
+ <td class="ai">Commercial Bank</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">September 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="oldecypress.html">Olde Cypress Community Bank</a></td>
+ <td class="city">Clewiston</td>
+ <td class="state">FL</td>
+ <td class="cert">28864</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="turnberry.html">Turnberry Bank</a></td>
+ <td class="city">Aventura</td>
+ <td class="state">FL</td>
+ <td class="cert">32280</td>
+ <td class="ai">NAFH National Bank</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="metrobankfl.html">Metro Bank of Dade County</a></td>
+ <td class="city">Miami</td>
+ <td class="state">FL</td>
+ <td class="cert">25172</td>
+ <td class="ai">NAFH National Bank</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstnatlsc.html">First National Bank of the South</a></td>
+ <td class="city">Spartanburg</td>
+ <td class="state">SC</td>
+ <td class="cert">35383</td>
+ <td class="ai">NAFH National Bank</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="woodlands.html">Woodlands Bank</a></td>
+ <td class="city">Bluffton</td>
+ <td class="state">SC</td>
+ <td class="cert">32571</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="homenatlok.html">Home National Bank</a></td>
+ <td class="city">Blackwell</td>
+ <td class="state">OK</td>
+ <td class="cert">11636</td>
+ <td class="ai">RCB Bank</td>
+ <td class="closing">July 9, 2010</td>
+ <td class="updated">December 10, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="usabankny.html">USA Bank</a></td>
+ <td class="city">Port Chester</td>
+ <td class="state">NY</td>
+ <td class="cert">58072</td>
+ <td class="ai">New Century Bank</td>
+ <td class="closing">July 9, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="idealfedsvngsmd.html">Ideal Federal Savings Bank</a></td>
+ <td class="city">Baltimore</td>
+ <td class="state">MD</td>
+ <td class="cert">32456</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">July 9, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="baynatlmd.html">Bay National Bank</a></td>
+ <td class="city">Baltimore</td>
+ <td class="state">MD</td>
+ <td class="cert">35462</td>
+ <td class="ai">Bay Bank, FSB</td>
+ <td class="closing">July 9, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="highdesertnm.html">High Desert State Bank</a></td>
+ <td class="city">Albuquerque</td>
+ <td class="state">NM</td>
+ <td class="cert">35279</td>
+ <td class="ai">First American Bank</td>
+ <td class="closing">June 25, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstnatga.html">First National Bank</a></td>
+ <td class="city">Savannah</td>
+ <td class="state">GA</td>
+ <td class="cert">34152</td>
+ <td class="ai">The Savannah Bank, N.A.</td>
+ <td class="closing">June 25, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peninsulafl.html">Peninsula Bank</a></td>
+ <td class="city">Englewood</td>
+ <td class="state">FL</td>
+ <td class="cert">26563</td>
+ <td class="ai">Premier American Bank, N.A.</td>
+ <td class="closing">June 25, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="nevsecbank.html">Nevada Security Bank</a></td>
+ <td class="city">Reno</td>
+ <td class="state">NV</td>
+ <td class="cert">57110</td>
+ <td class="ai">Umpqua Bank</td>
+ <td class="closing">June 18, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="washfirstintl.html">Washington First International Bank</a></td>
+ <td class="city">Seattle</td>
+ <td class="state">WA</td>
+ <td class="cert">32955</td>
+ <td class="ai">East West Bank</td>
+ <td class="closing">June 11, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="tieronebankne.html">TierOne Bank</a></td>
+ <td class="city">Lincoln</td>
+ <td class="state">NE</td>
+ <td class="cert">29341</td>
+ <td class="ai">Great Western Bank</td>
+ <td class="closing">June 4, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="arcolail.html">Arcola Homestead Savings Bank</a></td>
+ <td class="city">Arcola</td>
+ <td class="state">IL</td>
+ <td class="cert">31813</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">June 4, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstnatms.html">First National Bank</a></td>
+ <td class="city">Rosedale</td>
+ <td class="state">MS</td>
+ <td class="cert">15814</td>
+ <td class="ai">The Jefferson Bank</td>
+ <td class="closing">June 4, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="swbnevada.html">Sun West Bank</a></td>
+ <td class="city">Las Vegas</td>
+ <td class="state">NV</td>
+ <td class="cert">34785</td>
+ <td class="ai">City National Bank</td>
+ <td class="closing">May 28, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="graniteca.html">Granite Community Bank, NA</a></td>
+ <td class="city">Granite Bay</td>
+ <td class="state">CA</td>
+ <td class="cert">57315</td>
+ <td class="ai">Tri Counties Bank</td>
+ <td class="closing">May 28, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankoffloridatb.html">Bank of Florida - Tampa</a></td>
+ <td class="city">Tampa</td>
+ <td class="state">FL</td>
+ <td class="cert">57814</td>
+ <td class="ai">EverBank</td>
+ <td class="closing">May 28, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankoffloridasw.html">Bank of Florida - Southwest</a></td>
+ <td class="city">Naples</td>
+ <td class="state">FL</td>
+ <td class="cert">35106</td>
+ <td class="ai">EverBank</td>
+ <td class="closing">May 28, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankoffloridase.html">Bank of Florida - Southeast</a></td>
+ <td class="city">Fort Lauderdale</td>
+ <td class="state">FL</td>
+ <td class="cert">57360</td>
+ <td class="ai">EverBank</td>
+ <td class="closing">May 28, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pinehurstmn.html">Pinehurst Bank</a></td>
+ <td class="city">Saint Paul</td>
+ <td class="state">MN</td>
+ <td class="cert">57735</td>
+ <td class="ai">Coulee Bank</td>
+ <td class="closing">May 21, 2010</td>
+ <td class="updated">October 26, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="midwestil.html">Midwest Bank and Trust Company</a></td>
+ <td class="city">Elmwood Park</td>
+ <td class="state">IL</td>
+ <td class="cert">18117</td>
+ <td class="ai">FirstMerit Bank, N.A.</td>
+ <td class="closing">May 14, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="swcmntymo.html">Southwest Community Bank</a></td>
+ <td class="city">Springfield</td>
+ <td class="state">MO</td>
+ <td class="cert">34255</td>
+ <td class="ai">Simmons First National Bank</td>
+ <td class="closing">May 14, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newlibertymi.html">New Liberty Bank</a></td>
+ <td class="city">Plymouth</td>
+ <td class="state">MI</td>
+ <td class="cert">35586</td>
+ <td class="ai">Bank of Ann Arbor</td>
+ <td class="closing">May 14, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="satillacmntyga.html">Satilla Community Bank</a></td>
+ <td class="city">Saint Marys</td>
+ <td class="state">GA</td>
+ <td class="cert">35114</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">May 14, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="1stpacific.html">1st Pacific Bank of California</a></td>
+ <td class="city">San Diego</td>
+ <td class="state">CA</td>
+ <td class="cert">35517</td>
+ <td class="ai">City National Bank</td>
+ <td class="closing">May 7, 2010</td>
+ <td class="updated">December 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="townebank.html">Towne Bank of Arizona</a></td>
+ <td class="city">Mesa</td>
+ <td class="state">AZ</td>
+ <td class="cert">57697</td>
+ <td class="ai">Commerce Bank of Arizona</td>
+ <td class="closing">May 7, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="accessbank.html">Access Bank</a></td>
+ <td class="city">Champlin</td>
+ <td class="state">MN</td>
+ <td class="cert">16476</td>
+ <td class="ai">PrinsBank</td>
+ <td class="closing">May 7, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bonifay.html">The Bank of Bonifay</a></td>
+ <td class="city">Bonifay</td>
+ <td class="state">FL</td>
+ <td class="cert">14246</td>
+ <td class="ai">First Federal Bank of Florida</td>
+ <td class="closing">May 7, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="frontier.html">Frontier Bank</a></td>
+ <td class="city">Everett</td>
+ <td class="state">WA</td>
+ <td class="cert">22710</td>
+ <td class="ai">Union Bank, N.A.</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bc-natl.html">BC National Banks</a></td>
+ <td class="city">Butler</td>
+ <td class="state">MO</td>
+ <td class="cert">17792</td>
+ <td class="ai">Community First Bank</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="champion.html">Champion Bank</a></td>
+ <td class="city">Creve Coeur</td>
+ <td class="state">MO</td>
+ <td class="cert">58362</td>
+ <td class="ai">BankLiberty</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cfbancorp.html">CF Bancorp</a></td>
+ <td class="city">Port Huron</td>
+ <td class="state">MI</td>
+ <td class="cert">30005</td>
+ <td class="ai">First Michigan Bank</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westernbank-puertorico.html">Westernbank Puerto Rico</a><br><a href="westernbank-puertorico_spanish.html">En Espanol</a></td>
+ <td class="city">Mayaguez</td>
+ <td class="state">PR</td>
+ <td class="cert">31027</td>
+ <td class="ai">Banco Popular de Puerto Rico</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="r-gpremier-puertorico.html">R-G Premier Bank of Puerto Rico</a><br><a href="r-gpremier-puertorico_spanish.html">En Espanol</a></td>
+ <td class="city">Hato Rey</td>
+ <td class="state">PR</td>
+ <td class="cert">32185</td>
+ <td class="ai">Scotiabank de Puerto Rico</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="eurobank-puertorico.html">Eurobank</a><br><a href="eurobank-puertorico_spanish.html">En Espanol</a></td>
+ <td class="city">San Juan</td>
+ <td class="state">PR</td>
+ <td class="cert">27150</td>
+ <td class="ai">Oriental Bank and Trust</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="wheatland.html">Wheatland Bank</a></td>
+ <td class="city">Naperville</td>
+ <td class="state">IL</td>
+ <td class="cert">58429</td>
+ <td class="ai">Wheaton Bank & Trust</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peotone.html">Peotone Bank and Trust Company</a></td>
+ <td class="city">Peotone</td>
+ <td class="state">IL</td>
+ <td class="cert">10888</td>
+ <td class="ai">First Midwest Bank</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lincoln-park.html">Lincoln Park Savings Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">30600</td>
+ <td class="ai">Northbrook Bank & Trust Company</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="new-century-il.html">New Century Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">34821</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizens-bank.html">Citizens Bank and Trust Company of Chicago</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">34658</td>
+ <td class="ai">Republic Bank of Chicago</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="broadway.html">Broadway Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">22853</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amcore.html">Amcore Bank, National Association</a></td>
+ <td class="city">Rockford</td>
+ <td class="state">IL</td>
+ <td class="cert">3735</td>
+ <td class="ai">Harris N.A.</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citybank.html">City Bank</a></td>
+ <td class="city">Lynnwood</td>
+ <td class="state">WA</td>
+ <td class="cert">21521</td>
+ <td class="ai">Whidbey Island Bank</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="tamalpais.html">Tamalpais Bank</a></td>
+ <td class="city">San Rafael</td>
+ <td class="state">CA</td>
+ <td class="cert">33493</td>
+ <td class="ai">Union Bank, N.A.</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="innovative.html">Innovative Bank</a></td>
+ <td class="city">Oakland</td>
+ <td class="state">CA</td>
+ <td class="cert">23876</td>
+ <td class="ai">Center Bank</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="butlerbank.html">Butler Bank</a></td>
+ <td class="city">Lowell</td>
+ <td class="state">MA</td>
+ <td class="cert">26619</td>
+ <td class="ai">People's United Bank</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="riverside-natl.html">Riverside National Bank of Florida</a></td>
+ <td class="city">Fort Pierce</td>
+ <td class="state">FL</td>
+ <td class="cert">24067</td>
+ <td class="ai">TD Bank, N.A.</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americanfirst.html">AmericanFirst Bank</a></td>
+ <td class="city">Clermont</td>
+ <td class="state">FL</td>
+ <td class="cert">57724</td>
+ <td class="ai">TD Bank, N.A.</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ffbnf.html">First Federal Bank of North Florida</a></td>
+ <td class="city">Palatka</td>
+ <td class="state">FL</td>
+ <td class="cert">28886</td>
+ <td class="ai">TD Bank, N.A.</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lakeside-comm.html">Lakeside Community Bank</a></td>
+ <td class="city">Sterling Heights</td>
+ <td class="state">MI</td>
+ <td class="cert">34878</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="beachfirst.html">Beach First National Bank</a></td>
+ <td class="city">Myrtle Beach</td>
+ <td class="state">SC</td>
+ <td class="cert">34242</td>
+ <td class="ai">Bank of North Carolina</td>
+ <td class="closing">April 9, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="deserthills.html">Desert Hills Bank</a></td>
+ <td class="city">Phoenix</td>
+ <td class="state">AZ</td>
+ <td class="cert">57060</td>
+ <td class="ai">New York Community Bank</td>
+ <td class="closing">March 26, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="unity-natl.html">Unity National Bank</a></td>
+ <td class="city">Cartersville</td>
+ <td class="state">GA</td>
+ <td class="cert">34678</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">March 26, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="key-west.html">Key West Bank</a></td>
+ <td class="city">Key West</td>
+ <td class="state">FL</td>
+ <td class="cert">34684</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">March 26, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mcintosh.html">McIntosh Commercial Bank</a></td>
+ <td class="city">Carrollton</td>
+ <td class="state">GA</td>
+ <td class="cert">57399</td>
+ <td class="ai">CharterBank</td>
+ <td class="closing">March 26, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="state-aurora.html">State Bank of Aurora</a></td>
+ <td class="city">Aurora</td>
+ <td class="state">MN</td>
+ <td class="cert">8221</td>
+ <td class="ai">Northern State Bank</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstlowndes.html">First Lowndes Bank</a></td>
+ <td class="city">Fort Deposit</td>
+ <td class="state">AL</td>
+ <td class="cert">24957</td>
+ <td class="ai">First Citizens Bank</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofhiawassee.html">Bank of Hiawassee</a></td>
+ <td class="city">Hiawassee</td>
+ <td class="state">GA</td>
+ <td class="cert">10054</td>
+ <td class="ai">Citizens South Bank</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="appalachian.html">Appalachian Community Bank</a></td>
+ <td class="city">Ellijay</td>
+ <td class="state">GA</td>
+ <td class="cert">33989</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="advanta-ut.html">Advanta Bank Corp.</a></td>
+ <td class="city">Draper</td>
+ <td class="state">UT</td>
+ <td class="cert">33535</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cent-security.html">Century Security Bank</a></td>
+ <td class="city">Duluth</td>
+ <td class="state">GA</td>
+ <td class="cert">58104</td>
+ <td class="ai">Bank of Upson</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amer-natl-oh.html">American National Bank</a></td>
+ <td class="city">Parma</td>
+ <td class="state">OH</td>
+ <td class="cert">18806</td>
+ <td class="ai">The National Bank and Trust Company</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="statewide.html">Statewide Bank</a></td>
+ <td class="city">Covington</td>
+ <td class="state">LA</td>
+ <td class="cert">29561</td>
+ <td class="ai">Home Bank</td>
+ <td class="closing">March 12, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="oldsouthern.html">Old Southern Bank</a></td>
+ <td class="city">Orlando</td>
+ <td class="state">FL</td>
+ <td class="cert">58182</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">March 12, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="parkavenue-ny.html">The Park Avenue Bank</a></td>
+ <td class="city">New York</td>
+ <td class="state">NY</td>
+ <td class="cert">27096</td>
+ <td class="ai">Valley National Bank</td>
+ <td class="closing">March 12, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="libertypointe.html">LibertyPointe Bank</a></td>
+ <td class="city">New York</td>
+ <td class="state">NY</td>
+ <td class="cert">58071</td>
+ <td class="ai">Valley National Bank</td>
+ <td class="closing">March 11, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="centennial-ut.html">Centennial Bank</a></td>
+ <td class="city">Ogden</td>
+ <td class="state">UT</td>
+ <td class="cert">34430</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 5, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="waterfield.html">Waterfield Bank</a></td>
+ <td class="city">Germantown</td>
+ <td class="state">MD</td>
+ <td class="cert">34976</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 5, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofillinois.html">Bank of Illinois</a></td>
+ <td class="city">Normal</td>
+ <td class="state">IL</td>
+ <td class="cert">9268</td>
+ <td class="ai">Heartland Bank and Trust Company</td>
+ <td class="closing">March 5, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sunamerican.html">Sun American Bank</a></td>
+ <td class="city">Boca Raton</td>
+ <td class="state">FL</td>
+ <td class="cert">27126</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">March 5, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="rainier.html">Rainier Pacific Bank</a></td>
+ <td class="city">Tacoma</td>
+ <td class="state">WA</td>
+ <td class="cert">38129</td>
+ <td class="ai">Umpqua Bank</td>
+ <td class="closing">February 26, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="carsonriver.html">Carson River Community Bank</a></td>
+ <td class="city">Carson City</td>
+ <td class="state">NV</td>
+ <td class="cert">58352</td>
+ <td class="ai">Heritage Bank of Nevada</td>
+ <td class="closing">February 26, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lajolla.html">La Jolla Bank, FSB</a></td>
+ <td class="city">La Jolla</td>
+ <td class="state">CA</td>
+ <td class="cert">32423</td>
+ <td class="ai">OneWest Bank, FSB</td>
+ <td class="closing">February 19, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="georgewashington.html">George Washington Savings Bank</a></td>
+ <td class="city">Orland Park</td>
+ <td class="state">IL</td>
+ <td class="cert">29952</td>
+ <td class="ai">FirstMerit Bank, N.A.</td>
+ <td class="closing">February 19, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lacoste.html">The La Coste National Bank</a></td>
+ <td class="city">La Coste</td>
+ <td class="state">TX</td>
+ <td class="cert">3287</td>
+ <td class="ai">Community National Bank</td>
+ <td class="closing">February 19, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="marco.html">Marco Community Bank</a></td>
+ <td class="city">Marco Island</td>
+ <td class="state">FL</td>
+ <td class="cert">57586</td>
+ <td class="ai">Mutual of Omaha Bank</td>
+ <td class="closing">February 19, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="1stamerican.html">1st American State Bank of Minnesota</a></td>
+ <td class="city">Hancock</td>
+ <td class="state">MN</td>
+ <td class="cert">15448</td>
+ <td class="ai">Community Development Bank, FSB</td>
+ <td class="closing">February 5, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americanmarine.html">American Marine Bank</a></td>
+ <td class="city">Bainbridge Island</td>
+ <td class="state">WA</td>
+ <td class="cert">16730</td>
+ <td class="ai">Columbia State Bank</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstregional.html">First Regional Bank</a></td>
+ <td class="city">Los Angeles</td>
+ <td class="state">CA</td>
+ <td class="cert">23011</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cbt-cornelia.html">Community Bank and Trust</a></td>
+ <td class="city">Cornelia</td>
+ <td class="state">GA</td>
+ <td class="cert">5702</td>
+ <td class="ai">SCBT National Association</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="marshall-mn.html">Marshall Bank, N.A.</a></td>
+ <td class="city">Hallock</td>
+ <td class="state">MN</td>
+ <td class="cert">16133</td>
+ <td class="ai">United Valley Bank</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="floridacommunity.html">Florida Community Bank</a></td>
+ <td class="city">Immokalee</td>
+ <td class="state">FL</td>
+ <td class="cert">5672</td>
+ <td class="ai">Premier American Bank, N.A.</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstnational-carrollton.html">First National Bank of Georgia</a></td>
+ <td class="city">Carrollton</td>
+ <td class="state">GA</td>
+ <td class="cert">16480</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">December 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="columbiariver.html">Columbia River Bank</a></td>
+ <td class="city">The Dalles</td>
+ <td class="state">OR</td>
+ <td class="cert">22469</td>
+ <td class="ai">Columbia State Bank</td>
+ <td class="closing">January 22, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="evergreen-wa.html">Evergreen Bank</a></td>
+ <td class="city">Seattle</td>
+ <td class="state">WA</td>
+ <td class="cert">20501</td>
+ <td class="ai">Umpqua Bank</td>
+ <td class="closing">January 22, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="charter-nm.html">Charter Bank</a></td>
+ <td class="city">Santa Fe</td>
+ <td class="state">NM</td>
+ <td class="cert">32498</td>
+ <td class="ai">Charter Bank</td>
+ <td class="closing">January 22, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="leeton.html">Bank of Leeton</a></td>
+ <td class="city">Leeton</td>
+ <td class="state">MO</td>
+ <td class="cert">8265</td>
+ <td class="ai">Sunflower Bank, N.A.</td>
+ <td class="closing">January 22, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="premieramerican.html">Premier American Bank</a></td>
+ <td class="city">Miami</td>
+ <td class="state">FL</td>
+ <td class="cert">57147</td>
+ <td class="ai">Premier American Bank, N.A.</td>
+ <td class="closing">January 22, 2010</td>
+ <td class="updated">December 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="barnes.html">Barnes Banking Company</a></td>
+ <td class="city">Kaysville</td>
+ <td class="state">UT</td>
+ <td class="cert">1252</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">January 15, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ststephen.html">St. Stephen State Bank</a></td>
+ <td class="city">St. Stephen</td>
+ <td class="state">MN</td>
+ <td class="cert">17522</td>
+ <td class="ai">First State Bank of St. Joseph</td>
+ <td class="closing">January 15, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="towncommunity.html">Town Community Bank & Trust</a></td>
+ <td class="city">Antioch</td>
+ <td class="state">IL</td>
+ <td class="cert">34705</td>
+ <td class="ai">First American Bank</td>
+ <td class="closing">January 15, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="horizon-wa.html">Horizon Bank</a></td>
+ <td class="city">Bellingham</td>
+ <td class="state">WA</td>
+ <td class="cert">22977</td>
+ <td class="ai">Washington Federal Savings and Loan Association</td>
+ <td class="closing">January 8, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstfederal-ca.html">First Federal Bank of California, F.S.B.</a></td>
+ <td class="city">Santa Monica</td>
+ <td class="state">CA</td>
+ <td class="cert">28536</td>
+ <td class="ai">OneWest Bank, FSB</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="imperialcapital.html">Imperial Capital Bank</a></td>
+ <td class="city">La Jolla</td>
+ <td class="state">CA</td>
+ <td class="cert">26348</td>
+ <td class="ai">City National Bank</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ibb.html">Independent Bankers' Bank</a></td>
+ <td class="city">Springfield</td>
+ <td class="state">IL</td>
+ <td class="cert">26820</td>
+ <td class="ai">The Independent BankersBank (TIB)</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newsouth.html">New South Federal Savings Bank</a></td>
+ <td class="city">Irondale</td>
+ <td class="state">AL</td>
+ <td class="cert">32276</td>
+ <td class="ai">Beal Bank</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizensstate-mi.html">Citizens State Bank</a></td>
+ <td class="city">New Baltimore</td>
+ <td class="state">MI</td>
+ <td class="cert">1006</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peoplesfirst-fl.html">Peoples First Community Bank</a></td>
+ <td class="city">Panama City</td>
+ <td class="state">FL</td>
+ <td class="cert">32167</td>
+ <td class="ai">Hancock Bank</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="rockbridge.html">RockBridge Commercial Bank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">58315</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="solutions.html">SolutionsBank</a></td>
+ <td class="city">Overland Park</td>
+ <td class="state">KS</td>
+ <td class="cert">4731</td>
+ <td class="ai">Arvest Bank</td>
+ <td class="closing">December 11, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="valleycapital.html">Valley Capital Bank, N.A.</a></td>
+ <td class="city">Mesa</td>
+ <td class="state">AZ</td>
+ <td class="cert">58399</td>
+ <td class="ai">Enterprise Bank & Trust</td>
+ <td class="closing">December 11, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="republicfederal.html">Republic Federal Bank, N.A.</a></td>
+ <td class="city">Miami</td>
+ <td class="state">FL</td>
+ <td class="cert">22846</td>
+ <td class="ai">1st United Bank</td>
+ <td class="closing">December 11, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="atlantic-va.html">Greater Atlantic Bank</a></td>
+ <td class="city">Reston</td>
+ <td class="state">VA</td>
+ <td class="cert">32583</td>
+ <td class="ai">Sonabank</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="benchmark-il.html">Benchmark Bank</a></td>
+ <td class="city">Aurora</td>
+ <td class="state">IL</td>
+ <td class="cert">10440</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amtrust.html">AmTrust Bank</a></td>
+ <td class="city">Cleveland</td>
+ <td class="state">OH</td>
+ <td class="cert">29776</td>
+ <td class="ai">New York Community Bank</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="tattnall.html">The Tattnall Bank</a></td>
+ <td class="city">Reidsville</td>
+ <td class="state">GA</td>
+ <td class="cert">12080</td>
+ <td class="ai">Heritage Bank of the South</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstsecurity.html">First Security National Bank</a></td>
+ <td class="city">Norcross</td>
+ <td class="state">GA</td>
+ <td class="cert">26290</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="buckheadcommunity.html">The Buckhead Community Bank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">34663</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commercesw-fl.html">Commerce Bank of Southwest Florida</a></td>
+ <td class="city">Fort Myers</td>
+ <td class="state">FL</td>
+ <td class="cert">58016</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">November 20, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pacificcoastnatl.html">Pacific Coast National Bank</a></td>
+ <td class="city">San Clemente</td>
+ <td class="state">CA</td>
+ <td class="cert">57914</td>
+ <td class="ai">Sunwest Bank</td>
+ <td class="closing">November 13, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="orion-fl.html">Orion Bank</a></td>
+ <td class="city">Naples</td>
+ <td class="state">FL</td>
+ <td class="cert">22427</td>
+ <td class="ai">IBERIABANK</td>
+ <td class="closing">November 13, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="centuryfsb.html">Century Bank, F.S.B.</a></td>
+ <td class="city">Sarasota</td>
+ <td class="state">FL</td>
+ <td class="cert">32267</td>
+ <td class="ai">IBERIABANK</td>
+ <td class="closing">November 13, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ucb.html">United Commercial Bank</a></td>
+ <td class="city">San Francisco</td>
+ <td class="state">CA</td>
+ <td class="cert">32469</td>
+ <td class="ai">East West Bank</td>
+ <td class="closing">November 6, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="gateway-mo.html">Gateway Bank of St. Louis</a></td>
+ <td class="city">St. Louis</td>
+ <td class="state">MO</td>
+ <td class="cert">19450</td>
+ <td class="ai">Central Bank of Kansas City</td>
+ <td class="closing">November 6, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="prosperan.html">Prosperan Bank</a></td>
+ <td class="city">Oakdale</td>
+ <td class="state">MN</td>
+ <td class="cert">35074</td>
+ <td class="ai">Alerus Financial, N.A.</td>
+ <td class="closing">November 6, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="homefsb-mi.html">Home Federal Savings Bank</a></td>
+ <td class="city">Detroit</td>
+ <td class="state">MI</td>
+ <td class="cert">30329</td>
+ <td class="ai">Liberty Bank and Trust Company</td>
+ <td class="closing">November 6, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="unitedsecurity-ga.html">United Security Bank</a></td>
+ <td class="city">Sparta</td>
+ <td class="state">GA</td>
+ <td class="cert">22286</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">November 6, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="northhouston-tx.html">North Houston Bank</a></td>
+ <td class="city">Houston</td>
+ <td class="state">TX</td>
+ <td class="cert">18776</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="madisonville-tx.html">Madisonville State Bank</a></td>
+ <td class="city">Madisonville</td>
+ <td class="state">TX</td>
+ <td class="cert">33782</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizens-teague.html">Citizens National Bank</a></td>
+ <td class="city">Teague</td>
+ <td class="state">TX</td>
+ <td class="cert">25222</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="park-il.html">Park National Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">11677</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pacificnational-ca.html">Pacific National Bank</a></td>
+ <td class="city">San Francisco</td>
+ <td class="state">CA</td>
+ <td class="cert">30006</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="calnational.html">California National Bank</a></td>
+ <td class="city">Los Angeles</td>
+ <td class="state">CA</td>
+ <td class="cert">34659</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sandiegonational.html">San Diego National Bank</a></td>
+ <td class="city">San Diego</td>
+ <td class="state">CA</td>
+ <td class="cert">23594</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community-lemont.html">Community Bank of Lemont</a></td>
+ <td class="city">Lemont</td>
+ <td class="state">IL</td>
+ <td class="cert">35291</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankusa-az.html">Bank USA, N.A.</a></td>
+ <td class="city">Phoenix</td>
+ <td class="state">AZ</td>
+ <td class="cert">32218</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstdupage.html">First DuPage Bank</a></td>
+ <td class="city">Westmont</td>
+ <td class="state">IL</td>
+ <td class="cert">35038</td>
+ <td class="ai">First Midwest Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="riverview-mn.html">Riverview Community Bank</a></td>
+ <td class="city">Otsego</td>
+ <td class="state">MN</td>
+ <td class="cert">57525</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="elmwood.html">Bank of Elmwood</a></td>
+ <td class="city">Racine</td>
+ <td class="state">WI</td>
+ <td class="cert">18321</td>
+ <td class="ai">Tri City National Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="flagship.html">Flagship National Bank</a></td>
+ <td class="city">Bradenton</td>
+ <td class="state">FL</td>
+ <td class="cert">35044</td>
+ <td class="ai">First Federal Bank of Florida</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="hillcrest-fl.html">Hillcrest Bank Florida</a></td>
+ <td class="city">Naples</td>
+ <td class="state">FL</td>
+ <td class="cert">58336</td>
+ <td class="ai">Stonegate Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americanunited.html">American United Bank</a></td>
+ <td class="city">Lawrenceville</td>
+ <td class="state">GA</td>
+ <td class="cert">57794</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="partners-fl.html">Partners Bank</a></td>
+ <td class="city">Naples</td>
+ <td class="state">FL</td>
+ <td class="cert">57959</td>
+ <td class="ai">Stonegate Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sanjoaquin.html">San Joaquin Bank</a></td>
+ <td class="city">Bakersfield</td>
+ <td class="state">CA</td>
+ <td class="cert">23266</td>
+ <td class="ai">Citizens Business Bank</td>
+ <td class="closing">October 16, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="scnb-co.html">Southern Colorado National Bank</a></td>
+ <td class="city">Pueblo</td>
+ <td class="state">CO</td>
+ <td class="cert">57263</td>
+ <td class="ai">Legacy Bank</td>
+ <td class="closing">October 2, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="jennings-mn.html">Jennings State Bank</a></td>
+ <td class="city">Spring Grove</td>
+ <td class="state">MN</td>
+ <td class="cert">11416</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">October 2, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="warren-mi.html">Warren Bank</a></td>
+ <td class="city">Warren</td>
+ <td class="state">MI</td>
+ <td class="cert">34824</td>
+ <td class="ai">The Huntington National Bank</td>
+ <td class="closing">October 2, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="georgian.html">Georgian Bank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">57151</td>
+ <td class="ai">First Citizens Bank and Trust Company, Inc.</td>
+ <td class="closing">September 25, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="irwin-ky.html">Irwin Union Bank, F.S.B.</a></td>
+ <td class="city">Louisville</td>
+ <td class="state">KY</td>
+ <td class="cert">57068</td>
+ <td class="ai">First Financial Bank, N.A.</td>
+ <td class="closing">September 18, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="irwin-in.html">Irwin Union Bank and Trust Company</a></td>
+ <td class="city">Columbus</td>
+ <td class="state">IN</td>
+ <td class="cert">10100</td>
+ <td class="ai">First Financial Bank, N.A.</td>
+ <td class="closing">September 18, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="venture-wa.html">Venture Bank</a></td>
+ <td class="city">Lacey</td>
+ <td class="state">WA</td>
+ <td class="cert">22868</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">September 11, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="brickwell-mn.html">Brickwell Community Bank</a></td>
+ <td class="city">Woodbury</td>
+ <td class="state">MN</td>
+ <td class="cert">57736</td>
+ <td class="ai">CorTrust Bank N.A.</td>
+ <td class="closing">September 11, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="corus.html">Corus Bank, N.A.</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">13693</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">September 11, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firststate-az.html">First State Bank</a></td>
+ <td class="city">Flagstaff</td>
+ <td class="state">AZ</td>
+ <td class="cert">34875</td>
+ <td class="ai">Sunwest Bank</td>
+ <td class="closing">September 4, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="platinum-il.html">Platinum Community Bank</a></td>
+ <td class="city">Rolling Meadows</td>
+ <td class="state">IL</td>
+ <td class="cert">35030</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">September 4, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="vantus.html">Vantus Bank</a></td>
+ <td class="city">Sioux City</td>
+ <td class="state">IN</td>
+ <td class="cert">27732</td>
+ <td class="ai">Great Southern Bank</td>
+ <td class="closing">September 4, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="inbank.html">InBank</a></td>
+ <td class="city">Oak Forest</td>
+ <td class="state">IL</td>
+ <td class="cert">20203</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">September 4, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstbankkc-mo.html">First Bank of Kansas City</a></td>
+ <td class="city">Kansas City</td>
+ <td class="state">MO</td>
+ <td class="cert">25231</td>
+ <td class="ai">Great American Bank</td>
+ <td class="closing">September 4, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="affinity-ca.html">Affinity Bank</a></td>
+ <td class="city">Ventura</td>
+ <td class="state">CA</td>
+ <td class="cert">27197</td>
+ <td class="ai">Pacific Western Bank</td>
+ <td class="closing">August 28, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mainstreet-mn.html">Mainstreet Bank</a></td>
+ <td class="city">Forest Lake</td>
+ <td class="state">MN</td>
+ <td class="cert">1909</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">August 28, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bradford-md.html">Bradford Bank</a></td>
+ <td class="city">Baltimore</td>
+ <td class="state">MD</td>
+ <td class="cert">28312</td>
+ <td class="ai">Manufacturers and Traders Trust Company (M&T Bank)</td>
+ <td class="closing">August 28, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="guaranty-tx.html">Guaranty Bank</a></td>
+ <td class="city">Austin</td>
+ <td class="state">TX</td>
+ <td class="cert">32618</td>
+ <td class="ai">BBVA Compass</td>
+ <td class="closing">August 21, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="capitalsouth.html">CapitalSouth Bank</a></td>
+ <td class="city">Birmingham</td>
+ <td class="state">AL</td>
+ <td class="cert">22130</td>
+ <td class="ai">IBERIABANK</td>
+ <td class="closing">August 21, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="coweta.html">First Coweta Bank</a></td>
+ <td class="city">Newnan</td>
+ <td class="state">GA</td>
+ <td class="cert">57702</td>
+ <td class="ai">United Bank</td>
+ <td class="closing">August 21, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ebank.html">ebank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">34682</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">August 21, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community-nv.html">Community Bank of Nevada</a></td>
+ <td class="city">Las Vegas</td>
+ <td class="state">NV</td>
+ <td class="cert">34043</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">August 14, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community-az.html">Community Bank of Arizona</a></td>
+ <td class="city">Phoenix</td>
+ <td class="state">AZ</td>
+ <td class="cert">57645</td>
+ <td class="ai">MidFirst Bank</td>
+ <td class="closing">August 14, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="union-az.html">Union Bank, National Association</a></td>
+ <td class="city">Gilbert</td>
+ <td class="state">AZ</td>
+ <td class="cert">34485</td>
+ <td class="ai">MidFirst Bank</td>
+ <td class="closing">August 14, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="colonial-al.html">Colonial Bank</a></td>
+ <td class="city">Montgomery</td>
+ <td class="state">AL</td>
+ <td class="cert">9609</td>
+ <td class="ai">Branch Banking & Trust Company, (BB&T)</td>
+ <td class="closing">August 14, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="dwelling.html">Dwelling House Savings and Loan Association</a></td>
+ <td class="city">Pittsburgh</td>
+ <td class="state">PA</td>
+ <td class="cert">31559</td>
+ <td class="ai">PNC Bank, N.A.</td>
+ <td class="closing">August 14, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community-prineville.html">Community First Bank</a></td>
+ <td class="city">Prineville</td>
+ <td class="state">OR</td>
+ <td class="cert">23268</td>
+ <td class="ai">Home Federal Bank</td>
+ <td class="closing">August 7, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community-venice.html">Community National Bank of Sarasota County</a></td>
+ <td class="city">Venice</td>
+ <td class="state">FL</td>
+ <td class="cert">27183</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">August 7, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fsb-sarasota.html">First State Bank</a></td>
+ <td class="city">Sarasota</td>
+ <td class="state">FL</td>
+ <td class="cert">27364</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">August 7, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mutual-harvey.html">Mutual Bank</a></td>
+ <td class="city">Harvey</td>
+ <td class="state">IL</td>
+ <td class="cert">18659</td>
+ <td class="ai">United Central Bank</td>
+ <td class="closing">July 31, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americano.html">First BankAmericano</a></td>
+ <td class="city">Elizabeth</td>
+ <td class="state">NJ</td>
+ <td class="cert">34270</td>
+ <td class="ai">Crown Bank</td>
+ <td class="closing">July 31, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peoplescommunity-oh.html">Peoples Community Bank</a></td>
+ <td class="city">West Chester</td>
+ <td class="state">OH</td>
+ <td class="cert">32288</td>
+ <td class="ai">First Financial Bank, N.A.</td>
+ <td class="closing">July 31, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="integrity-fl.html">Integrity Bank</a></td>
+ <td class="city">Jupiter</td>
+ <td class="state">FL</td>
+ <td class="cert">57604</td>
+ <td class="ai">Stonegate Bank</td>
+ <td class="closing">July 31, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fsb-altus.html">First State Bank of Altus</a></td>
+ <td class="city">Altus</td>
+ <td class="state">OK</td>
+ <td class="cert">9873</td>
+ <td class="ai">Herring Bank</td>
+ <td class="closing">July 31, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-jones.html">Security Bank of Jones County</a></td>
+ <td class="city">Gray</td>
+ <td class="state">GA</td>
+ <td class="cert">8486</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-houston.html">Security Bank of Houston County</a></td>
+ <td class="city">Perry</td>
+ <td class="state">GA</td>
+ <td class="cert">27048</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-bibb.html">Security Bank of Bibb County</a></td>
+ <td class="city">Macon</td>
+ <td class="state">GA</td>
+ <td class="cert">27367</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-metro.html">Security Bank of North Metro</a></td>
+ <td class="city">Woodstock</td>
+ <td class="state">GA</td>
+ <td class="cert">57105</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-fulton.html">Security Bank of North Fulton</a></td>
+ <td class="city">Alpharetta</td>
+ <td class="state">GA</td>
+ <td class="cert">57430</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-gwinnett.html">Security Bank of Gwinnett County</a></td>
+ <td class="city">Suwanee</td>
+ <td class="state">GA</td>
+ <td class="cert">57346</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="waterford.html">Waterford Village Bank</a></td>
+ <td class="city">Williamsville</td>
+ <td class="state">NY</td>
+ <td class="cert">58065</td>
+ <td class="ai">Evans Bank, N.A.</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="temecula.html">Temecula Valley Bank</a></td>
+ <td class="city">Temecula</td>
+ <td class="state">CA</td>
+ <td class="cert">34341</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">July 17, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="vineyard.html">Vineyard Bank</a></td>
+ <td class="city">Rancho Cucamonga</td>
+ <td class="state">CA</td>
+ <td class="cert">23556</td>
+ <td class="ai">California Bank & Trust</td>
+ <td class="closing">July 17, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankfirst.html">BankFirst</a></td>
+ <td class="city">Sioux Falls</td>
+ <td class="state">SD</td>
+ <td class="cert">34103</td>
+ <td class="ai">Alerus Financial, N.A.</td>
+ <td class="closing">July 17, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="piedmont.html">First Piedmont Bank</a></td>
+ <td class="city">Winder</td>
+ <td class="state">GA</td>
+ <td class="cert">34594</td>
+ <td class="ai">First American Bank and Trust Company</td>
+ <td class="closing">July 17, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="wyoming.html">Bank of Wyoming</a></td>
+ <td class="city">Thermopolis</td>
+ <td class="state">WY</td>
+ <td class="cert">22754</td>
+ <td class="ai">Central Bank & Trust</td>
+ <td class="closing">July 10, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="founders.html">Founders Bank</a></td>
+ <td class="city">Worth</td>
+ <td class="state">IL</td>
+ <td class="cert">18390</td>
+ <td class="ai">The PrivateBank and Trust Company</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="millennium.html">Millennium State Bank of Texas</a></td>
+ <td class="city">Dallas</td>
+ <td class="state">TX</td>
+ <td class="cert">57667</td>
+ <td class="ai">State Bank of Texas</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">October 26, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="danville.html">First National Bank of Danville</a></td>
+ <td class="city">Danville</td>
+ <td class="state">IL</td>
+ <td class="cert">3644</td>
+ <td class="ai">First Financial Bank, N.A.</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="elizabeth.html">Elizabeth State Bank</a></td>
+ <td class="city">Elizabeth</td>
+ <td class="state">IL</td>
+ <td class="cert">9262</td>
+ <td class="ai">Galena State Bank and Trust Company</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="rockriver.html">Rock River Bank</a></td>
+ <td class="city">Oregon</td>
+ <td class="state">IL</td>
+ <td class="cert">15302</td>
+ <td class="ai">The Harvard State Bank</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="winchester.html">First State Bank of Winchester</a></td>
+ <td class="city">Winchester</td>
+ <td class="state">IL</td>
+ <td class="cert">11710</td>
+ <td class="ai">The First National Bank of Beardstown</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="warner.html">John Warner Bank</a></td>
+ <td class="city">Clinton</td>
+ <td class="state">IL</td>
+ <td class="cert">12093</td>
+ <td class="ai">State Bank of Lincoln</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mirae.html">Mirae Bank</a></td>
+ <td class="city">Los Angeles</td>
+ <td class="state">CA</td>
+ <td class="cert">57332</td>
+ <td class="ai">Wilshire State Bank</td>
+ <td class="closing">June 26, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="metropacific.html">MetroPacific Bank</a></td>
+ <td class="city">Irvine</td>
+ <td class="state">CA</td>
+ <td class="cert">57893</td>
+ <td class="ai">Sunwest Bank</td>
+ <td class="closing">June 26, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="horizon.html">Horizon Bank</a></td>
+ <td class="city">Pine City</td>
+ <td class="state">MN</td>
+ <td class="cert">9744</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">June 26, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="neighbor.html">Neighborhood Community Bank</a></td>
+ <td class="city">Newnan</td>
+ <td class="state">GA</td>
+ <td class="cert">35285</td>
+ <td class="ai">CharterBank</td>
+ <td class="closing">June 26, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="communityga.html">Community Bank of West Georgia</a></td>
+ <td class="city">Villa Rica</td>
+ <td class="state">GA</td>
+ <td class="cert">57436</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">June 26, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="anthony.html">First National Bank of Anthony</a></td>
+ <td class="city">Anthony</td>
+ <td class="state">KS</td>
+ <td class="cert">4614</td>
+ <td class="ai">Bank of Kansas</td>
+ <td class="closing">June 19, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cooperative.html">Cooperative Bank</a></td>
+ <td class="city">Wilmington</td>
+ <td class="state">NC</td>
+ <td class="cert">27837</td>
+ <td class="ai">First Bank</td>
+ <td class="closing">June 19, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="scb.html">Southern Community Bank</a></td>
+ <td class="city">Fayetteville</td>
+ <td class="state">GA</td>
+ <td class="cert">35251</td>
+ <td class="ai">United Community Bank</td>
+ <td class="closing">June 19, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lincolnwood.html">Bank of Lincolnwood</a></td>
+ <td class="city">Lincolnwood</td>
+ <td class="state">IL</td>
+ <td class="cert">17309</td>
+ <td class="ai">Republic Bank of Chicago</td>
+ <td class="closing">June 5, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizensnational.html">Citizens National Bank</a></td>
+ <td class="city">Macomb</td>
+ <td class="state">IL</td>
+ <td class="cert">5757</td>
+ <td class="ai">Morton Community Bank</td>
+ <td class="closing">May 22, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="strategiccapital.html">Strategic Capital Bank</a></td>
+ <td class="city">Champaign</td>
+ <td class="state">IL</td>
+ <td class="cert">35175</td>
+ <td class="ai">Midland States Bank</td>
+ <td class="closing">May 22, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankunited.html">BankUnited, FSB</a></td>
+ <td class="city">Coral Gables</td>
+ <td class="state">FL</td>
+ <td class="cert">32247</td>
+ <td class="ai">BankUnited</td>
+ <td class="closing">May 21, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westsound.html">Westsound Bank</a></td>
+ <td class="city">Bremerton</td>
+ <td class="state">WA</td>
+ <td class="cert">34843</td>
+ <td class="ai">Kitsap Bank</td>
+ <td class="closing">May 8, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americawest.html">America West Bank</a></td>
+ <td class="city">Layton</td>
+ <td class="state">UT</td>
+ <td class="cert">35461</td>
+ <td class="ai">Cache Valley Bank</td>
+ <td class="closing">May 1, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizens.html">Citizens Community Bank</a></td>
+ <td class="city">Ridgewood</td>
+ <td class="state">NJ</td>
+ <td class="cert">57563</td>
+ <td class="ai">North Jersey Community Bank</td>
+ <td class="closing">May 1, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="silverton.html">Silverton Bank, NA</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">26535</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">May 1, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstbankidaho.html">First Bank of Idaho</a></td>
+ <td class="city">Ketchum</td>
+ <td class="state">ID</td>
+ <td class="cert">34396</td>
+ <td class="ai">U.S. Bank, N.A.</td>
+ <td class="closing">April 24, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="beverlyhills.html">First Bank of Beverly Hills</a></td>
+ <td class="city">Calabasas</td>
+ <td class="state">CA</td>
+ <td class="cert">32069</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">April 24, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="michiganheritage.html">Michigan Heritage Bank</a></td>
+ <td class="city">Farmington Hills</td>
+ <td class="state">MI</td>
+ <td class="cert">34369</td>
+ <td class="ai">Level One Bank</td>
+ <td class="closing">April 24, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amsouthern.html">American Southern Bank</a></td>
+ <td class="city">Kennesaw</td>
+ <td class="state">GA</td>
+ <td class="cert">57943</td>
+ <td class="ai">Bank of North Georgia</td>
+ <td class="closing">April 24, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="greatbasin.html">Great Basin Bank of Nevada</a></td>
+ <td class="city">Elko</td>
+ <td class="state">NV</td>
+ <td class="cert">33824</td>
+ <td class="ai">Nevada State Bank</td>
+ <td class="closing">April 17, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amsterling.html">American Sterling Bank</a></td>
+ <td class="city">Sugar Creek</td>
+ <td class="state">MO</td>
+ <td class="cert">8266</td>
+ <td class="ai">Metcalf Bank</td>
+ <td class="closing">April 17, 2009</td>
+ <td class="updated">August 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newfrontier.html">New Frontier Bank</a></td>
+ <td class="city">Greeley</td>
+ <td class="state">CO</td>
+ <td class="cert">34881</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">April 10, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="capefear.html">Cape Fear Bank</a></td>
+ <td class="city">Wilmington</td>
+ <td class="state">NC</td>
+ <td class="cert">34639</td>
+ <td class="ai">First Federal Savings and Loan Association</td>
+ <td class="closing">April 10, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="omni.html">Omni National Bank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">22238</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 27, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="teambank.html">TeamBank, NA</a></td>
+ <td class="city">Paola</td>
+ <td class="state">KS</td>
+ <td class="cert">4754</td>
+ <td class="ai">Great Southern Bank</td>
+ <td class="closing">March 20, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="coloradonational.html">Colorado National Bank</a></td>
+ <td class="city">Colorado Springs</td>
+ <td class="state">CO</td>
+ <td class="cert">18896</td>
+ <td class="ai">Herring Bank</td>
+ <td class="closing">March 20, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcity.html">FirstCity Bank</a></td>
+ <td class="city">Stockbridge</td>
+ <td class="state">GA</td>
+ <td class="cert">18243</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 20, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="freedomga.html">Freedom Bank of Georgia</a></td>
+ <td class="city">Commerce</td>
+ <td class="state">GA</td>
+ <td class="cert">57558</td>
+ <td class="ai">Northeast Georgia Bank</td>
+ <td class="closing">March 6, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="securitysavings.html">Security Savings Bank</a></td>
+ <td class="city">Henderson</td>
+ <td class="state">NV</td>
+ <td class="cert">34820</td>
+ <td class="ai">Bank of Nevada</td>
+ <td class="closing">February 27, 2009</td>
+ <td class="updated">September 7, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heritagebank.html">Heritage Community Bank</a></td>
+ <td class="city">Glenwood</td>
+ <td class="state">IL</td>
+ <td class="cert">20078</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">February 27, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="silverfalls.html">Silver Falls Bank</a></td>
+ <td class="city">Silverton</td>
+ <td class="state">OR</td>
+ <td class="cert">35399</td>
+ <td class="ai">Citizens Bank</td>
+ <td class="closing">February 20, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pinnacle.html">Pinnacle Bank of Oregon</a></td>
+ <td class="city">Beaverton</td>
+ <td class="state">OR</td>
+ <td class="cert">57342</td>
+ <td class="ai">Washington Trust Bank of Spokane</td>
+ <td class="closing">February 13, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cornbelt.html">Corn Belt Bank & Trust Co.</a></td>
+ <td class="city">Pittsfield</td>
+ <td class="state">IL</td>
+ <td class="cert">16500</td>
+ <td class="ai">The Carlinville National Bank</td>
+ <td class="closing">February 13, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="riverside.html">Riverside Bank of the Gulf Coast</a></td>
+ <td class="city">Cape Coral</td>
+ <td class="state">FL</td>
+ <td class="cert">34563</td>
+ <td class="ai">TIB Bank</td>
+ <td class="closing">February 13, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sherman.html">Sherman County Bank</a></td>
+ <td class="city">Loup City</td>
+ <td class="state">NE</td>
+ <td class="cert">5431</td>
+ <td class="ai">Heritage Bank</td>
+ <td class="closing">February 13, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="county.html">County Bank</a></td>
+ <td class="city">Merced</td>
+ <td class="state">CA</td>
+ <td class="cert">22574</td>
+ <td class="ai">Westamerica Bank</td>
+ <td class="closing">February 6, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="alliance.html">Alliance Bank</a></td>
+ <td class="city">Culver City</td>
+ <td class="state">CA</td>
+ <td class="cert">23124</td>
+ <td class="ai">California Bank & Trust</td>
+ <td class="closing">February 6, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstbank.html">FirstBank Financial Services</a></td>
+ <td class="city">McDonough</td>
+ <td class="state">GA</td>
+ <td class="cert">57017</td>
+ <td class="ai">Regions Bank</td>
+ <td class="closing">February 6, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ocala.html">Ocala National Bank</a></td>
+ <td class="city">Ocala</td>
+ <td class="state">FL</td>
+ <td class="cert">26538</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">January 30, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="suburban.html">Suburban FSB</a></td>
+ <td class="city">Crofton</td>
+ <td class="state">MD</td>
+ <td class="cert">30763</td>
+ <td class="ai">Bank of Essex</td>
+ <td class="closing">January 30, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="magnet.html">MagnetBank</a></td>
+ <td class="city">Salt Lake City</td>
+ <td class="state">UT</td>
+ <td class="cert">58001</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">January 30, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="centennial.html">1st Centennial Bank</a></td>
+ <td class="city">Redlands</td>
+ <td class="state">CA</td>
+ <td class="cert">33025</td>
+ <td class="ai">First California Bank</td>
+ <td class="closing">January 23, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="clark.html">Bank of Clark County</a></td>
+ <td class="city">Vancouver</td>
+ <td class="state">WA</td>
+ <td class="cert">34959</td>
+ <td class="ai">Umpqua Bank</td>
+ <td class="closing">January 16, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commerce.html">National Bank of Commerce</a></td>
+ <td class="city">Berkeley</td>
+ <td class="state">IL</td>
+ <td class="cert">19733</td>
+ <td class="ai">Republic Bank of Chicago</td>
+ <td class="closing">January 16, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sanderson.html">Sanderson State Bank</a><br><a href="sanderson_spanish.html">En Espanol</a></td>
+ <td class="city">Sanderson</td>
+ <td class="state">TX</td>
+ <td class="cert">11568</td>
+ <td class="ai">The Pecos County State Bank</td>
+ <td class="closing">December 12, 2008</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="haventrust.html">Haven Trust Bank</a></td>
+ <td class="city">Duluth</td>
+ <td class="state">GA</td>
+ <td class="cert">35379</td>
+ <td class="ai">Branch Banking & Trust Company, (BB&T)</td>
+ <td class="closing">December 12, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstga.html">First Georgia Community Bank</a></td>
+ <td class="city">Jackson</td>
+ <td class="state">GA</td>
+ <td class="cert">34301</td>
+ <td class="ai">United Bank</td>
+ <td class="closing">December 5, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pff.html">PFF Bank & Trust</a></td>
+ <td class="city">Pomona</td>
+ <td class="state">CA</td>
+ <td class="cert">28344</td>
+ <td class="ai">U.S. Bank, N.A.</td>
+ <td class="closing">November 21, 2008</td>
+ <td class="updated">January 4, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="downey.html">Downey Savings & Loan</a></td>
+ <td class="city">Newport Beach</td>
+ <td class="state">CA</td>
+ <td class="cert">30968</td>
+ <td class="ai">U.S. Bank, N.A.</td>
+ <td class="closing">November 21, 2008</td>
+ <td class="updated">January 4, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community.html">Community Bank</a></td>
+ <td class="city">Loganville</td>
+ <td class="state">GA</td>
+ <td class="cert">16490</td>
+ <td class="ai">Bank of Essex</td>
+ <td class="closing">November 21, 2008</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="securitypacific.html">Security Pacific Bank</a></td>
+ <td class="city">Los Angeles</td>
+ <td class="state">CA</td>
+ <td class="cert">23595</td>
+ <td class="ai">Pacific Western Bank</td>
+ <td class="closing">November 7, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="franklinbank.html">Franklin Bank, SSB</a></td>
+ <td class="city">Houston</td>
+ <td class="state">TX</td>
+ <td class="cert">26870</td>
+ <td class="ai">Prosperity Bank</td>
+ <td class="closing">November 7, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="freedom.html">Freedom Bank</a></td>
+ <td class="city">Bradenton</td>
+ <td class="state">FL</td>
+ <td class="cert">57930</td>
+ <td class="ai">Fifth Third Bank</td>
+ <td class="closing">October 31, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="alpha.html">Alpha Bank & Trust</a></td>
+ <td class="city">Alpharetta</td>
+ <td class="state">GA</td>
+ <td class="cert">58241</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">October 24, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="meridian.html">Meridian Bank</a></td>
+ <td class="city">Eldred</td>
+ <td class="state">IL</td>
+ <td class="cert">13789</td>
+ <td class="ai">National Bank</td>
+ <td class="closing">October 10, 2008</td>
+ <td class="updated">May 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mainstreet.html">Main Street Bank</a></td>
+ <td class="city">Northville</td>
+ <td class="state">MI</td>
+ <td class="cert">57654</td>
+ <td class="ai">Monroe Bank & Trust</td>
+ <td class="closing">October 10, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="wamu.html">Washington Mutual Bank<br>(Including its subsidiary Washington Mutual Bank FSB)</a></td>
+ <td class="city">Henderson</td>
+ <td class="state">NV</td>
+ <td class="cert">32633</td>
+ <td class="ai">JP Morgan Chase Bank</td>
+ <td class="closing">September 25, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ameribank.html">Ameribank</a></td>
+ <td class="city">Northfork</td>
+ <td class="state">WV</td>
+ <td class="cert">6782</td>
+ <td class="ai">The Citizens Savings Bank<br><br>Pioneer Community Bank, Inc.</td>
+ <td class="closing">September 19, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="silverstate.html">Silver State Bank</a><br><a href="silverstatesp.html">En Espanol</a></td>
+ <td class="city">Henderson</td>
+ <td class="state">NV</td>
+ <td class="cert">34194</td>
+ <td class="ai">Nevada State Bank</td>
+ <td class="closing">September 5, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="integrity.html">Integrity Bank</a></td>
+ <td class="city">Alpharetta</td>
+ <td class="state">GA</td>
+ <td class="cert">35469</td>
+ <td class="ai">Regions Bank</td>
+ <td class="closing">August 29, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="columbian.html">Columbian Bank & Trust</a></td>
+ <td class="city">Topeka</td>
+ <td class="state">KS</td>
+ <td class="cert">22728</td>
+ <td class="ai">Citizens Bank & Trust</td>
+ <td class="closing">August 22, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstprioritybank.html">First Priority Bank</a></td>
+ <td class="city">Bradenton</td>
+ <td class="state">FL</td>
+ <td class="cert">57523</td>
+ <td class="ai">SunTrust Bank</td>
+ <td class="closing">August 1, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heritage.html">First Heritage Bank, NA</a></td>
+ <td class="city">Newport Beach</td>
+ <td class="state">CA</td>
+ <td class="cert">57961</td>
+ <td class="ai">Mutual of Omaha Bank</td>
+ <td class="closing">July 25, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fnbnv.html">First National Bank of Nevada</a></td>
+ <td class="city">Reno</td>
+ <td class="state">NV</td>
+ <td class="cert">27011</td>
+ <td class="ai">Mutual of Omaha Bank</td>
+ <td class="closing">July 25, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="IndyMac.html">IndyMac Bank</a></td>
+ <td class="city">Pasadena</td>
+ <td class="state">CA</td>
+ <td class="cert">29730</td>
+ <td class="ai">OneWest Bank, FSB</td>
+ <td class="closing">July 11, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="first_integrity_bank.html">First Integrity Bank, NA</a></td>
+ <td class="city">Staples</td>
+ <td class="state">MN</td>
+ <td class="cert">12736</td>
+ <td class="ai">First International Bank and Trust</td>
+ <td class="closing">May 30, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="anb.html">ANB Financial, NA</a></td>
+ <td class="city">Bentonville</td>
+ <td class="state">AR</td>
+ <td class="cert">33901</td>
+ <td class="ai">Pulaski Bank and Trust Company</td>
+ <td class="closing">May 9, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="Hume.html">Hume Bank</a></td>
+ <td class="city">Hume</td>
+ <td class="state">MO</td>
+ <td class="cert">1971</td>
+ <td class="ai">Security Bank</td>
+ <td class="closing">March 7, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="Douglass.html">Douglass National Bank</a></td>
+ <td class="city">Kansas City</td>
+ <td class="state">MO</td>
+ <td class="cert">24660</td>
+ <td class="ai">Liberty Bank and Trust Company</td>
+ <td class="closing">January 25, 2008</td>
+ <td class="updated">October 26, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="MiamiValley.html">Miami Valley Bank</a></td>
+ <td class="city">Lakeview</td>
+ <td class="state">OH</td>
+ <td class="cert">16848</td>
+ <td class="ai">The Citizens Banking Company</td>
+ <td class="closing">October 4, 2007</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="NetBank.html">NetBank</a></td>
+ <td class="city">Alpharetta</td>
+ <td class="state">GA</td>
+ <td class="cert">32575</td>
+ <td class="ai">ING DIRECT</td>
+ <td class="closing">September 28, 2007</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="MetropolitanSB.html">Metropolitan Savings Bank</a></td>
+ <td class="city">Pittsburgh</td>
+ <td class="state">PA</td>
+ <td class="cert">35353</td>
+ <td class="ai">Allegheny Valley Bank of Pittsburgh</td>
+ <td class="closing">February 2, 2007</td>
+ <td class="updated">October 27, 2010</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ephraim.html">Bank of Ephraim</a></td>
+ <td class="city">Ephraim</td>
+ <td class="state">UT</td>
+ <td class="cert">1249</td>
+ <td class="ai">Far West Bank</td>
+ <td class="closing">June 25, 2004</td>
+ <td class="updated">April 9, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="reliance.html">Reliance Bank</a></td>
+ <td class="city">White Plains</td>
+ <td class="state">NY</td>
+ <td class="cert">26778</td>
+ <td class="ai">Union State Bank</td>
+ <td class="closing">March 19, 2004</td>
+ <td class="updated">April 9, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="gnb.html">Guaranty National Bank of Tallahassee</a></td>
+ <td class="city">Tallahassee</td>
+ <td class="state">FL</td>
+ <td class="cert">26838</td>
+ <td class="ai">Hancock Bank of Florida</td>
+ <td class="closing">March 12, 2004</td>
+ <td class="updated">June 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="dollar.html">Dollar Savings Bank</a></td>
+ <td class="city">Newark</td>
+ <td class="state">NJ</td>
+ <td class="cert">31330</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">February 14, 2004</td>
+ <td class="updated">April 9, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pulaski.html">Pulaski Savings Bank</a></td>
+ <td class="city">Philadelphia</td>
+ <td class="state">PA</td>
+ <td class="cert">27203</td>
+ <td class="ai">Earthstar Bank</td>
+ <td class="closing">November 14, 2003</td>
+ <td class="updated">July 22, 2005</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="blanchardville.html">First National Bank of Blanchardville</a></td>
+ <td class="city">Blanchardville</td>
+ <td class="state">WI</td>
+ <td class="cert">11639</td>
+ <td class="ai">The Park Bank</td>
+ <td class="closing">May 9, 2003</td>
+ <td class="updated">June 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="spbank.html">Southern Pacific Bank</a></td>
+ <td class="city">Torrance</td>
+ <td class="state">CA</td>
+ <td class="cert">27094</td>
+ <td class="ai">Beal Bank</td>
+ <td class="closing">February 7, 2003</td>
+ <td class="updated">October 20, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="farmers.html">Farmers Bank of Cheneyville</a></td>
+ <td class="city">Cheneyville</td>
+ <td class="state">LA</td>
+ <td class="cert">16445</td>
+ <td class="ai">Sabine State Bank & Trust</td>
+ <td class="closing">December 17, 2002</td>
+ <td class="updated">October 20, 2004</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofalamo.html">Bank of Alamo</a></td>
+ <td class="city">Alamo</td>
+ <td class="state">TN</td>
+ <td class="cert">9961</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">November 8, 2002</td>
+ <td class="updated">March 18, 2005</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amtrade.html">AmTrade International Bank</a><br><a href="amtrade-spanish.html">En Espanol</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">33784</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">September 30, 2002</td>
+ <td class="updated">September 11, 2006</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="universal.html">Universal Federal Savings Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">29355</td>
+ <td class="ai">Chicago Community Bank</td>
+ <td class="closing">June 27, 2002</td>
+ <td class="updated">April 9, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cbc.html">Connecticut Bank of Commerce</a></td>
+ <td class="city">Stamford</td>
+ <td class="state">CT</td>
+ <td class="cert">19183</td>
+ <td class="ai">Hudson United Bank</td>
+ <td class="closing">June 26, 2002</td>
+ <td class="updated">February 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newcentury.html">New Century Bank</a></td>
+ <td class="city">Shelby Township</td>
+ <td class="state">MI</td>
+ <td class="cert">34979</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 28, 2002</td>
+ <td class="updated">March 18, 2005</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="netfirst.html">Net 1st National Bank</a></td>
+ <td class="city">Boca Raton</td>
+ <td class="state">FL</td>
+ <td class="cert">26652</td>
+ <td class="ai">Bank Leumi USA</td>
+ <td class="closing">March 1, 2002</td>
+ <td class="updated">April 9, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="nextbank.html">NextBank, NA</a></td>
+ <td class="city">Phoenix</td>
+ <td class="state">AZ</td>
+ <td class="cert">22314</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">February 7, 2002</td>
+ <td class="updated">August 27, 2010</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="Oakwood.html">Oakwood Deposit Bank Co.</a></td>
+ <td class="city">Oakwood</td>
+ <td class="state">OH</td>
+ <td class="cert">8966</td>
+ <td class="ai">The State Bank & Trust Company</td>
+ <td class="closing">February 1, 2002</td>
+ <td class="updated">October 25, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sierrablanca.html">Bank of Sierra Blanca</a></td>
+ <td class="city">Sierra Blanca</td>
+ <td class="state">TX</td>
+ <td class="cert">22002</td>
+ <td class="ai">The Security State Bank of Pecos</td>
+ <td class="closing">January 18, 2002</td>
+ <td class="updated">November 6, 2003</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="hamilton.html">Hamilton Bank, NA</a><br><a href="hamilton-spanish.html">En Espanol</a></td>
+ <td class="city">Miami</td>
+ <td class="state">FL</td>
+ <td class="cert">24382</td>
+ <td class="ai">Israel Discount Bank of New York</td>
+ <td class="closing">January 11, 2002</td>
+ <td class="updated">June 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sinclair.html">Sinclair National Bank</a></td>
+ <td class="city">Gravette</td>
+ <td class="state">AR</td>
+ <td class="cert">34248</td>
+ <td class="ai">Delta Trust & Bank</td>
+ <td class="closing">September 7, 2001</td>
+ <td class="updated">February 10, 2004</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="superior.html">Superior Bank, FSB</a></td>
+ <td class="city">Hinsdale</td>
+ <td class="state">IL</td>
+ <td class="cert">32646</td>
+ <td class="ai">Superior Federal, FSB</td>
+ <td class="closing">July 27, 2001</td>
+ <td class="updated">June 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="Malta.html">Malta National Bank</a></td>
+ <td class="city">Malta</td>
+ <td class="state">OH</td>
+ <td class="cert">6629</td>
+ <td class="ai">North Valley Bank</td>
+ <td class="closing">May 3, 2001</td>
+ <td class="updated">November 18, 2002</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstalliance.html">First Alliance Bank & Trust Co.</a></td>
+ <td class="city">Manchester</td>
+ <td class="state">NH</td>
+ <td class="cert">34264</td>
+ <td class="ai">Southern New Hampshire Bank & Trust</td>
+ <td class="closing">February 2, 2001</td>
+ <td class="updated">February 18, 2003</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="nsb.html">National State Bank of Metropolis</a></td>
+ <td class="city">Metropolis</td>
+ <td class="state">IL</td>
+ <td class="cert">3815</td>
+ <td class="ai">Banterra Bank of Marion</td>
+ <td class="closing">December 14, 2000</td>
+ <td class="updated">March 17, 2005</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="boh.html">Bank of Honolulu</a></td>
+ <td class="city">Honolulu</td>
+ <td class="state">HI</td>
+ <td class="cert">21029</td>
+ <td class="ai">Bank of the Orient</td>
+ <td class="closing">October 13, 2000</td>
+ <td class="updated">March 17, 2005</td>
+ </tr>
+ </tbody>
+ </table>
+ </div>
- <div>
- <ul id="footer-bottom">
- <li><a href="/about/freedom/" title="Freedom of Information Act (FOIA) Service Center">Freedom of Information Act (FOIA) Service Center</a></li>
- <li>|</li>
- <li><a href="/open/" title="FDIC Open Government Webpage">FDIC Open Government Webpage</a><a href="/about/diversity/nofear/" title="No FEAR Act Data"></a></li> <li>|</li>
- <li><a href="/about/diversity/nofear/" title="No FEAR Act Data">No FEAR Act Data</a><a href="/about/diversity/nofear/" title="No FEAR Act Data"></a></li>
- </ul>
- </div>
- </div><!-- end of footer container -->
-<!-- end footer -->
-</div><!-- ends site-container -->
+</div>
+<div id="page_foot">
+ <div class="date">Last Updated 05/31/2013</div>
+ <div class="email"><a href="mailto:cservicefdicdal@fdic.gov">cservicefdicdal@fdic.gov</a></div>
+ <div class="clear"></div>
+</div>
-<script language="JavaScript" type="text/javascript">
+<!-- START of Footer -->
+<footer>
+<link rel="stylesheet" type="text/css" href="/responsive/footer/css/footer.css" />
+<div id="responsive_footer">
+ <div id="responsive_footer-full">
+ <ul>
+ <li><a href="/" title="Home">Home</a></li>
+ <li><a href="/about/contact/ask/" title="Contact Us">Contact Us</a></li>
+ <li><a href="/search/" title="Search">Search</a></li>
+ <li><a href="/help/" title="Help">Help</a></li>
+ <li><a href="/sitemap/" title="SiteMap">SiteMap</a></li>
+ <li><a href="/regulations/laws/forms/" title="Forms">Forms</a></li>
+ <li><a href="/quicklinks/spanish.html" title="En Español">En Español</a></li>
+ </ul>
+ <hr>
+ <ul>
+ <li><a href="/about/policies/" title="Website Policies">Website Policies</a></li>
+ <li><a href="/about/privacy/policy/" title="Privacy Policy">Privacy Policy</a></li>
+ <li><a href="/plainlanguage/" title="Privacy Policy">Plain Writing Act of 2010 </a></li>
+ <li><a href="http://www.usa.gov/" title="USA.gov">USA.gov</a></li>
+ <li><a href="http://www.fdicoig.gov/" title="FDIC Office of Inspector General">FDIC Office of Inspector General</a></li>
+ </ul>
+ <hr>
+ <ul>
+ <li><a href="/about/freedom/" title="Freedom of Information Act (FOIA) Service Center">Freedom of Information Act (FOIA) Service Center</a></li>
+ <li><a href="/open/" title="FDIC Open Government Webpage">FDIC Open Government Webpage</a></li>
+ <li><a href="/about/diversity/nofear/" title="No FEAR Act Data">No FEAR Act Data</a></li>
+ </ul>
+ </div>
+ <div id="responsive_footer-small">
+ <ul>
+ <li><a href="/" title="Home">Home</a></li>
+ <li><a href="/about/contact/ask/" title="Contact Us">Contact Us</a></li>
+ <li><a href="/about/policies/" title="Website Policies">Website Policies</a></li>
+ <li><a href="/search/" title="Search">Search</a></li>
+ </ul>
+ </div>
+</div>
+</footer>
+<!-- START Omniture SiteCatalyst Code -->
+<script language="JavaScript" type="text/javascript" src="/js/s_code_v1.js"></script>
+<script type="text/javascript">
/************* DO NOT ALTER ANYTHING BELOW THIS LINE ! **************/
var s_code=s.t();if(s_code)document.write(s_code)</script>
-<script language="JavaScript" type="text/javascript">
+<script type="text/javascript">
if(navigator.appVersion.indexOf('MSIE')>=0)document.write(unescape('%3C')+'\!-'+'-')
</script>
<noscript>
<a href="http://www.omniture.com" title="Web Analytics">
-<img src="http://fdic.122.2o7.net/b/ss/fdicgovprod/1/H.21--NS/0?[AQB]%26cl=Session%26AQE" height="1" width="1" border="0" alt="" /></a>
+<img src="http://fdic.122.2o7.net/b/ss/fdicgovprod/1/H.21--NS/0?[AQB]%26cl=Session%26AQE" height="1" width="1" border="0" alt="" /></a></li>
</noscript>
-
<!--/DO NOT REMOVE/-->
-<!-- End SiteCatalyst code version: H.21. -->
-<!-- end footer -->
-<!-- END FOOTER INCLUDE -->
+<!-- END Omniture SiteCatalyst Code -->
+<!-- END of Footer -->
+<script type="text/javascript" src="/responsive/js/jquery.tablesorter.js"></script>
+<script type="text/javascript" src="banklist.js"></script>
</body>
</html>
diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py
index 418b5471d0406..ea3c0520de169 100644
--- a/pandas/io/tests/test_html.py
+++ b/pandas/io/tests/test_html.py
@@ -41,7 +41,7 @@ def _skip_if_none(module_names):
if isinstance(module_names, basestring):
_skip_if_no(module_names)
else:
- if not any(_have_module(module_name) for module_name in module_names):
+ if not all(_have_module(module_name) for module_name in module_names):
raise nose.SkipTest
@@ -388,7 +388,7 @@ def test(self):
def run_read_html(self, *args, **kwargs):
kwargs['flavor'] = 'bs4'
- _skip_if_no('lxml')
+ _skip_if_none(('lxml', 'bs4'))
parser = _BeautifulSoupLxmlFrameParser
return _run_read_html(parser, *args, **kwargs)
@@ -400,7 +400,7 @@ def test(self):
def run_read_html(self, *args, **kwargs):
kwargs['flavor'] = 'bs4'
- _skip_if_no('html5lib')
+ _skip_if_none(('html5lib', 'bs4'))
parser = _BeautifulSoupHtml5LibFrameParser
return _run_read_html(parser, *args, **kwargs)
@@ -417,17 +417,16 @@ def try_remove_ws(x):
ground_truth = read_csv(os.path.join(DATA_PATH, 'banklist.csv'),
converters={'Updated Date': Timestamp,
'Closing Date': Timestamp})
- # these will not
self.assertTupleEqual(df.shape, ground_truth.shape)
- old = ['First Vietnamese American Bank In Vietnamese',
- 'Westernbank Puerto Rico En Espanol',
- 'R-G Premier Bank of Puerto Rico En Espanol',
- 'Eurobank En Espanol', 'Sanderson State Bank En Espanol',
- 'Washington Mutual Bank (Including its subsidiary Washington '
+ old = ['First Vietnamese American BankIn Vietnamese',
+ 'Westernbank Puerto RicoEn Espanol',
+ 'R-G Premier Bank of Puerto RicoEn Espanol',
+ 'EurobankEn Espanol', 'Sanderson State BankEn Espanol',
+ 'Washington Mutual Bank(Including its subsidiary Washington '
'Mutual Bank FSB)',
- 'Silver State Bank En Espanol',
+ 'Silver State BankEn Espanol',
'AmTrade International BankEn Espanol',
- 'Hamilton Bank, NA En Espanol',
+ 'Hamilton Bank, NAEn Espanol',
'The Citizens Savings BankPioneer Community Bank, Inc.']
new = ['First Vietnamese American Bank', 'Westernbank Puerto Rico',
'R-G Premier Bank of Puerto Rico', 'Eurobank',
| https://api.github.com/repos/pandas-dev/pandas/pulls/3741 | 2013-06-03T06:13:06Z | 2013-06-04T16:58:59Z | 2013-06-04T16:58:59Z | 2014-06-12T18:25:57Z | |
Make div use truediv instead of div | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 9c0a2843370f4..f13fb6c26eecd 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -839,7 +839,12 @@ def __contains__(self, key):
add = _arith_method(operator.add, 'add', '+')
mul = _arith_method(operator.mul, 'multiply', '*')
sub = _arith_method(operator.sub, 'subtract', '-')
- div = divide = _arith_method(lambda x, y: x / y, 'divide', '/')
+ if not py3compat.PY3:
+ # only need to explicitly cast to float in python 2
+ truediv = div = divide = _arith_method(lambda x, y: x / (y + 0.), 'divide', '/')
+ else:
+ truediv = div = divide = _arith_method(lambda x, y: x / y, 'divide', '/')
+ floordiv = _arith_method(lambda x, y: x // y, 'floor division', '//')
pow = _arith_method(operator.pow, 'pow', '**')
radd = _arith_method(_radd_compat, 'radd')
@@ -876,6 +881,9 @@ def __contains__(self, key):
__xor__ = _arith_method(operator.xor, '__xor__')
# Python 2 division methods
+ # behaves similarly to numpy when not
+ # using the future import here, making the use
+ # of `div` different than `__div__`
if not py3compat.PY3:
__div__ = _arith_method(operator.div, '__div__', '/',
default_axis=None, fill_zeros=np.inf)
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 0a099661c58f1..0ffef66b6421a 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -1643,13 +1643,8 @@ def f(self, other, axis=0):
cls.add = _panel_arith_method(operator.add, 'add')
cls.subtract = cls.sub = _panel_arith_method(operator.sub, 'subtract')
cls.multiply = cls.mul = _panel_arith_method(operator.mul, 'multiply')
-
- try:
- cls.divide = cls.div = _panel_arith_method(operator.div, 'divide')
- except AttributeError: # pragma: no cover
- # Python 3
- cls.divide = cls.div = _panel_arith_method(
- operator.truediv, 'divide')
+ cls.floordiv = _panel_arith_method(operator.floordiv, 'floor division')
+ cls.truediv = cls.divide = cls.div = _panel_arith_method(operator.truediv, 'divide')
_agg_doc = """
Return %(desc)s over requested axis
diff --git a/pandas/core/series.py b/pandas/core/series.py
index ab8a48f4b8eb9..3b0d2a5e28ad5 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2126,11 +2126,8 @@ def _binop(self, other, func, level=None, fill_value=None):
add = _flex_method(operator.add, 'add')
sub = _flex_method(operator.sub, 'subtract')
mul = _flex_method(operator.mul, 'multiply')
- try:
- div = _flex_method(operator.div, 'divide')
- except AttributeError: # pragma: no cover
- # Python 3
- div = _flex_method(operator.truediv, 'divide')
+ div = truediv = _flex_method(operator.truediv, 'divide')
+ floordiv = _flex_method(operator.floordiv, 'floor division')
def combine(self, other, func, fill_value=nan):
"""
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 8964b21756439..1649e17a55549 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4191,6 +4191,24 @@ def test_div(self):
result2 = DataFrame(p.values.astype('float64')/0,index=p.index,columns=p.columns).fillna(np.inf)
assert_frame_equal(result2,expected)
+ def test_truediv(self):
+ import operator
+ p = DataFrame(tm.getIntegerSeriesData())
+ result = p.truediv(2)
+ expected = operator.truediv(p, 2)
+ assert_frame_equal(result, expected)
+
+ # set up dataframe divisble by 3
+ p = (p / 10.).astype(int) * 3
+ result = p.truediv(3)
+ expected = operator.truediv(p, 3)
+ assert_frame_equal(result, expected)
+
+ # test out axes
+ result = p.truediv(3, axis=1)
+ assert_frame_equal(result, expected)
+
+
def test_logical_operators(self):
import operator
@@ -4282,7 +4300,7 @@ def test_first_last_valid(self):
self.assert_(index == frame.index[-6])
def test_arith_flex_frame(self):
- ops = ['add', 'sub', 'mul', 'div', 'pow']
+ ops = ['add', 'sub', 'mul', 'div', 'pow', 'floordiv']
aliases = {'div': 'truediv'}
for op in ops:
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 58b7ac272401f..6959a2721ad4a 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -302,6 +302,8 @@ def check_op(op, name):
check_op(operator.add, 'add')
check_op(operator.sub, 'subtract')
check_op(operator.mul, 'multiply')
+ check_op(operator.floordiv, 'floordiv')
+ check_op(operator.truediv, 'truediv')
if py3compat.PY3:
check_op(operator.truediv, 'divide')
else:
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index cba908f7136a9..57039a8350303 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1809,6 +1809,22 @@ def test_div(self):
else:
assert_series_equal(result,p['first'])
+
+ # only matters for Python 2.x, otherwise always truediv
+ def test_truediv(self):
+ arr = np.array([1,2,3,4,5])
+ p = Series(arr)
+ result = p.truediv(2)
+ expected = Series(operator.truediv(arr, 2))
+ assert_series_equal(result, expected)
+
+ arr = np.arange(0,10) * 3
+ p = Series(arr)
+ result = p.truediv(3)
+ expected = Series(operator.truediv(arr, 3))
+ assert_series_equal(result, expected)
+
+
def test_operators(self):
def _check_op(series, other, op, pos_only=False):
@@ -2416,12 +2432,8 @@ def _check_fill(meth, op, a, b, fill_value=0):
a = Series([nan, 1., 2., 3., nan], index=np.arange(5))
b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))
- ops = [Series.add, Series.sub, Series.mul, Series.div]
- equivs = [operator.add, operator.sub, operator.mul]
- if py3compat.PY3:
- equivs.append(operator.truediv)
- else:
- equivs.append(operator.div)
+ ops = [Series.add, Series.sub, Series.mul, Series.floordiv, Series.div]
+ equivs = [operator.add, operator.sub, operator.mul, operator.floordiv, operator.truediv]
fillvals = [0, 0, 1, 1]
for op, equiv_op, fv in zip(ops, equivs, fillvals):
diff --git a/pandas/tests/test_truediv.py b/pandas/tests/test_truediv.py
new file mode 100644
index 0000000000000..e61bab15bab9a
--- /dev/null
+++ b/pandas/tests/test_truediv.py
@@ -0,0 +1,41 @@
+from __future__ import division
+# pylint: disable-msg=W0612,E1101
+import numpy as np
+import pandas as pan
+import pandas.util.testing as tm
+from pandas.core.api import (DataFrame, Index, Series, notnull, isnull,
+ MultiIndex, DatetimeIndex, Timestamp, Period)
+from pandas.util.testing import (assert_almost_equal,
+ assert_series_equal,
+ assert_frame_equal,
+ makeCustomDataframe as mkdf,
+ ensure_clean)
+
+class TestDivUnderTruediv(object):
+ def test_frame_div_dtype(self):
+ p = DataFrame({"A": np.arange(10)})
+ result = p.div(5)
+ assert result.A.dtype.kind == "f", "Expected float dtype, instead saw %r" % result.A.dtype
+
+ def test_series_div_dtype(self):
+ p = Series(np.arange(10))
+ result = p.div(4)
+ assert result.dtype.kind == "f", "Expected float dtype, instead saw %r" % result.dtype
+
+ def test_frame_div(self):
+ p = DataFrame(tm.getIntegerSeriesData())
+ result = p.div(3)
+ expected = p.truediv(3)
+ assert_frame_equal(result, expected)
+
+ result = p.div(p.irow(0), axis=1)
+ expected = p.truediv(p.irow(0), axis=1)
+ assert_frame_equal(result, expected)
+
+ def test_series_div(self):
+ p = DataFrame(tm.getIntegerSeriesData())
+ series = p.icol(0)
+ result = series.div(5)
+ expected = series.truediv(5)
+ assert_series_equal(result, expected)
+
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 823d2c81bb72c..519df579f8393 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -13,6 +13,7 @@
from distutils.version import LooseVersion
from numpy.random import randn
+from numpy.random import randint
import numpy as np
from pandas.core.common import isnull, _is_sequence
@@ -330,6 +331,11 @@ def getSeriesData():
index = makeStringIndex(N)
return dict((c, Series(randn(N), index=index)) for c in getCols(K))
+def getIntegerSeriesData():
+ """same as `getSeriesData` but returns random positive and negative *integers* instead"""
+ index = makeStringIndex(N)
+ maxint = np.iinfo('i').max
+ return dict((c, Series(randint(-maxint, maxint, N), index=index)) for c in getCols(K))
def makeDataFrame():
data = getSeriesData()
| This changes the `div()` method in pandas on Python 2.x to always do
`truediv` rather than integer division. Currently, pandas doesn't
respect `from __future__ import division` (it's defined in a file that
doesn't start with the import, so it always does integer division).
`__div__`, `__rdiv__`, and `__idiv__` remain the same, so that the `/`
operator still works as expected. This PR also adds `truediv()` and
`floordiv()` methods to series, panel and frame, plus a test utility
to generate integer series.
This change makes `div()` behave the same way in Python 2 and in
Python 3. It also limits the surprise that comes from using the
future import with pandas in Python 2.
You can see the difference between Python 2 and 3 with the following
snippet:
```
from __future__ import division
import pandas
from pandas.util.testing import assert_frame_equal
df = pandas.DataFrame({"A": range(4)})
result = df.div(4)
expected = df / 4
assert_frame_equal(result, expected)
```
It's not easy (or particularly useful) to detect which division setting
is currently active. Plus, if you were to check it, the actual behavior
would depend on [which file imports `pandas`
first](http://stackoverflow.com/questions/16880552/dynamically-detecting-division-future-import).
There might be too many tests associated with this, so if you'd like me
to slim them down, I'd be happy to do so.
Another alternative would to this patch would be to follow the python
`operator` convention and remove `div()` altogether from Python 3, while
leaving `div()` as it previously was in Python 2; however, I think that
will continue to suprise users.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3739 | 2013-06-02T17:17:07Z | 2013-06-02T19:28:32Z | null | 2014-07-03T04:46:53Z |
TST: Fix assert_almost_equal error message | diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 823d2c81bb72c..dd86862a2d551 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -141,7 +141,7 @@ def assert_almost_equal(a, b, check_less_precise = False):
assert_almost_equal(a[i], b[i], check_less_precise)
return True
- err_msg = lambda a, b: 'expected %.5f but got %.5f' % (a, b)
+ err_msg = lambda a, b: 'expected %.5f but got %.5f' % (b, a)
if isnull(a):
np.testing.assert_(isnull(b))
| Most tests (in pandas' test suite and in general) are of form
`assert_almost_equal(result, expected)`. Verbose error message was
treating its first argument as expected, this is now fixed.
Previous message was:
``` python
err_msg = lambda a, b: 'expected %.5f but got %.5f' % (a, b)
```
But a, the first argument, is actually `actual`, not expected.
Everywhere else, it makes sense to display `"%s != %s" % (a,b)`
| https://api.github.com/repos/pandas-dev/pandas/pulls/3737 | 2013-06-02T09:15:07Z | 2013-06-03T17:11:19Z | 2013-06-03T17:11:19Z | 2014-07-16T08:11:22Z |
PERF: speed up where operations when splitting blocks (GH3733) | diff --git a/RELEASE.rst b/RELEASE.rst
index 35741f7eb008f..4573b45ccaf16 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -76,6 +76,7 @@ pandas 0.11.1
GH3572_). This happens before any drawing takes place which elimnates any
spurious plots from showing up.
- Added Faq section on repr display options, to help users customize their setup.
+ - ``where`` operations that result in block splitting are much faster (GH3733_)
**API Changes**
@@ -116,6 +117,8 @@ pandas 0.11.1
- the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
deprecated
- Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_)
+ - ``as_matrix`` with mixed signed and unsigned dtypes will result in 2 x the lcd of the unsigned
+ as an int, maxing with ``int64``, to avoid precision issues (GH3733_)
**Bug Fixes**
@@ -273,6 +276,7 @@ pandas 0.11.1
.. _GH3691: https://github.com/pydata/pandas/issues/3691
.. _GH3696: https://github.com/pydata/pandas/issues/3696
.. _GH3667: https://github.com/pydata/pandas/issues/3667
+.. _GH3733: https://github.com/pydata/pandas/issues/3733
pandas 0.11.0
=============
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 8b711f5e077ce..af1543dad0314 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -558,42 +558,38 @@ def func(c,v,o):
result.fill(np.nan)
return result
- def create_block(result, items, transpose=True):
+ # see if we can operate on the entire block, or need item-by-item
+ result = func(cond,values,other)
+ if self._can_hold_na:
+
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
- if transpose and is_transposed:
+ if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
- return make_block(result, items, self.ref_items)
+ return make_block(result, self.items, self.ref_items)
- # see if we can operate on the entire block, or need item-by-item
- if not self._can_hold_na:
- axis = cond.ndim-1
- result_blocks = []
- for item in self.items:
- loc = self.items.get_loc(item)
- item = self.items.take([loc])
- v = values.take([loc],axis=axis)
- c = cond.take([loc],axis=axis)
- o = other.take([loc],axis=axis) if hasattr(other,'shape') else other
-
- result = func(c,v,o)
- if len(result) == 1:
- result = np.repeat(result,self.shape[1:])
-
- result = _block_shape(result,ndim=self.ndim,shape=self.shape[1:])
- result_blocks.append(create_block(result, item, transpose=False))
-
- return result_blocks
- else:
- result = func(cond,values,other)
- return create_block(result, self.items)
+ # might need to separate out blocks
+ axis = cond.ndim-1
+ cond = cond.swapaxes(axis,0)
+ mask = np.array([ cond[i].all() for i in enumerate(range(cond.shape[0]))],dtype=bool)
+
+ result_blocks = []
+ for m in [mask, ~mask]:
+ if m.any():
+ items = self.items[m]
+ slices = [slice(None)] * cond.ndim
+ slices[axis] = self.items.get_indexer(items)
+ r = self._try_cast_result(result[slices])
+ result_blocks.append(make_block(r.T, items, self.ref_items))
+
+ return result_blocks
class NumericBlock(Block):
is_numeric = True
@@ -2429,7 +2425,22 @@ def _lcd_dtype(l):
elif have_bool:
return np.dtype(bool)
elif have_int and not have_float and not have_complex:
- return _lcd_dtype(counts[IntBlock])
+
+ # if we are mixing unsigned and signed, then return
+ # the next biggest int type (if we can)
+ lcd = _lcd_dtype(counts[IntBlock])
+ kinds = set([ i.dtype.kind for i in counts[IntBlock] ])
+ if len(kinds) == 1:
+ return lcd
+
+ if lcd == 'uint64' or lcd == 'int64':
+ return np.dtype('int64')
+
+ # return 1 bigger on the itemsize if unsinged
+ if lcd.kind == 'u':
+ return np.dtype('int%s' % (lcd.itemsize*8*2))
+ return lcd
+
elif have_dt64 and not have_float and not have_complex:
return np.dtype('M8[ns]')
elif have_complex:
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index fa6579ca61358..8964b21756439 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -271,6 +271,16 @@ def test_getitem_boolean_casting(self):
expected = Series({'float64': 6, 'int32' : 1, 'int64' : 1})
assert_series_equal(result, expected)
+ # where dtype conversions
+ # GH 3733
+ df = DataFrame(data = np.random.randn(100, 50))
+ df = df.where(df > 0) # create nans
+ bools = df > 0
+ mask = isnull(df)
+ expected = bools.astype(float).mask(mask)
+ result = bools.mask(mask)
+ assert_frame_equal(result,expected)
+
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
@@ -7568,8 +7578,10 @@ def test_where(self):
def _safe_add(df):
# only add to the numeric items
- return DataFrame(dict([ (c,s+1) if issubclass(s.dtype.type, (np.integer,np.floating)) else (c,s) for c, s in df.iteritems() ]))
-
+ def is_ok(s):
+ return issubclass(s.dtype.type, (np.integer,np.floating)) and s.dtype != 'uint8'
+ return DataFrame(dict([ (c,s+1) if is_ok(s) else (c,s) for c, s in df.iteritems() ]))
+
def _check_get(df, cond, check_dtypes = True):
other1 = _safe_add(df)
rs = df.where(cond, other1)
@@ -7605,7 +7617,7 @@ def _check_get(df, cond, check_dtypes = True):
def _check_align(df, cond, other, check_dtypes = True):
rs = df.where(cond, other)
for i, k in enumerate(rs.columns):
- v = rs[k]
+ result = rs[k]
d = df[k].values
c = cond[k].reindex(df[k].index).fillna(False).values
@@ -7613,12 +7625,16 @@ def _check_align(df, cond, other, check_dtypes = True):
o = other
else:
if isinstance(other,np.ndarray):
- o = Series(other[:,i],index=v.index).values
+ o = Series(other[:,i],index=result.index).values
else:
o = other[k].values
new_values = d if c.all() else np.where(c, d, o)
- assert_series_equal(v, Series(new_values,index=v.index))
+ expected = Series(new_values,index=result.index)
+
+ # since we can't always have the correct numpy dtype
+ # as numpy doesn't know how to downcast, don't check
+ assert_series_equal(result, expected, check_dtype=False)
# dtypes
# can't check dtype when other is an ndarray
@@ -9894,14 +9910,14 @@ def test_as_matrix_lcd(self):
self.assert_(values.dtype == np.float16)
values = self.mixed_int.as_matrix(['A','B','C','D'])
- self.assert_(values.dtype == np.uint64)
+ self.assert_(values.dtype == np.int64)
values = self.mixed_int.as_matrix(['A','D'])
self.assert_(values.dtype == np.int64)
# guess all ints are cast to uints....
values = self.mixed_int.as_matrix(['A','B','C'])
- self.assert_(values.dtype == np.uint64)
+ self.assert_(values.dtype == np.int64)
values = self.mixed_int.as_matrix(['A','C'])
self.assert_(values.dtype == np.int32)
diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py
index 7745450e5c03b..122851bf91a26 100644
--- a/vb_suite/frame_methods.py
+++ b/vb_suite/frame_methods.py
@@ -177,3 +177,18 @@ def f(K=500):
"""
frame_xs_col = Benchmark('df.xs(50000,axis = 1)', setup)
+
+## masking
+setup = common_setup + """
+data = np.random.randn(1000, 500)
+df = DataFrame(data)
+df = df.where(df > 0) # create nans
+bools = df > 0
+mask = isnull(df)
+"""
+
+mask_bools = Benchmark('bools.mask(mask)', setup,
+ start_date=datetime(2013,1,1))
+
+mask_floats = Benchmark('bools.astype(float).mask(mask)', setup,
+ start_date=datetime(2013,1,1))
| close #3733
| https://api.github.com/repos/pandas-dev/pandas/pulls/3736 | 2013-06-01T20:37:47Z | 2013-06-02T11:58:40Z | 2013-06-02T11:58:40Z | 2014-07-16T08:11:20Z |
BUG/BLD: pytables version checking was incorrect | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 046263a9cb63c..0a86d72a05f16 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -109,15 +109,12 @@ def _tables():
global _table_supports_index
if _table_mod is None:
import tables
+ from distutils.version import LooseVersion
_table_mod = tables
# version requirements
- ver = tables.__version__.split('.')
- try:
- if int(ver[0]) >= 2 and int(ver[1][0]) >= 3:
- _table_supports_index = True
- except:
- pass
+ ver = tables.__version__
+ _table_supports_index = LooseVersion(ver) >= '2.3'
return _table_mod
| https://api.github.com/repos/pandas-dev/pandas/pulls/3735 | 2013-06-01T20:15:39Z | 2013-06-01T20:38:56Z | 2013-06-01T20:38:56Z | 2014-07-16T08:11:19Z | |
BLD: test_perf.py, add --base-pickle --target-pickle options to test_perf | diff --git a/vb_suite/test_perf.py b/vb_suite/test_perf.py
index 72b441d79be84..b0d029de7371a 100755
--- a/vb_suite/test_perf.py
+++ b/vb_suite/test_perf.py
@@ -37,10 +37,18 @@
import random
import numpy as np
+import pandas as pd
from pandas import DataFrame, Series
+try:
+ import git # gitpython
+except Exception:
+ print("Error: Please install the `gitpython` package\n")
+ sys.exit(1)
+
from suite import REPO_PATH
+VB_DIR = os.path.dirname(os.path.abspath(__file__))
DEFAULT_MIN_DURATION = 0.01
HEAD_COL="head[ms]"
BASE_COL="base[ms]"
@@ -57,6 +65,14 @@
parser.add_argument('-t', '--target-commit',
help='The commit to compare against the baseline (default: HEAD).',
type=str)
+parser.add_argument('--base-pickle',
+ help='name of pickle file with timings data generated by a former `-H -d FILE` run. '\
+ 'filename must be of the form <hash>-*.* or specify --base-commit seperately',
+ type=str)
+parser.add_argument('--target-pickle',
+ help='name of pickle file with timings data generated by a former `-H -d FILE` run '\
+ 'filename must be of the form <hash>-*.* or specify --target-commit seperately',
+ type=str)
parser.add_argument('-m', '--min-duration',
help='Minimum duration (in ms) of baseline test for inclusion in report (default: %.3f).' % DEFAULT_MIN_DURATION,
type=float,
@@ -104,8 +120,7 @@
parser.add_argument('-a', '--affinity',
metavar="a",
dest='affinity',
- default=1,
- type=int,
+ default=None,
help='set processor affinity of processm by default bind to cpu/core #1 only'
'requires the "affinity" python module , will raise Warning otherwise' )
@@ -206,21 +221,34 @@ def profile_comparative(benchmarks):
head_res = get_results_df(db, h_head)
baseline_res = get_results_df(db, h_baseline)
- totals = prep_totals(baseline_res, head_res)
-
- h_msg = repo.messages.get(h_head, "")
- b_msg = repo.messages.get(h_baseline, "")
- print_report(totals,h_head=h_head,h_msg=h_msg,
- h_baseline=h_baseline,b_msg=b_msg)
+ report_comparative(head_res,baseline_res)
- if args.outdf:
- prprint("The results DataFrame was written to '%s'\n" % args.outdf)
- totals.save(args.outdf)
finally:
# print("Disposing of TMP_DIR: %s" % TMP_DIR)
shutil.rmtree(TMP_DIR)
+def prep_pickle_for_total(df, agg_name='median'):
+ """
+ accepts a datafram resulting from invocation with -H -d o.pickle
+ If multiple data columns are present (-N was used), the
+ `agg_name` attr of the datafram will be used to reduce
+ them to a single value per vbench, df.median is used by defa
+ ult.
+
+ Returns a datadrame of the form expected by prep_totals
+ """
+ def prep(df):
+ agg = getattr(df,agg_name)
+ df = DataFrame(agg(1))
+ cols = list(df.columns)
+ cols[0]='timing'
+ df.columns=cols
+ df['name'] = list(df.index)
+ return df
+
+ return prep(df)
+
def prep_totals(head_res, baseline_res):
"""
Each argument should be a dataframe with 'timing' and 'name' columns
@@ -241,6 +269,27 @@ def prep_totals(head_res, baseline_res):
).sort("ratio").set_index('name') # sort in ascending order
return totals
+def report_comparative(head_res,baseline_res):
+ try:
+ r=git.Repo(VB_DIR)
+ except:
+ import pdb
+ pdb.set_trace()
+
+ totals = prep_totals(head_res,baseline_res)
+
+ h_head = args.target_commit
+ h_baseline = args.base_commit
+ h_msg = r.commit(h_head).message.strip()
+ b_msg = r.commit(h_baseline).message.strip()
+
+ print_report(totals,h_head=h_head,h_msg=h_msg,
+ h_baseline=h_baseline,b_msg=b_msg)
+
+ if args.outdf:
+ prprint("The results DataFrame was written to '%s'\n" % args.outdf)
+ totals.save(args.outdf)
+
def profile_head_single(benchmark):
import gc
results = []
@@ -398,18 +447,23 @@ def main():
random.seed(args.seed)
np.random.seed(args.seed)
- try:
- import affinity
- affinity.set_process_affinity_mask(0,args.affinity)
- assert affinity.get_process_affinity_mask(0) == args.affinity
- print("CPU affinity set to %d" % args.affinity)
- except ImportError:
- import warnings
- print("\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"+
- "The 'affinity' module is not available, results may be unreliable\n" +
- "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n"
- )
- time.sleep(2)
+ if args.base_pickle and args.target_pickle:
+ baseline_res = prep_pickle_for_total(pd.load(args.base_pickle))
+ target_res = prep_pickle_for_total(pd.load(args.target_pickle))
+
+ report_comparative(target_res, baseline_res)
+ sys.exit(0)
+
+ if args.affinity is not None:
+ try:
+ import affinity
+
+ affinity.set_process_affinity_mask(0,args.affinity)
+ assert affinity.get_process_affinity_mask(0) == args.affinity
+ print("CPU affinity set to %d" % args.affinity)
+ except ImportError:
+ print("-a/--afinity specified, but the 'affinity' module is not available, aborting.\n")
+ sys.exit(1)
print("\n")
prprint("LOG_FILE = %s" % args.log_file)
@@ -489,10 +543,40 @@ def inner(repo_path):
if __name__ == '__main__':
args = parser.parse_args()
- if not args.head and (not args.base_commit and not args.target_commit):
+ if (not args.head
+ and not (args.base_commit and args.target_commit)
+ and not (args.base_pickle and args.target_pickle)):
parser.print_help()
- else:
- import warnings
- warnings.filterwarnings('ignore',category=FutureWarning)
- warnings.filterwarnings('ignore',category=DeprecationWarning)
- main()
+ sys.exit(1)
+ elif ((args.base_pickle or args.target_pickle) and not
+ (args.base_pickle and args.target_pickle)):
+ print("Must specify Both --base-pickle and --target-pickle.")
+ sys.exit(1)
+
+ if ((args.base_pickle or args.target_pickle) and not
+ (args.base_commit and args.target_commit)):
+ if not args.base_commit:
+ print("base_commit not specified, Assuming base_pickle is named <commit>-foo.*")
+ args.base_commit = args.base_pickle.split('-')[0]
+ if not args.target_commit:
+ print("target_commit not specified, Assuming target_pickle is named <commit>-foo.*")
+ print(args.target_pickle.split('-')[0])
+ args.target_commit = args.target_pickle.split('-')[0]
+
+ import warnings
+ warnings.filterwarnings('ignore',category=FutureWarning)
+ warnings.filterwarnings('ignore',category=DeprecationWarning)
+
+ if args.base_commit and args.target_commit:
+ print("Verifying specified commits exist in repo...")
+ r=git.Repo(VB_DIR)
+ for c in [ args.base_commit, args.target_commit ]:
+ try:
+ msg = r.commit(c).message.strip()
+ except git.BadObject:
+ print("The commit '%s' was not found, aborting" % c)
+ sys.exit(1)
+ else:
+ print("%s: %s" % (c,msg))
+
+ main()
| Until now test_perf either generated a single commit report for HEAD,
or fell back to vbench to compare two commits. with this change, can now
compare results between saved results of test_perf -H invocations.
Flow:
- Use build_cache (cdev from #3156) to jump to desired commit using build_cache
- checkout current vb_suite from upstream/master
- use `test_perf -H -d <commit>-foo.pickle` to save timings to file, for
target and base commits
- Use test_perf options `--base-pickle`, `--target-pickle` to generate comparison report.
cc @jreback
```
λ ./test_perf.sh --base-pickle 31ecaa9-0.10.1.pickle --target-pickle f9eea30-0.11.0.pickle
This script compares the performance of two commits.
Make sure the python 'vbench' library is installed.
Setting the BUILD_CACHE_DIR env var to a temp directory will
potentially speed up subsequent runs.
base_commit not specified, Assuming base_pickle is named <commit>-foo.*
target_commit not specified, Assuming target_pickle is named <commit>-foo.*
f9eea30
Verifying specified commits exist in repo...
31ecaa9: RLS: set released to true
f9eea30: RLS: Version 0.11
***
Invoked with :
--ncalls: 3
--repeats: 3
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
frame_reindex_columns | 0.3260 | 0.2900 | 1.1241 |
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
Ratio < 1.0 means the target commit is faster then the baseline.
Seed used: 1234
Target [f9eea30] : RLS: Version 0.11
Base [31ecaa9] : RLS: set released to true
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3734 | 2013-06-01T19:49:33Z | 2013-06-01T19:50:22Z | 2013-06-01T19:50:22Z | 2014-06-24T15:20:16Z |
DOC: fix read_html attribute reading example | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 204dd2c984ba7..0a1f0e74255bb 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1021,9 +1021,9 @@ Specify an HTML attribute
.. ipython:: python
- dfs = read_html(url)
- len(dfs)
- dfs[0]
+ dfs1 = read_html(url, attrs={'id': 'table'})
+ dfs2 = read_html(url, attrs={'class': 'sortable'})
+ np.all(dfs1[0] == dfs2[0])
Use some combination of the above
| https://api.github.com/repos/pandas-dev/pandas/pulls/3732 | 2013-05-31T22:04:01Z | 2013-05-31T22:10:47Z | 2013-05-31T22:10:47Z | 2014-07-16T08:11:14Z | |
API: raise TypeError on most datetime64 reduction ops | diff --git a/RELEASE.rst b/RELEASE.rst
index 3a347246be8dd..8da3b4760c303 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -97,6 +97,12 @@ pandas 0.11.1
in your calls.
- Do not allow astypes on ``datetime64[ns]`` except to ``object``, and
``timedelta64[ns]`` to ``object/int`` (GH3425_)
+ - The behavior of ``datetime64`` dtypes has changed with respect to certain
+ so-called reduction operations (GH3726_). The following operations now
+ raise a ``TypeError`` when perfomed on a ``Series`` and return an *empty*
+ ``Series`` when performed on a ``DataFrame`` similar to performing these
+ operations on, for example, a ``DataFrame`` of ``slice`` objects:
+ - sum, prod, mean, std, var, skew, kurt, corr, and cov
- Do not allow datetimelike/timedeltalike creation except with valid types
(e.g. cannot pass ``datetime64[ms]``) (GH3423_)
- Add ``squeeze`` keyword to ``groupby`` to allow reduction from
@@ -294,6 +300,7 @@ pandas 0.11.1
.. _GH3748: https://github.com/pydata/pandas/issues/3748
.. _GH3741: https://github.com/pydata/pandas/issues/3741
.. _GH3750: https://github.com/pydata/pandas/issues/3750
+.. _GH3726: https://github.com/pydata/pandas/issues/3726
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index badb364d214d1..982b2f9f2eb3b 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -128,6 +128,17 @@ API changes
- ``DataFrame.to_html`` and ``DataFrame.to_latex`` now accept a path for
their first argument (GH3702_)
+ - Do not allow astypes on ``datetime64[ns]`` except to ``object``, and
+ ``timedelta64[ns]`` to ``object/int`` (GH3425_)
+
+ - The behavior of ``datetime64`` dtypes has changed with respect to certain
+ so-called reduction operations (GH3726_). The following operations now
+ raise a ``TypeError`` when perfomed on a ``Series`` and return an *empty*
+ ``Series`` when performed on a ``DataFrame`` similar to performing these
+ operations on, for example, a ``DataFrame`` of ``slice`` objects:
+
+ - sum, prod, mean, std, var, skew, kurt, corr, and cov
+
Enhancements
~~~~~~~~~~~~
@@ -345,3 +356,5 @@ on GitHub for a complete list.
.. _GH3696: https://github.com/pydata/pandas/issues/3696
.. _GH3667: https://github.com/pydata/pandas/issues/3667
.. _GH3741: https://github.com/pydata/pandas/issues/3741
+.. _GH3726: https://github.com/pydata/pandas/issues/3726
+.. _GH3425: https://github.com/pydata/pandas/issues/3425
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index f841c0dbecd8e..0d940dc348dc1 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -1,10 +1,11 @@
import sys
+import itertools
+import functools
import numpy as np
from pandas.core.common import isnull, notnull
import pandas.core.common as com
-import pandas.core.config as cf
import pandas.lib as lib
import pandas.algos as algos
import pandas.hashtable as _hash
@@ -17,41 +18,70 @@
_USE_BOTTLENECK = False
-def _bottleneck_switch(bn_name, alt, zero_value=None, **kwargs):
- try:
- bn_func = getattr(bn, bn_name)
- except (AttributeError, NameError): # pragma: no cover
- bn_func = None
+class disallow(object):
+ def __init__(self, *dtypes):
+ super(disallow, self).__init__()
+ self.dtypes = tuple(np.dtype(dtype).type for dtype in dtypes)
+
+ def check(self, obj):
+ return hasattr(obj, 'dtype') and issubclass(obj.dtype.type,
+ self.dtypes)
+
+ def __call__(self, f):
+ @functools.wraps(f)
+ def _f(*args, **kwargs):
+ obj_iter = itertools.chain(args, kwargs.itervalues())
+ if any(self.check(obj) for obj in obj_iter):
+ raise TypeError('reduction operation {0!r} not allowed for '
+ 'this dtype'.format(f.__name__.replace('nan',
+ '')))
+ return f(*args, **kwargs)
+ return _f
+
+
+class bottleneck_switch(object):
+ def __init__(self, zero_value=None, **kwargs):
+ self.zero_value = zero_value
+ self.kwargs = kwargs
+
+ def __call__(self, alt):
+ bn_name = alt.__name__
- def f(values, axis=None, skipna=True, **kwds):
- if len(kwargs) > 0:
- for k, v in kwargs.iteritems():
- if k not in kwds:
- kwds[k] = v
try:
- if zero_value is not None and values.size == 0:
- if values.ndim == 1:
- return 0
+ bn_func = getattr(bn, bn_name)
+ except (AttributeError, NameError): # pragma: no cover
+ bn_func = None
+
+ @functools.wraps(alt)
+ def f(values, axis=None, skipna=True, **kwds):
+ if len(self.kwargs) > 0:
+ for k, v in self.kwargs.iteritems():
+ if k not in kwds:
+ kwds[k] = v
+ try:
+ if self.zero_value is not None and values.size == 0:
+ if values.ndim == 1:
+ return 0
+ else:
+ result_shape = (values.shape[:axis] +
+ values.shape[axis + 1:])
+ result = np.empty(result_shape)
+ result.fill(0)
+ return result
+
+ if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype):
+ result = bn_func(values, axis=axis, **kwds)
+ # prefer to treat inf/-inf as NA
+ if _has_infs(result):
+ result = alt(values, axis=axis, skipna=skipna, **kwds)
else:
- result_shape = values.shape[:
- axis] + values.shape[axis + 1:]
- result = np.empty(result_shape)
- result.fill(0)
- return result
-
- if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype):
- result = bn_func(values, axis=axis, **kwds)
- # prefer to treat inf/-inf as NA
- if _has_infs(result):
result = alt(values, axis=axis, skipna=skipna, **kwds)
- else:
+ except Exception:
result = alt(values, axis=axis, skipna=skipna, **kwds)
- except Exception:
- result = alt(values, axis=axis, skipna=skipna, **kwds)
- return result
+ return result
- return f
+ return f
def _bn_ok_dtype(dt):
@@ -166,13 +196,17 @@ def nanall(values, axis=None, skipna=True):
values, mask, dtype = _get_values(values, skipna, True, copy=skipna)
return values.all(axis)
-def _nansum(values, axis=None, skipna=True):
+@disallow('M8')
+@bottleneck_switch(zero_value=0)
+def nansum(values, axis=None, skipna=True):
values, mask, dtype = _get_values(values, skipna, 0)
the_sum = values.sum(axis)
the_sum = _maybe_null_out(the_sum, axis, mask)
return the_sum
-def _nanmean(values, axis=None, skipna=True):
+@disallow('M8')
+@bottleneck_switch()
+def nanmean(values, axis=None, skipna=True):
values, mask, dtype = _get_values(values, skipna, 0)
the_sum = _ensure_numeric(values.sum(axis))
count = _get_counts(mask, axis)
@@ -186,8 +220,9 @@ def _nanmean(values, axis=None, skipna=True):
the_mean = the_sum / count if count > 0 else np.nan
return the_mean
-
-def _nanmedian(values, axis=None, skipna=True):
+@disallow('M8')
+@bottleneck_switch()
+def nanmedian(values, axis=None, skipna=True):
def get_median(x):
mask = notnull(x)
if not skipna and not mask.all():
@@ -197,13 +232,31 @@ def get_median(x):
if values.dtype != np.float64:
values = values.astype('f8')
- if values.ndim > 1:
- return np.apply_along_axis(get_median, axis, values)
- else:
- return get_median(values)
+ notempty = values.size
-
-def _nanvar(values, axis=None, skipna=True, ddof=1):
+ # an array from a frame
+ if values.ndim > 1:
+ # there's a non-empty array to apply over otherwise numpy raises
+ if notempty:
+ return np.apply_along_axis(get_median, axis, values)
+
+ # must return the correct shape, but median is not defined for the
+ # empty set so return nans of shape "everything but the passed axis"
+ # since "axis" is where the reduction would occur if we had a nonempty
+ # array
+ shp = np.array(values.shape)
+ dims = np.arange(values.ndim)
+ ret = np.empty(shp[dims != axis])
+ ret.fill(np.nan)
+ return ret
+
+ # otherwise return a scalar value
+ return get_median(values) if notempty else np.nan
+
+
+@disallow('M8')
+@bottleneck_switch(ddof=1)
+def nanvar(values, axis=None, skipna=True, ddof=1):
if not isinstance(values.dtype.type, np.floating):
values = values.astype('f8')
@@ -223,7 +276,8 @@ def _nanvar(values, axis=None, skipna=True, ddof=1):
return np.fabs((XX - X ** 2 / count) / (count - ddof))
-def _nanmin(values, axis=None, skipna=True):
+@bottleneck_switch()
+def nanmin(values, axis=None, skipna=True):
values, mask, dtype = _get_values(values, skipna, fill_value_typ = '+inf')
# numpy 1.6.1 workaround in Python 3.x
@@ -247,7 +301,8 @@ def _nanmin(values, axis=None, skipna=True):
return _maybe_null_out(result, axis, mask)
-def _nanmax(values, axis=None, skipna=True):
+@bottleneck_switch()
+def nanmax(values, axis=None, skipna=True):
values, mask, dtype = _get_values(values, skipna, fill_value_typ ='-inf')
# numpy 1.6.1 workaround in Python 3.x
@@ -291,14 +346,8 @@ def nanargmin(values, axis=None, skipna=True):
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
-nansum = _bottleneck_switch('nansum', _nansum, zero_value=0)
-nanmean = _bottleneck_switch('nanmean', _nanmean)
-nanmedian = _bottleneck_switch('nanmedian', _nanmedian)
-nanvar = _bottleneck_switch('nanvar', _nanvar, ddof=1)
-nanmin = _bottleneck_switch('nanmin', _nanmin)
-nanmax = _bottleneck_switch('nanmax', _nanmax)
-
+@disallow('M8')
def nanskew(values, axis=None, skipna=True):
if not isinstance(values.dtype.type, np.floating):
values = values.astype('f8')
@@ -332,6 +381,7 @@ def nanskew(values, axis=None, skipna=True):
return result
+@disallow('M8')
def nankurt(values, axis=None, skipna=True):
if not isinstance(values.dtype.type, np.floating):
values = values.astype('f8')
@@ -365,6 +415,7 @@ def nankurt(values, axis=None, skipna=True):
return result
+@disallow('M8')
def nanprod(values, axis=None, skipna=True):
mask = isnull(values)
if skipna and not issubclass(values.dtype.type, np.integer):
@@ -423,6 +474,7 @@ def _zero_out_fperr(arg):
return 0 if np.abs(arg) < 1e-14 else arg
+@disallow('M8')
def nancorr(a, b, method='pearson', min_periods=None):
"""
a, b: ndarrays
@@ -469,6 +521,7 @@ def _spearman(a, b):
return _cor_methods[method]
+@disallow('M8')
def nancov(a, b, min_periods=None):
if len(a) != len(b):
raise AssertionError('Operands to nancov must have same size')
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 64a6e9d3bcaaf..3a7a7d0f49b66 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -97,21 +97,15 @@ def convert_to_array(values):
values = np.array([values])
inferred_type = lib.infer_dtype(values)
if inferred_type in set(['datetime64','datetime','date','time']):
- if isinstance(values, pa.Array) and com.is_datetime64_dtype(values):
- pass
- else:
+ if not (isinstance(values, pa.Array) and com.is_datetime64_dtype(values)):
values = tslib.array_to_datetime(values)
elif inferred_type in set(['timedelta','timedelta64']):
# need to convert timedelta to ns here
# safest to convert it to an object arrany to process
- if isinstance(values, pa.Array) and com.is_timedelta64_dtype(values):
- pass
- else:
+ if not (isinstance(values, pa.Array) and com.is_timedelta64_dtype(values)):
values = com._possibly_cast_to_timedelta(values)
elif inferred_type in set(['integer']):
- if values.dtype == 'timedelta64[ns]':
- pass
- elif values.dtype.kind == 'm':
+ if values.dtype.kind == 'm':
values = values.astype('timedelta64[ns]')
else:
values = pa.array(values)
@@ -125,9 +119,9 @@ def convert_to_array(values):
is_datetime_rhs = com.is_datetime64_dtype(rvalues)
# 2 datetimes or 2 timedeltas
- if (is_timedelta_lhs and is_timedelta_rhs) or (is_datetime_lhs and is_datetime_rhs):
-
- if is_datetime_lhs and name not in ['__sub__']:
+ if (is_timedelta_lhs and is_timedelta_rhs) or (is_datetime_lhs and
+ is_datetime_rhs):
+ if is_datetime_lhs and name != '__sub__':
raise TypeError("can only operate on a datetimes for subtraction, "
"but the operator [%s] was passed" % name)
elif is_timedelta_lhs and name not in ['__add__','__sub__']:
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 0b34d4dc46494..d674a2f44ebe1 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -9167,6 +9167,15 @@ def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
f = getattr(frame, name)
+ if not ('max' in name or 'min' in name or 'count' in name):
+ df = DataFrame({'b': date_range('1/1/2001', periods=2)})
+ _f = getattr(df, name)
+ print df
+ self.assertFalse(len(_f()))
+
+ df['a'] = range(len(df))
+ self.assert_(len(getattr(df, name)()))
+
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index cba908f7136a9..e1589b9499757 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1460,10 +1460,6 @@ def test_sum_inf(self):
with cf.option_context("mode.use_inf_as_null", True):
assert_almost_equal(s.sum(), s2.sum())
- res = nanops.nansum(arr, axis=1)
- expected = nanops._nansum(arr, axis=1)
- assert_almost_equal(res, expected)
-
res = nanops.nansum(arr, axis=1)
self.assertTrue(np.isinf(res).all())
@@ -1594,6 +1590,12 @@ def testit():
# add some NaNs
self.series[5:15] = np.NaN
+
+ # idxmax, idxmin, min, and max are valid for dates
+ if not ('max' in name or 'min' in name):
+ ds = Series(date_range('1/1/2001', periods=10))
+ self.assertRaises(TypeError, f, ds)
+
# skipna or no
self.assert_(notnull(f(self.series)))
self.assert_(isnull(f(self.series, skipna=False)))
| closes #3726.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3731 | 2013-05-31T20:10:10Z | 2013-06-06T02:10:51Z | 2013-06-06T02:10:51Z | 2014-06-22T11:11:14Z |
ENH/CLN: give all AssertionErrors and nose.SkipTest raises an informative message | diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 34b65f169b904..697344639c41b 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -528,9 +528,14 @@ def get_value(self, *args):
-------
value : scalar value
"""
+ nargs = len(args)
+ nreq = self._AXIS_LEN
+
# require an arg for each axis
- if not ((len(args) == self._AXIS_LEN)):
- raise AssertionError()
+ if nargs != nreq:
+ raise TypeError('There must be an argument for each axis, you gave'
+ ' {0} args, but {1} are required'.format(nargs,
+ nreq))
# hm, two layers to the onion
frame = self._get_item_cache(args[0])
@@ -554,8 +559,13 @@ def set_value(self, *args):
otherwise a new object
"""
# require an arg for each axis and the value
- if not ((len(args) == self._AXIS_LEN + 1)):
- raise AssertionError()
+ nargs = len(args)
+ nreq = self._AXIS_LEN + 1
+
+ if nargs != nreq:
+ raise TypeError('There must be an argument for each axis plus the '
+ 'value provided, you gave {0} args, but {1} are '
+ 'required'.format(nargs, nreq))
try:
frame = self._get_item_cache(args[0])
@@ -592,8 +602,10 @@ def __setitem__(self, key, value):
**self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
mat = value.values
elif isinstance(value, np.ndarray):
- if not ((value.shape == shape[1:])):
- raise AssertionError()
+ if value.shape != shape[1:]:
+ raise ValueError('shape of value must be {0}, shape of given '
+ 'object was {1}'.format(shape[1:],
+ value.shape))
mat = np.asarray(value)
elif np.isscalar(value):
dtype, value = _infer_dtype_from_scalar(value)
@@ -1144,8 +1156,9 @@ def _extract_axes(self, data, axes, **kwargs):
@staticmethod
def _extract_axes_for_slice(self, axes):
""" return the slice dictionary for these axes """
- return dict([(self._AXIS_SLICEMAP[i], a) for i, a
- in zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)])
+ return dict([(self._AXIS_SLICEMAP[i], a)
+ for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN -
+ len(axes):], axes)])
@staticmethod
def _prep_ndarray(self, values, copy=True):
@@ -1157,8 +1170,11 @@ def _prep_ndarray(self, values, copy=True):
else:
if copy:
values = values.copy()
- if not ((values.ndim == self._AXIS_LEN)):
- raise AssertionError()
+ if values.ndim != self._AXIS_LEN:
+ raise ValueError("The number of dimensions required is {0}, "
+ "but the number of dimensions of the "
+ "ndarray given was {1}".format(self._AXIS_LEN,
+ values.ndim))
return values
@staticmethod
diff --git a/pandas/core/series.py b/pandas/core/series.py
index d9e9a0034b56b..808c959eee629 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1299,9 +1299,6 @@ def __unicode__(self):
dtype=True)
else:
result = u('Series([], dtype: %s)') % self.dtype
-
- if not (isinstance(result, compat.text_type)):
- raise AssertionError()
return result
def _tidy_repr(self, max_vals=20):
@@ -1377,7 +1374,9 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None,
# catch contract violations
if not isinstance(the_repr, compat.text_type):
- raise AssertionError("expected unicode string")
+ raise AssertionError("result must be of type unicode, type"
+ " of result is {0!r}"
+ "".format(the_repr.__class__.__name__))
if buf is None:
return the_repr
@@ -1397,11 +1396,16 @@ def _get_repr(
"""
formatter = fmt.SeriesFormatter(self, name=name, header=print_header,
- length=length, dtype=dtype, na_rep=na_rep,
+ length=length, dtype=dtype,
+ na_rep=na_rep,
float_format=float_format)
result = formatter.to_string()
- if not (isinstance(result, compat.text_type)):
- raise AssertionError()
+
+ # TODO: following check prob. not neces.
+ if not isinstance(result, compat.text_type):
+ raise AssertionError("result must be of type unicode, type"
+ " of result is {0!r}"
+ "".format(result.__class__.__name__))
return result
def __iter__(self):
diff --git a/pandas/io/date_converters.py b/pandas/io/date_converters.py
index 5c99ab4d0a664..ef92b8692c07f 100644
--- a/pandas/io/date_converters.py
+++ b/pandas/io/date_converters.py
@@ -1,5 +1,5 @@
"""This module is designed for community supported date conversion functions"""
-from pandas.compat import range
+from pandas.compat import range, map
import numpy as np
import pandas.lib as lib
@@ -47,12 +47,16 @@ def _maybe_cast(arr):
def _check_columns(cols):
- if not ((len(cols) > 0)):
- raise AssertionError()
+ if not len(cols):
+ raise AssertionError("There must be at least 1 column")
- N = len(cols[0])
- for c in cols[1:]:
- if not ((len(c) == N)):
- raise AssertionError()
+ head, tail = cols[0], cols[1:]
+
+ N = len(head)
+
+ for i, n in enumerate(map(len, tail)):
+ if n != N:
+ raise AssertionError('All columns must have the same length: {0}; '
+ 'column {1} has length {2}'.format(N, i, n))
return N
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 26f15d5ae2aea..e0b12277f4416 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -552,8 +552,10 @@ def _clean_options(self, options, engine):
# type conversion-related
if converters is not None:
- if not (isinstance(converters, dict)):
- raise AssertionError()
+ if not isinstance(converters, dict):
+ raise TypeError('Type converters must be a dict or'
+ ' subclass, input was '
+ 'a {0!r}'.format(type(converters).__name__))
else:
converters = {}
@@ -631,6 +633,7 @@ def get_chunk(self, size=None):
size = self.chunksize
return self.read(nrows=size)
+
def _is_index_col(col):
return col is not None and col is not False
@@ -1174,6 +1177,7 @@ def TextParser(*args, **kwds):
kwds['engine'] = 'python'
return TextFileReader(*args, **kwds)
+
# delimiter=None, dialect=None, names=None, header=0,
# index_col=None,
# na_values=None,
@@ -1653,8 +1657,8 @@ def _rows_to_cols(self, content):
if self._implicit_index:
col_len += len(self.index_col)
- if not ((self.skip_footer >= 0)):
- raise AssertionError()
+ if self.skip_footer < 0:
+ raise ValueError('skip footer cannot be negative')
if col_len != zip_len and self.index_col is not False:
i = 0
@@ -1883,6 +1887,7 @@ def _clean_na_values(na_values, keep_default_na=True):
return na_values, na_fvalues
+
def _clean_index_names(columns, index_col):
if not _is_index_col(index_col):
return None, columns, index_col
@@ -1941,6 +1946,7 @@ def _floatify_na_values(na_values):
pass
return result
+
def _stringify_na_values(na_values):
""" return a stringified and numeric for these values """
result = []
@@ -1965,6 +1971,7 @@ def _stringify_na_values(na_values):
pass
return set(result)
+
def _get_na_values(col, na_values, na_fvalues):
if isinstance(na_values, dict):
if col in na_values:
@@ -2014,15 +2021,17 @@ def __init__(self, f, colspecs, filler, thousands=None, encoding=None):
encoding = get_option('display.encoding')
self.encoding = encoding
- if not ( isinstance(colspecs, (tuple, list))):
- raise AssertionError()
+ if not isinstance(colspecs, (tuple, list)):
+ raise TypeError("column specifications must be a list or tuple, "
+ "input was a %r" % type(colspecs).__name__)
for colspec in colspecs:
- if not ( isinstance(colspec, (tuple, list)) and
- len(colspec) == 2 and
- isinstance(colspec[0], int) and
- isinstance(colspec[1], int) ):
- raise AssertionError()
+ if not (isinstance(colspec, (tuple, list)) and
+ len(colspec) == 2 and
+ isinstance(colspec[0], int) and
+ isinstance(colspec[1], int)):
+ raise TypeError('Each column specification must be '
+ '2 element tuple or list of integers')
def next(self):
line = next(self.f)
diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py
index 091e149ebb1c0..f647b217fb260 100644
--- a/pandas/io/tests/test_data.py
+++ b/pandas/io/tests/test_data.py
@@ -16,6 +16,13 @@
from numpy.testing import assert_array_equal
+def _skip_if_no_lxml():
+ try:
+ import lxml
+ except ImportError:
+ raise nose.SkipTest("no lxml")
+
+
def assert_n_failed_equals_n_null_columns(wngs, obj, cls=SymbolWarning):
all_nan_cols = pd.Series(dict((k, pd.isnull(v).all()) for k, v in
compat.iteritems(obj)))
@@ -88,10 +95,7 @@ def test_get_multi2(self):
class TestYahoo(unittest.TestCase):
@classmethod
def setUpClass(cls):
- try:
- import lxml
- except ImportError:
- raise nose.SkipTest
+ _skip_if_no_lxml()
@network
def test_yahoo(self):
@@ -210,10 +214,7 @@ def test_get_date_ret_index(self):
class TestYahooOptions(unittest.TestCase):
@classmethod
def setUpClass(cls):
- try:
- import lxml
- except ImportError:
- raise nose.SkipTest
+ _skip_if_no_lxml()
# aapl has monthlies
cls.aapl = web.Options('aapl', 'yahoo')
@@ -272,10 +273,7 @@ def test_get_put_data(self):
class TestOptionsWarnings(unittest.TestCase):
@classmethod
def setUpClass(cls):
- try:
- import lxml
- except ImportError:
- raise nose.SkipTest
+ _skip_if_no_lxml()
with assert_produces_warning(FutureWarning):
cls.aapl = web.Options('aapl')
diff --git a/pandas/io/tests/test_ga.py b/pandas/io/tests/test_ga.py
index e33b75c569fef..a0f4dc45725a3 100644
--- a/pandas/io/tests/test_ga.py
+++ b/pandas/io/tests/test_ga.py
@@ -14,7 +14,7 @@
from pandas.io.auth import AuthenticationConfigError, reset_token_store
from pandas.io import auth
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("need httplib2 and auth libs")
class TestGoogle(unittest.TestCase):
@@ -68,7 +68,7 @@ def test_getdata(self):
assert_frame_equal(df, df2)
except AuthenticationConfigError:
- raise nose.SkipTest
+ raise nose.SkipTest("authentication error")
@slow
@with_connectivity_check("http://www.google.com")
@@ -96,7 +96,7 @@ def test_iterator(self):
assert (df2.index > df1.index).all()
except AuthenticationConfigError:
- raise nose.SkipTest
+ raise nose.SkipTest("authentication error")
@slow
@with_connectivity_check("http://www.google.com")
@@ -150,7 +150,8 @@ def test_segment(self):
assert 'pageviewsPerVisit' in df
except AuthenticationConfigError:
- raise nose.SkipTest
+ raise nose.SkipTest("authentication error")
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py
index c32fc08dab297..dea7f2b079cef 100644
--- a/pandas/io/tests/test_json/test_pandas.py
+++ b/pandas/io/tests/test_json/test_pandas.py
@@ -567,17 +567,11 @@ def test_round_trip_exception_(self):
assert_frame_equal(result.reindex(index=df.index,columns=df.columns),df)
@network
- @slow
def test_url(self):
- try:
-
- url = 'https://api.github.com/repos/pydata/pandas/issues?per_page=5'
- result = read_json(url,convert_dates=True)
- for c in ['created_at','closed_at','updated_at']:
- self.assert_(result[c].dtype == 'datetime64[ns]')
-
- url = 'http://search.twitter.com/search.json?q=pandas%20python'
- result = read_json(url)
+ url = 'https://api.github.com/repos/pydata/pandas/issues?per_page=5'
+ result = read_json(url,convert_dates=True)
+ for c in ['created_at','closed_at','updated_at']:
+ self.assert_(result[c].dtype == 'datetime64[ns]')
- except URLError:
- raise nose.SkipTest
+ url = 'http://search.twitter.com/search.json?q=pandas%20python'
+ result = read_json(url)
diff --git a/pandas/io/tests/test_json/test_ujson.py b/pandas/io/tests/test_json/test_ujson.py
index 38a30b8baf459..13ccf0bbd1742 100644
--- a/pandas/io/tests/test_json/test_ujson.py
+++ b/pandas/io/tests/test_json/test_ujson.py
@@ -29,7 +29,7 @@
def _skip_if_python_ver(skip_major, skip_minor=None):
major, minor = sys.version_info[:2]
if major == skip_major and (skip_minor is None or minor == skip_minor):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping Python version %d.%d" % (major, minor))
json_unicode = (json.dumps if sys.version_info[0] >= 3
else partial(json.dumps, encoding="utf-8"))
@@ -363,7 +363,8 @@ def test_nat(self):
def test_npy_nat(self):
from distutils.version import LooseVersion
if LooseVersion(np.__version__) < '1.7.0':
- raise nose.SkipTest
+ raise nose.SkipTest("numpy version < 1.7.0, is "
+ "{0}".format(np.__version__))
input = np.datetime64('NaT')
assert ujson.encode(input) == 'null', "Expected null"
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index fadf70877409f..dea719ce6f397 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -8,6 +8,7 @@
import re
import unittest
import nose
+import platform
from numpy import nan
import numpy as np
@@ -64,6 +65,10 @@ def setUp(self):
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
+ def test_converters_type_must_be_dict(self):
+ with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
+ self.read_csv(StringIO(self.data1), converters=0)
+
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
@@ -755,6 +760,8 @@ def test_deep_skiprows(self):
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
+
+
def test_detect_string_na(self):
data = """A,B
foo,bar
@@ -1492,28 +1499,17 @@ def test_na_value_dict(self):
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
- @slow
@tm.network
def test_url(self):
- try:
- # HTTP(S)
- url = ('https://raw.github.com/pydata/pandas/master/'
- 'pandas/io/tests/data/salary.table')
- url_table = self.read_table(url)
- dirpath = tm.get_data_path()
- localtable = os.path.join(dirpath, 'salary.table')
- local_table = self.read_table(localtable)
- tm.assert_frame_equal(url_table, local_table)
- # TODO: ftp testing
-
- except URLError:
- try:
- with tm.closing(urlopen('http://www.google.com')) as resp:
- pass
- except URLError:
- raise nose.SkipTest
- else:
- raise
+ # HTTP(S)
+ url = ('https://raw.github.com/pydata/pandas/master/'
+ 'pandas/io/tests/data/salary.table')
+ url_table = self.read_table(url)
+ dirpath = tm.get_data_path()
+ localtable = os.path.join(dirpath, 'salary.table')
+ local_table = self.read_table(localtable)
+ tm.assert_frame_equal(url_table, local_table)
+ # TODO: ftp testing
@slow
def test_file(self):
@@ -1529,7 +1525,8 @@ def test_file(self):
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
- raise nose.SkipTest
+ raise nose.SkipTest("failing on %s" %
+ ' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
@@ -1710,7 +1707,8 @@ def test_utf16_example(self):
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
- raise nose.SkipTest
+ raise nose.SkipTest("skipping because of windows hash on Python"
+ " 3.2.2")
csv = """id,score,days
1,2,12
@@ -1893,6 +1891,21 @@ def test_usecols_index_col_conflict(self):
class TestPythonParser(ParserTests, unittest.TestCase):
+ def test_negative_skipfooter_raises(self):
+ text = """#foo,a,b,c
+#foo,a,b,c
+#foo,a,b,c
+#foo,a,b,c
+#foo,a,b,c
+#foo,a,b,c
+1/1/2000,1.,2.,3.
+1/2/2000,4,5,6
+1/3/2000,7,8,9
+"""
+
+ with tm.assertRaisesRegexp(ValueError,
+ 'skip footer cannot be negative'):
+ df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
@@ -2048,6 +2061,18 @@ def test_fwf(self):
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3))
+ def test_fwf_colspecs_is_list_or_tuple(self):
+ with tm.assertRaisesRegexp(TypeError,
+ 'column specifications must be a list or '
+ 'tuple.+'):
+ fwr = pd.io.parsers.FixedWidthReader(StringIO(self.data1),
+ {'a': 1}, ',')
+
+ def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
+ with tm.assertRaisesRegexp(TypeError,
+ 'Each column specification must be.+'):
+ read_fwf(StringIO(self.data1), {'a': 1})
+
def test_fwf_regression(self):
# GH 3594
#### turns out 'T060' is parsable as a datetime slice!
@@ -2155,7 +2180,7 @@ def test_verbose_import(self):
def test_iteration_open_handle(self):
if PY3:
- raise nose.SkipTest
+ raise nose.SkipTest("won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
@@ -2371,7 +2396,7 @@ def test_decompression(self):
import gzip
import bz2
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
expected = self.read_csv(self.csv1)
@@ -2406,7 +2431,7 @@ def test_decompression_regex_sep(self):
import gzip
import bz2
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 35b9dfbdb6f77..35ecef2acf818 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -2295,7 +2295,7 @@ def test_index_types(self):
def test_timeseries_preepoch(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
- raise nose.SkipTest
+ raise nose.SkipTest("won't work on Python < 2.7")
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
@@ -3599,7 +3599,7 @@ def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs):
safe_remove(self.path)
def test_legacy_table_write(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table_%s.h5' % pandas.__version__), 'a')
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 624f16b3207cd..f135a3619e03c 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -431,7 +431,7 @@ def test_tquery(self):
try:
import MySQLdb
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no MySQLdb")
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.db.cursor()
@@ -456,7 +456,7 @@ def test_uquery(self):
try:
import MySQLdb
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no MySQLdb")
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.db.cursor()
diff --git a/pandas/io/tests/test_wb.py b/pandas/io/tests/test_wb.py
index e85c63d7d5999..60b4d8d462723 100644
--- a/pandas/io/tests/test_wb.py
+++ b/pandas/io/tests/test_wb.py
@@ -10,7 +10,7 @@
@slow
@network
def test_wdi_search():
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
expected = {u('id'): {2634: u('GDPPCKD'),
4649: u('NY.GDP.PCAP.KD'),
4651: u('NY.GDP.PCAP.KN'),
@@ -30,7 +30,7 @@ def test_wdi_search():
@slow
@network
def test_wdi_download():
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
expected = {'GDPPCKN': {(u('United States'), u('2003')): u('40800.0735367688'), (u('Canada'), u('2004')): u('37857.1261134552'), (u('United States'), u('2005')): u('42714.8594790102'), (u('Canada'), u('2003')): u('37081.4575704003'), (u('United States'), u('2004')): u('41826.1728310667'), (u('Mexico'), u('2003')): u('72720.0691255285'), (u('Mexico'), u('2004')): u('74751.6003347038'), (u('Mexico'), u('2005')): u('76200.2154469437'), (u('Canada'), u('2005')): u('38617.4563629611')}, 'GDPPCKD': {(u('United States'), u('2003')): u('40800.0735367688'), (u('Canada'), u('2004')): u('34397.055116118'), (u('United States'), u('2005')): u('42714.8594790102'), (u('Canada'), u('2003')): u('33692.2812368928'), (u('United States'), u('2004')): u('41826.1728310667'), (u('Mexico'), u('2003')): u('7608.43848670658'), (u('Mexico'), u('2004')): u('7820.99026814334'), (u('Mexico'), u('2005')): u('7972.55364129367'), (u('Canada'), u('2005')): u('35087.8925933298')}}
expected = pandas.DataFrame(expected)
result = download(country=['CA', 'MX', 'US', 'junk'], indicator=['GDPPCKD',
diff --git a/pandas/sparse/array.py b/pandas/sparse/array.py
index 34823c052a518..8a50a000a9526 100644
--- a/pandas/sparse/array.py
+++ b/pandas/sparse/array.py
@@ -11,12 +11,11 @@
from pandas.core.base import PandasObject
import pandas.core.common as com
-from pandas import compat
+from pandas import compat, lib
from pandas.compat import range
from pandas._sparse import BlockIndex, IntIndex
import pandas._sparse as splib
-import pandas.lib as lib
import pandas.index as _index
@@ -28,8 +27,8 @@ def _sparse_op_wrap(op, name):
def wrapper(self, other):
if isinstance(other, np.ndarray):
- if not ((len(self) == len(other))):
- raise AssertionError()
+ if len(self) != len(other):
+ raise AssertionError("Operands must be of the same size")
if not isinstance(other, SparseArray):
other = SparseArray(other, fill_value=self.fill_value)
return _sparse_array_op(self, other, op, name)
@@ -148,8 +147,10 @@ def __new__(
fill_value=fill_value)
else:
values = data
- if not ((len(values) == sparse_index.npoints)):
- raise AssertionError()
+ if len(values) != sparse_index.npoints:
+ raise AssertionError("Non array-like type {0} must have"
+ " the same length as the"
+ " index".format(type(values)))
# Create array, do *not* copy data by default
if copy:
@@ -329,8 +330,8 @@ def take(self, indices, axis=0):
-------
taken : ndarray
"""
- if not ((axis == 0)):
- raise AssertionError()
+ if axis:
+ raise ValueError("axis must be 0, input was {0}".format(axis))
indices = np.atleast_1d(np.asarray(indices, dtype=int))
# allow -1 to indicate missing values
@@ -339,14 +340,14 @@ def take(self, indices, axis=0):
raise IndexError('out of bounds access')
if self.sp_index.npoints > 0:
- locs = np.array(
- [self.sp_index.lookup(loc) if loc > -1 else -1 for loc in indices])
+ locs = np.array([self.sp_index.lookup(loc) if loc > -1 else -1
+ for loc in indices])
result = self.sp_values.take(locs)
mask = locs == -1
if mask.any():
try:
result[mask] = self.fill_value
- except (ValueError):
+ except ValueError:
# wrong dtype
result = result.astype('float64')
result[mask] = self.fill_value
diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index 53fabb0160a88..93b29cbf91b91 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -25,7 +25,6 @@
from pandas.core.generic import NDFrame
from pandas.sparse.series import SparseSeries, SparseArray
from pandas.util.decorators import Appender
-import pandas.lib as lib
class SparseDataFrame(DataFrame):
@@ -601,20 +600,15 @@ def _reindex_with_indexers(self, reindexers, method=None, fill_value=None, limit
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
- if isinstance(other, Series):
- if other.name is None:
- raise ValueError('Other Series must have a name')
- other = SparseDataFrame({other.name: other},
- default_fill_value=self._default_fill_value)
if on is not None:
- raise NotImplementedError
- else:
- return self._join_index(other, how, lsuffix, rsuffix)
+ raise NotImplementedError("'on' keyword parameter is not yet "
+ "implemented")
+ return self._join_index(other, how, lsuffix, rsuffix)
def _join_index(self, other, how, lsuffix, rsuffix):
if isinstance(other, Series):
- if not (other.name is not None):
- raise AssertionError()
+ if other.name is None:
+ raise ValueError('Other Series must have a name')
other = SparseDataFrame({other.name: other},
default_fill_value=self._default_fill_value)
diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py
index ae981180022c7..286b683b1ea88 100644
--- a/pandas/sparse/panel.py
+++ b/pandas/sparse/panel.py
@@ -77,8 +77,9 @@ def __init__(self, frames, items=None, major_axis=None, minor_axis=None,
default_kind=default_kind)
frames = new_frames
- if not (isinstance(frames, dict)):
- raise AssertionError()
+ if not isinstance(frames, dict):
+ raise TypeError('input must be a dict, a %r was passed' %
+ type(frames).__name__)
self.default_fill_value = fill_value = default_fill_value
self.default_kind = kind = default_kind
@@ -99,7 +100,7 @@ def __init__(self, frames, items=None, major_axis=None, minor_axis=None,
# do we want to fill missing ones?
for item in items:
if item not in clean_frames:
- raise Exception('column %s not found in data' % item)
+ raise ValueError('column %r not found in data' % item)
self._items = items
self.major_axis = major_axis
diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py
index 38003f0096df2..50e80e0c202d5 100644
--- a/pandas/sparse/series.py
+++ b/pandas/sparse/series.py
@@ -555,8 +555,8 @@ def sparse_reindex(self, new_index):
-------
reindexed : SparseSeries
"""
- if not (isinstance(new_index, splib.SparseIndex)):
- raise AssertionError()
+ if not isinstance(new_index, splib.SparseIndex):
+ raise TypeError('new index must be a SparseIndex')
block = self.block.sparse_reindex(new_index)
new_data = SingleBlockManager(block, block.ref_items)
diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py
index a74872c8f193f..b3f2a8b3b8136 100644
--- a/pandas/sparse/tests/test_sparse.py
+++ b/pandas/sparse/tests/test_sparse.py
@@ -488,7 +488,7 @@ def test_operators_corner2(self):
def test_binary_operators(self):
# skipping for now #####
- raise nose.SkipTest
+ raise nose.SkipTest("skipping sparse binary operators test")
def _check_inplace_op(iop, op):
tmp = self.bseries.copy()
@@ -539,7 +539,7 @@ def _compare_with_series(sps, new_index):
reindexed = self.bseries.reindex(self.bseries.index, copy=False)
reindexed.sp_values[:] = 1.
- self.assert_((self.bseries.sp_values == 1.).all())
+ np.testing.assert_array_equal(self.bseries.sp_values, 1.)
def test_sparse_reindex(self):
length = 10
@@ -583,6 +583,13 @@ def _check_all(values, first, second):
_check_all(values1, index1, [0, 1, 7, 8, 9])
_check_all(values1, index1, [])
+ first_series = SparseSeries(values1, sparse_index=IntIndex(length,
+ index1),
+ fill_value=nan)
+ with tm.assertRaisesRegexp(TypeError,
+ 'new index must be a SparseIndex'):
+ reindexed = first_series.sparse_reindex(0)
+
def test_repr(self):
bsrepr = repr(self.bseries)
isrepr = repr(self.iseries)
@@ -1308,6 +1315,10 @@ def test_join(self):
right = self.frame.ix[:, ['B', 'D']]
self.assertRaises(Exception, left.join, right)
+ with tm.assertRaisesRegexp(ValueError, 'Other Series must have a name'):
+ self.frame.join(Series(np.random.randn(len(self.frame)),
+ index=self.frame.index))
+
def test_reindex(self):
def _check_frame(frame):
@@ -1576,8 +1587,11 @@ def _test_op(panel, op):
assert_sp_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_constructor(self):
- self.assertRaises(Exception, SparsePanel, self.data_dict,
+ self.assertRaises(ValueError, SparsePanel, self.data_dict,
items=['Item0', 'ItemA', 'ItemB'])
+ with tm.assertRaisesRegexp(TypeError,
+ "input must be a dict, a 'list' was passed"):
+ SparsePanel(['a', 'b', 'c'])
def test_from_dict(self):
fd = SparsePanel.from_dict(self.data_dict)
diff --git a/pandas/stats/ols.py b/pandas/stats/ols.py
index 2bf366f4dc8cb..9d22068c1612f 100644
--- a/pandas/stats/ols.py
+++ b/pandas/stats/ols.py
@@ -1216,8 +1216,9 @@ def _nobs_raw(self):
return result.astype(int)
def _beta_matrix(self, lag=0):
- if not ((lag >= 0)):
- raise AssertionError()
+ if lag < 0:
+ raise AssertionError("'lag' must be greater than or equal to 0, "
+ "input was {0}".format(lag))
betas = self._beta_raw
@@ -1280,8 +1281,8 @@ def _filter_data(lhs, rhs, weights=None):
Cleaned lhs and rhs
"""
if not isinstance(lhs, Series):
- if not ((len(lhs) == len(rhs))):
- raise AssertionError()
+ if len(lhs) != len(rhs):
+ raise AssertionError("length of lhs must equal length of rhs")
lhs = Series(lhs, index=rhs.index)
rhs = _combine_rhs(rhs)
diff --git a/pandas/stats/plm.py b/pandas/stats/plm.py
index 450ddac78e06a..3c67119427ae0 100644
--- a/pandas/stats/plm.py
+++ b/pandas/stats/plm.py
@@ -103,10 +103,12 @@ def _prepare_data(self):
y_regressor = y
if weights is not None:
- if not ((y_regressor.index.equals(weights.index))):
- raise AssertionError()
- if not ((x_regressor.index.equals(weights.index))):
- raise AssertionError()
+ if not y_regressor.index.equals(weights.index):
+ raise AssertionError("y_regressor and weights must have the "
+ "same index")
+ if not x_regressor.index.equals(weights.index):
+ raise AssertionError("x_regressor and weights must have the "
+ "same index")
rt_weights = np.sqrt(weights)
y_regressor = y_regressor * rt_weights
@@ -173,8 +175,10 @@ def _convert_x(self, x):
# .iteritems
iteritems = getattr(x, 'iteritems', x.items)
for key, df in iteritems():
- if not ((isinstance(df, DataFrame))):
- raise AssertionError()
+ if not isinstance(df, DataFrame):
+ raise AssertionError("all input items must be DataFrames, "
+ "at least one is of "
+ "type {0}".format(type(df)))
if _is_numeric(df):
x_converted[key] = df
@@ -642,8 +646,9 @@ def _y_predict_raw(self):
return (betas * x).sum(1)
def _beta_matrix(self, lag=0):
- if not ((lag >= 0)):
- raise AssertionError()
+ if lag < 0:
+ raise AssertionError("'lag' must be greater than or equal to 0, "
+ "input was {0}".format(lag))
index = self._y_trans.index
major_labels = index.labels[0]
diff --git a/pandas/stats/tests/test_math.py b/pandas/stats/tests/test_math.py
index 92dedb35f4512..008fffdc1db06 100644
--- a/pandas/stats/tests/test_math.py
+++ b/pandas/stats/tests/test_math.py
@@ -49,7 +49,7 @@ def test_rank_1d(self):
def test_solve_rect(self):
if not _have_statsmodels:
- raise nose.SkipTest
+ raise nose.SkipTest("no statsmodels")
b = Series(np.random.randn(N), self.frame.index)
result = pmath.solve(self.frame, b)
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index 24fc04d849c7f..70653d9d96bef 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -22,7 +22,7 @@ def _skip_if_no_scipy():
try:
import scipy.stats
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scipy.stats")
class TestMoments(unittest.TestCase):
@@ -73,7 +73,7 @@ def test_cmov_mean(self):
try:
from scikits.timeseries.lib import cmov_mean
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scikits.timeseries")
vals = np.random.randn(10)
xp = cmov_mean(vals, 5)
@@ -91,7 +91,7 @@ def test_cmov_window(self):
try:
from scikits.timeseries.lib import cmov_window
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scikits.timeseries")
vals = np.random.randn(10)
xp = cmov_window(vals, 5, 'boxcar')
@@ -109,7 +109,7 @@ def test_cmov_window_corner(self):
try:
from scikits.timeseries.lib import cmov_window
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scikits.timeseries")
# all nan
vals = np.empty(10, dtype=float)
@@ -133,7 +133,7 @@ def test_cmov_window_frame(self):
try:
from scikits.timeseries.lib import cmov_window
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scikits.timeseries")
# DataFrame
vals = np.random.randn(10, 2)
@@ -146,7 +146,7 @@ def test_cmov_window_na_min_periods(self):
try:
from scikits.timeseries.lib import cmov_window
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scikits.timeseries")
# min_periods
vals = Series(np.random.randn(10))
@@ -163,7 +163,7 @@ def test_cmov_window_regular(self):
try:
from scikits.timeseries.lib import cmov_window
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scikits.timeseries")
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
@@ -179,7 +179,7 @@ def test_cmov_window_special(self):
try:
from scikits.timeseries.lib import cmov_window
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scikits.timeseries")
win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., 'width': 2.},
@@ -319,7 +319,7 @@ def test_rolling_kurt(self):
def test_fperr_robustness(self):
# TODO: remove this once python 2.5 out of picture
if PY3:
- raise nose.SkipTest
+ raise nose.SkipTest("doesn't work on python 3")
# #2114
data = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a@\xaa\xaa\xaa\xaa\xaa\xaa\x02@8\x8e\xe38\x8e\xe3\xe8?z\t\xed%\xb4\x97\xd0?\xa2\x0c<\xdd\x9a\x1f\xb6?\x82\xbb\xfa&y\x7f\x9d?\xac\'\xa7\xc4P\xaa\x83?\x90\xdf\xde\xb0k8j?`\xea\xe9u\xf2zQ?*\xe37\x9d\x98N7?\xe2.\xf5&v\x13\x1f?\xec\xc9\xf8\x19\xa4\xb7\x04?\x90b\xf6w\x85\x9f\xeb>\xb5A\xa4\xfaXj\xd2>F\x02\xdb\xf8\xcb\x8d\xb8>.\xac<\xfb\x87^\xa0>\xe8:\xa6\xf9_\xd3\x85>\xfb?\xe2cUU\xfd?\xfc\x7fA\xed8\x8e\xe3?\xa5\xaa\xac\x91\xf6\x12\xca?n\x1cs\xb6\xf9a\xb1?\xe8%D\xf3L-\x97?5\xddZD\x11\xe7~?#>\xe7\x82\x0b\x9ad?\xd9R4Y\x0fxK?;7x;\nP2?N\xf4JO\xb8j\x18?4\xf81\x8a%G\x00?\x9a\xf5\x97\r2\xb4\xe5>\xcd\x9c\xca\xbcB\xf0\xcc>3\x13\x87(\xd7J\xb3>\x99\x19\xb4\xe0\x1e\xb9\x99>ff\xcd\x95\x14&\x81>\x88\x88\xbc\xc7p\xddf>`\x0b\xa6_\x96|N>@\xb2n\xea\x0eS4>U\x98\x938i\x19\x1b>\x8eeb\xd0\xf0\x10\x02>\xbd\xdc-k\x96\x16\xe8=(\x93\x1e\xf2\x0e\x0f\xd0=\xe0n\xd3Bii\xb5=*\xe9\x19Y\x8c\x8c\x9c=\xc6\xf0\xbb\x90]\x08\x83=]\x96\xfa\xc0|`i=>d\xfc\xd5\xfd\xeaP=R0\xfb\xc7\xa7\x8e6=\xc2\x95\xf9_\x8a\x13\x1e=\xd6c\xa6\xea\x06\r\x04=r\xda\xdd8\t\xbc\xea<\xf6\xe6\x93\xd0\xb0\xd2\xd1<\x9d\xdeok\x96\xc3\xb7<&~\xea9s\xaf\x9f<UUUUUU\x13@q\x1c\xc7q\x1c\xc7\xf9?\xf6\x12\xdaKh/\xe1?\xf2\xc3"e\xe0\xe9\xc6?\xed\xaf\x831+\x8d\xae?\xf3\x1f\xad\xcb\x1c^\x94?\x15\x1e\xdd\xbd>\xb8\x02@\xc6\xd2&\xfd\xa8\xf5\xe8?\xd9\xe1\x19\xfe\xc5\xa3\xd0?v\x82"\xa8\xb2/\xb6?\x9dX\x835\xee\x94\x9d?h\x90W\xce\x9e\xb8\x83?\x8a\xc0th~Kj?\\\x80\xf8\x9a\xa9\x87Q?%\xab\xa0\xce\x8c_7?1\xe4\x80\x13\x11*\x1f? \x98\x00\r\xb6\xc6\x04?\x80u\xabf\x9d\xb3\xeb>UNrD\xbew\xd2>\x1c\x13C[\xa8\x9f\xb8>\x12b\xd7<pj\xa0>m-\x1fQ@\xe3\x85>\xe6\x91)l\x00/m>Da\xc6\xf2\xaatS>\x05\xd7]\xee\xe3\xf09>'
diff --git a/pandas/stats/tests/test_ols.py b/pandas/stats/tests/test_ols.py
index ad9184e698316..df2f545c90b92 100644
--- a/pandas/stats/tests/test_ols.py
+++ b/pandas/stats/tests/test_ols.py
@@ -6,9 +6,9 @@
from __future__ import division
-from distutils.version import LooseVersion
from datetime import datetime
from pandas import compat
+from distutils.version import LooseVersion
import unittest
import nose
import numpy as np
@@ -77,7 +77,7 @@ def setUpClass(cls):
pass
if not _have_statsmodels:
- raise nose.SkipTest
+ raise nose.SkipTest("no statsmodels")
def testOLSWithDatasets_ccard(self):
self.checkDataSet(sm.datasets.ccard.load(), skip_moving=True)
@@ -262,7 +262,7 @@ class TestOLSMisc(unittest.TestCase):
@classmethod
def setupClass(cls):
if not _have_statsmodels:
- raise nose.SkipTest
+ raise nose.SkipTest("no statsmodels")
def test_f_test(self):
x = tm.makeTimeDataFrame()
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index e47ba0c8e1569..f41f6a9858b47 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -35,7 +35,7 @@ def test_getitem(self):
tm.assert_almost_equal(subf.labels, [2, 2, 2])
def test_constructor_unsortable(self):
- raise nose.SkipTest
+ raise nose.SkipTest('skipping for now')
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index f81620b897a4a..56f52447aadfe 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -18,8 +18,13 @@
if not expr._USE_NUMEXPR:
- raise nose.SkipTest("numexpr not available")
-
+ try:
+ import numexpr
+ except ImportError:
+ msg = "don't have"
+ else:
+ msg = "not using"
+ raise nose.SkipTest("{0} numexpr".format(msg))
_frame = DataFrame(randn(10000, 4), columns=list('ABCD'), dtype='float64')
_frame2 = DataFrame(randn(100, 4), columns = list('ABCD'), dtype='float64')
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 723bf022c3f48..9405f3c58bfd7 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -202,7 +202,8 @@ def test_repr_non_interactive(self):
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
- raise nose.SkipTest
+ raise nose.SkipTest("terminal size too small, "
+ "{0} x {1}".format(term_width, term_height))
def mkframe(n):
index = ['%05d' % i for i in range(n)]
@@ -766,7 +767,7 @@ def test_pprint_thing(self):
from pandas.core.common import pprint_thing as pp_t
if PY3:
- raise nose.SkipTest()
+ raise nose.SkipTest("doesn't work on Python 3")
self.assertEquals(pp_t('a') , u('a'))
self.assertEquals(pp_t(u('a')) , u('a'))
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index e5d2bb17ec7a8..51278b82aaedc 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -60,7 +60,7 @@ def _skip_if_no_scipy():
try:
import scipy.stats
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scipy.stats module")
#---------------------------------------------------------------------
# DataFrame test cases
@@ -9498,7 +9498,7 @@ def test_sum(self):
self._check_stat_op('sum', np.sum, has_numeric_only=True)
def test_sum_mixed_numeric(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# mixed types
self._check_stat_op('sum', np.sum, frame = self.mixed_float, has_numeric_only=True)
@@ -10910,10 +10910,7 @@ def test_stale_cached_series_bug_473(self):
self.assert_(isnull(Y['g']['c']))
def test_index_namedtuple(self):
- try:
- from collections import namedtuple
- except ImportError:
- raise nose.SkipTest
+ from collections import namedtuple
IndexType = namedtuple("IndexType", ["a", "b"])
idx1 = IndexType("foo", "bar")
idx2 = IndexType("baz", "bof")
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 49dc31514da7a..bdeb4ca3d0212 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -26,7 +26,7 @@ def _skip_if_no_scipy():
try:
import scipy
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scipy")
@tm.mplskip
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 857836fa698ce..2a9149ef30dab 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -1414,7 +1414,7 @@ def test_pickle(self):
def test_legacy_pickle(self):
if compat.PY3:
- raise nose.SkipTest
+ raise nose.SkipTest("doesn't work on Python 3")
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index 07436236a62de..2a9e7f8642601 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -195,7 +195,7 @@ def test_split_block_at(self):
# with dup column support this method was taken out
# GH3679
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
bs = list(self.fblock.split_block_at('a'))
self.assertEqual(len(bs), 1)
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 2c8394bfde285..5ec97344373a2 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -1419,7 +1419,7 @@ def test_getitem_lowerdim_corner(self):
# AMBIGUOUS CASES!
def test_partial_ix_missing(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
result = self.ymd.ix[2000, 0]
expected = self.ymd.ix[2000]['A']
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index a61212b341fa7..289bcb9db0c7e 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -33,7 +33,7 @@ def _skip_if_no_scipy():
try:
import scipy.stats
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scipy.stats")
class PanelTests(object):
@@ -102,7 +102,7 @@ def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
@@ -426,8 +426,8 @@ def test_delitem_and_pop(self):
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
- self.assertRaises(Exception, self.panel.__setitem__,
- 'ItemE', lp)
+ with tm.assertRaises(ValueError):
+ self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
@@ -456,6 +456,13 @@ def test_setitem(self):
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
+ # bad shape
+ p = Panel(np.random.randn(4, 3, 2))
+ with tm.assertRaisesRegexp(ValueError,
+ "shape of value must be \(3, 2\), "
+ "shape of given object was \(4, 2\)"):
+ p[0] = np.random.randn(4, 2)
+
def test_setitem_ndarray(self):
from pandas import date_range, datetools
@@ -758,6 +765,9 @@ def test_get_value(self):
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
+ with tm.assertRaisesRegexp(TypeError,
+ "There must be an argument for each axis"):
+ self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
@@ -774,6 +784,10 @@ def test_set_value(self):
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assert_(com.is_float_dtype(res3['ItemE'].values))
+ with tm.assertRaisesRegexp(TypeError,
+ "There must be an argument for each axis"
+ " plus the value provided"):
+ self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
@@ -878,6 +892,11 @@ def _check_dtype(panel, dtype):
panel = Panel(np.random.randn(2,10,5),items=lrange(2),major_axis=lrange(10),minor_axis=lrange(5),dtype=dtype)
_check_dtype(panel,dtype)
+ def test_constructor_fails_with_not_3d_input(self):
+ with tm.assertRaisesRegexp(ValueError,
+ "The number of dimensions required is 3"):
+ Panel(np.random.randn(10, 2))
+
def test_consolidate(self):
self.assert_(self.panel._data.is_consolidated())
@@ -1457,14 +1476,14 @@ def test_from_frame_level1_unsorted(self):
assert_frame_equal(p.minor_xs(2), df.xs(2, level=1).sort_index())
def test_to_excel(self):
+ import os
try:
- import os
import xlwt
import xlrd
import openpyxl
from pandas.io.excel import ExcelFile
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("need xlwt xlrd openpyxl")
for ext in ['xls', 'xlsx']:
path = '__tmp__.' + ext
@@ -1473,7 +1492,7 @@ def test_to_excel(self):
try:
reader = ExcelFile(path)
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("need xlwt xlrd openpyxl")
for item, df in compat.iteritems(self.panel):
recdf = reader.parse(str(item), index_col=0)
@@ -1492,8 +1511,8 @@ def test_to_excel_xlsxwriter(self):
self.panel.to_excel(path, engine='xlsxwriter')
try:
reader = ExcelFile(path)
- except ImportError:
- raise nose.SkipTest
+ except ImportError as e:
+ raise nose.SkipTest("cannot write excel file: %s" % e)
for item, df in compat.iteritems(self.panel):
recdf = reader.parse(str(item), index_col=0)
diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py
index 1ce909b57402f..4d5d29e08fa9f 100644
--- a/pandas/tests/test_panel4d.py
+++ b/pandas/tests/test_panel4d.py
@@ -73,7 +73,7 @@ def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
@@ -397,7 +397,7 @@ def test_comp(func):
test_comp(operator.le)
def test_setitem_ndarray(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# from pandas import DateRange, datetools
# timeidx = DateRange(start=datetime(2009,1,1),
@@ -510,7 +510,7 @@ def test_getitem_fancy_ints(self):
pass
def test_getitem_fancy_xs(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# self.assertRaises(NotImplementedError, self.panel4d.major_xs)
# self.assertRaises(NotImplementedError, self.panel4d.minor_xs)
@@ -706,7 +706,7 @@ def test_constructor_resize(self):
assert_panel4d_equal(result, expected)
def test_from_dict_mixed_orient(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# df = tm.makeDataFrame()
# df['foo'] = 'bar'
@@ -798,7 +798,7 @@ def test_reindex_like(self):
assert_panel4d_equal(smaller, smaller_like)
def test_take(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# # axis == 0
# result = self.panel.take([2, 0, 1], axis=0)
@@ -876,7 +876,7 @@ def test_swapaxes(self):
self.assert_(id(self.panel4d) != id(result))
def test_to_frame(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# # filtered
# filtered = self.panel.to_frame()
# expected = self.panel.to_frame().dropna(how='any')
@@ -890,7 +890,7 @@ def test_to_frame(self):
# self.assertEqual(unfiltered.index.names, ('major', 'minor'))
def test_to_frame_mixed(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# panel = self.panel.fillna(0)
# panel['str'] = 'foo'
# panel['bool'] = panel['ItemA'] > 0
@@ -928,20 +928,20 @@ def test_update(self):
assert_panel4d_equal(p4d, expected)
def test_filter(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
def test_apply(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
def test_compound(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# compounded = self.panel.compound()
# assert_series_equal(compounded['ItemA'],
# (1 + self.panel['ItemA']).product(0) - 1)
def test_shift(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# # major
# idx = self.panel.major_axis[0]
# idx_lag = self.panel.major_axis[1]
@@ -963,7 +963,7 @@ def test_shift(self):
# self.assertRaises(Exception, self.panel.shift, 1, axis='items')
def test_multiindex_get(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b',2)],
# names=['first', 'second'])
# wp = Panel(np.random.random((4,5,5)),
@@ -981,7 +981,7 @@ def test_multiindex_get(self):
# names=['first', 'second'])
def test_multiindex_blocks(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
# names=['first', 'second'])
# wp = Panel(self.panel._data)
@@ -1034,10 +1034,10 @@ def test_group_agg(self):
self.assertRaises(Exception, group_agg, values, bounds, f2)
def test_from_frame_level1_unsorted(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
def test_to_excel(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
if __name__ == '__main__':
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 98fa5c0a56ccd..f8320149f4ac6 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -5,6 +5,7 @@
import unittest
import string
from itertools import product, starmap
+from distutils.version import LooseVersion
import nose
@@ -37,14 +38,14 @@ def _skip_if_no_scipy():
try:
import scipy.stats
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("scipy not installed")
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("pytz not installed")
#------------------------------------------------------------------------------
# Series test cases
@@ -1772,7 +1773,8 @@ def test_cummax(self):
self.assert_(np.array_equal(result, expected))
def test_npdiff(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping due to Series no longer being an "
+ "ndarray")
# no longer works as the return type of np.diff is now nd.array
s = Series(np.arange(5))
@@ -3098,8 +3100,9 @@ def test_corr_rank(self):
self.assertAlmostEqual(result, expected)
# these methods got rewritten in 0.8
- if int(scipy.__version__.split('.')[1]) < 9:
- raise nose.SkipTest
+ if scipy.__version__ < LooseVersion('0.9'):
+ raise nose.SkipTest("skipping corr rank because of scipy version "
+ "{0}".format(scipy.__version__))
# results from R
A = Series([-0.89926396, 0.94209606, -1.03289164, -0.95445587,
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index ba60566a7fc55..d5bd1072f6a3e 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -407,17 +407,19 @@ def _validate_specification(self):
elif self.left_on is not None:
n = len(self.left_on)
if self.right_index:
- if not ((len(self.left_on) == self.right.index.nlevels)):
- raise AssertionError()
+ if len(self.left_on) != self.right.index.nlevels:
+ raise ValueError('len(left_on) must equal the number '
+ 'of levels in the index of "right"')
self.right_on = [None] * n
elif self.right_on is not None:
n = len(self.right_on)
if self.left_index:
- if not ((len(self.right_on) == self.left.index.nlevels)):
- raise AssertionError()
+ if len(self.right_on) != self.left.index.nlevels:
+ raise ValueError('len(right_on) must equal the number '
+ 'of levels in the index of "left"')
self.left_on = [None] * n
- if not ((len(self.right_on) == len(self.left_on))):
- raise AssertionError()
+ if len(self.right_on) != len(self.left_on):
+ raise ValueError("len(right_on) must equal len(left_on)")
def _get_join_indexers(left_keys, right_keys, sort=False, how='inner'):
@@ -430,8 +432,8 @@ def _get_join_indexers(left_keys, right_keys, sort=False, how='inner'):
-------
"""
- if not ((len(left_keys) == len(right_keys))):
- raise AssertionError()
+ if len(left_keys) != len(right_keys):
+ raise AssertionError('left_key and right_keys must be the same length')
left_labels = []
right_labels = []
@@ -545,8 +547,11 @@ def _left_join_on_index(left_ax, right_ax, join_keys, sort=False):
if len(join_keys) > 1:
if not ((isinstance(right_ax, MultiIndex) and
- len(join_keys) == right_ax.nlevels) ):
- raise AssertionError()
+ len(join_keys) == right_ax.nlevels)):
+ raise AssertionError("If more than one join key is given then "
+ "'right_ax' must be a MultiIndex and the "
+ "number of join keys must be the number of "
+ "levels in right_ax")
left_tmp, right_indexer = \
_get_multiindex_indexer(join_keys, right_ax,
@@ -645,8 +650,9 @@ def __init__(self, data_list, join_index, indexers, axis=1, copy=True):
if axis <= 0: # pragma: no cover
raise MergeError('Only axis >= 1 supported for this operation')
- if not ((len(data_list) == len(indexers))):
- raise AssertionError()
+ if len(data_list) != len(indexers):
+ raise AssertionError("data_list and indexers must have the same "
+ "length")
self.units = []
for data, indexer in zip(data_list, indexers):
@@ -977,8 +983,9 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
axis = 1 if axis == 0 else 0
self._is_series = isinstance(sample, ABCSeries)
- if not ((0 <= axis <= sample.ndim)):
- raise AssertionError()
+ if not 0 <= axis <= sample.ndim:
+ raise AssertionError("axis must be between 0 and {0}, "
+ "input was {1}".format(sample.ndim, axis))
# note: this is the BlockManager axis (since DataFrame is transposed)
self.axis = axis
@@ -1202,8 +1209,9 @@ def _concat_single_item(self, objs, item):
to_concat.append(item_values)
# this method only gets called with axis >= 1
- if not ((self.axis >= 1)):
- raise AssertionError()
+ if self.axis < 1:
+ raise AssertionError("axis must be >= 1, input was"
+ " {0}".format(self.axis))
return com._concat_compat(to_concat, axis=self.axis - 1)
def _get_result_dim(self):
@@ -1222,8 +1230,9 @@ def _get_new_axes(self):
continue
new_axes[i] = self._get_comb_axis(i)
else:
- if not ((len(self.join_axes) == ndim - 1)):
- raise AssertionError()
+ if len(self.join_axes) != ndim - 1:
+ raise AssertionError("length of join_axes must not be "
+ "equal to {0}".format(ndim - 1))
# ufff...
indices = lrange(ndim)
diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py
index c11ec9f338f88..a4b229e98ada9 100644
--- a/pandas/tools/pivot.py
+++ b/pandas/tools/pivot.py
@@ -386,8 +386,8 @@ def _get_names(arrs, names, prefix='row'):
else:
names.append('%s_%d' % (prefix, i))
else:
- if not ((len(names) == len(arrs))):
- raise AssertionError()
+ if len(names) != len(arrs):
+ raise AssertionError('arrays and names must have the same length')
if not isinstance(names, list):
names = list(names)
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index d44564db4b830..eec134ebeb990 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -231,6 +231,33 @@ def test_join_on(self):
source_copy['A'] = 0
self.assertRaises(Exception, target.join, source_copy, on='A')
+ def test_join_on_fails_with_different_right_index(self):
+ with tm.assertRaises(ValueError):
+ df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
+ 'b': np.random.randn(3)})
+ df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
+ 'b': np.random.randn(10)},
+ index=tm.makeCustomIndex(10, 2))
+ merge(df, df2, left_on='a', right_index=True)
+
+ def test_join_on_fails_with_different_left_index(self):
+ with tm.assertRaises(ValueError):
+ df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
+ 'b': np.random.randn(3)},
+ index=tm.makeCustomIndex(10, 2))
+ df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
+ 'b': np.random.randn(10)})
+ merge(df, df2, right_on='b', left_index=True)
+
+ def test_join_on_fails_with_different_column_counts(self):
+ with tm.assertRaises(ValueError):
+ df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
+ 'b': np.random.randn(3)})
+ df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
+ 'b': np.random.randn(10)},
+ index=tm.makeCustomIndex(10, 2))
+ merge(df, df2, right_on='a', left_on=['a', 'b'])
+
def test_join_on_pass_vector(self):
expected = self.target.join(self.source, on='C')
del expected['C']
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 76433cf0c8f88..847896871045b 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -309,12 +309,12 @@ def _generate(cls, start, end, periods, name, offset,
if tz is not None and inferred_tz is not None:
if not inferred_tz == tz:
- raise AssertionError()
+ raise AssertionError("Inferred time zone not equal to passed "
+ "time zone")
elif inferred_tz is not None:
tz = inferred_tz
-
if start is not None:
if normalize:
start = normalize_date(start)
@@ -456,16 +456,16 @@ def _cached_range(cls, start=None, end=None, periods=None, offset=None,
cachedRange = drc[offset]
if start is None:
- if not (isinstance(end, Timestamp)):
- raise AssertionError()
+ if not isinstance(end, Timestamp):
+ raise AssertionError('end must be an instance of Timestamp')
end = offset.rollback(end)
endLoc = cachedRange.get_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
- if not (isinstance(start, Timestamp)):
- raise AssertionError()
+ if not isinstance(start, Timestamp):
+ raise AssertionError('start must be an instance of Timestamp')
start = offset.rollforward(start)
@@ -601,14 +601,14 @@ def _format_native_types(self, na_rep=u('NaT'), **kwargs):
if d.time() != zero_time or d.tzinfo is not None:
return [u('%s') % x for x in data]
- values = np.array(data,dtype=object)
+ values = np.array(data, dtype=object)
mask = isnull(self.values)
values[mask] = na_rep
imask = -mask
- values[imask] = np.array([u('%d-%.2d-%.2d') % (
- dt.year, dt.month, dt.day)
- for dt in values[imask] ])
+ values[imask] = np.array([u('%d-%.2d-%.2d') % (dt.year, dt.month,
+ dt.day)
+ for dt in values[imask]])
return values.tolist()
def isin(self, values):
@@ -1130,7 +1130,6 @@ def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
else:
raise KeyError
-
stamps = self.asi8
if is_monotonic:
@@ -1147,8 +1146,8 @@ def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
return slice(left, right)
- lhs_mask = (stamps>=t1.value) if use_lhs else True
- rhs_mask = (stamps<=t2.value) if use_rhs else True
+ lhs_mask = (stamps >= t1.value) if use_lhs else True
+ rhs_mask = (stamps <= t2.value) if use_rhs else True
# try to find a the dates
return (lhs_mask & rhs_mask).nonzero()[0]
@@ -1227,7 +1226,8 @@ def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
_, parsed, reso = parse_time_string(key, freq)
- loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs, use_rhs=use_rhs)
+ loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs,
+ use_rhs=use_rhs)
return loc
def slice_indexer(self, start=None, end=None, step=None):
@@ -1274,12 +1274,13 @@ def slice_locs(self, start=None, end=None):
# so create an indexer directly
try:
if start:
- start_loc = self._get_string_slice(start,use_rhs=False)
+ start_loc = self._get_string_slice(start,
+ use_rhs=False)
else:
start_loc = np.arange(len(self))
if end:
- end_loc = self._get_string_slice(end,use_lhs=False)
+ end_loc = self._get_string_slice(end, use_lhs=False)
else:
end_loc = np.arange(len(self))
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index afa267ed5b4e4..b6f3c3c83f3d8 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -5,7 +5,6 @@
import numpy as np
from pandas.core.base import PandasObject
-import pandas.tseries.offsets as offsets
from pandas.tseries.frequencies import (get_freq_code as _gfc,
_month_numbers, FreqGroup)
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
@@ -217,7 +216,7 @@ def end_time(self):
ordinal = (self + 1).start_time.value - 1
return Timestamp(ordinal)
- def to_timestamp(self, freq=None, how='start',tz=None):
+ def to_timestamp(self, freq=None, how='start', tz=None):
"""
Return the Timestamp representation of the Period at the target
frequency at the specified end (how) of the Period
@@ -245,7 +244,7 @@ def to_timestamp(self, freq=None, how='start',tz=None):
val = self.asfreq(freq, how)
dt64 = tslib.period_ordinal_to_dt64(val.ordinal, base)
- return Timestamp(dt64,tz=tz)
+ return Timestamp(dt64, tz=tz)
year = _period_field_accessor('year', 0)
month = _period_field_accessor('month', 3)
@@ -288,7 +287,6 @@ def __unicode__(self):
value = ("%s" % formatted)
return value
-
def strftime(self, fmt):
"""
Returns the string representation of the :class:`Period`, depending
@@ -479,13 +477,13 @@ def _period_index_cmp(opname):
def wrapper(self, other):
if isinstance(other, Period):
func = getattr(self.values, opname)
- if not (other.freq == self.freq):
- raise AssertionError()
+ if other.freq != self.freq:
+ raise AssertionError("Frequencies must be equal")
result = func(other.ordinal)
elif isinstance(other, PeriodIndex):
- if not (other.freq == self.freq):
- raise AssertionError()
+ if other.freq != self.freq:
+ raise AssertionError("Frequencies must be equal")
return getattr(self.values, opname)(other.values)
else:
other = Period(other, freq=self.freq)
@@ -701,7 +699,6 @@ def asof_locs(self, where, mask):
@property
def asobject(self):
- from pandas.core.index import Index
return Index(self._box_values(self.values), dtype=object)
def _array_values(self):
@@ -940,7 +937,7 @@ def get_loc(self, key):
key = Period(key, self.freq)
try:
return self._engine.get_loc(key.ordinal)
- except KeyError as inst:
+ except KeyError:
raise KeyError(key)
def slice_locs(self, start=None, end=None):
@@ -1062,7 +1059,7 @@ def _format_with_header(self, header, **kwargs):
def _format_native_types(self, na_rep=u('NaT'), **kwargs):
- values = np.array(list(self),dtype=object)
+ values = np.array(list(self), dtype=object)
mask = isnull(self.values)
values[mask] = na_rep
@@ -1169,7 +1166,7 @@ def __setstate__(self, state):
nd_state, own_state = state
np.ndarray.__setstate__(self, nd_state)
self.name = own_state[0]
- try: # backcompat
+ try: # backcompat
self.freq = own_state[1]
except:
pass
@@ -1235,8 +1232,8 @@ def _range_from_fields(year=None, month=None, quarter=None, day=None,
base, mult = _gfc(freq)
if mult != 1:
raise ValueError('Only mult == 1 supported')
- if not (base == FreqGroup.FR_QTR):
- raise AssertionError()
+ if base != FreqGroup.FR_QTR:
+ raise AssertionError("base must equal FR_QTR")
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py
index 20d42f7211f55..96ff8c47abc1e 100644
--- a/pandas/tseries/resample.py
+++ b/pandas/tseries/resample.py
@@ -120,8 +120,9 @@ def _get_time_grouper(self, obj):
return binner, grouper
def _get_time_bins(self, axis):
- if not (isinstance(axis, DatetimeIndex)):
- raise AssertionError()
+ if not isinstance(axis, DatetimeIndex):
+ raise TypeError('axis must be a DatetimeIndex, but got '
+ 'an instance of %r' % type(axis).__name__)
if len(axis) == 0:
binner = labels = DatetimeIndex(data=[], freq=self.freq)
@@ -180,10 +181,11 @@ def _adjust_bin_edges(self, binner, ax_values):
return binner, bin_edges
def _get_time_period_bins(self, axis):
- if not(isinstance(axis, DatetimeIndex)):
- raise AssertionError()
+ if not isinstance(axis, DatetimeIndex):
+ raise TypeError('axis must be a DatetimeIndex, but got '
+ 'an instance of %r' % type(axis).__name__)
- if len(axis) == 0:
+ if not len(axis):
binner = labels = PeriodIndex(data=[], freq=self.freq)
return binner, [], labels
@@ -211,8 +213,8 @@ def _resample_timestamps(self, obj):
result = grouped.aggregate(self._agg_method)
else:
# upsampling shortcut
- if not (self.axis == 0):
- raise AssertionError()
+ if self.axis:
+ raise AssertionError('axis must be 0')
if self.closed == 'right':
res_index = binner[1:]
@@ -278,7 +280,6 @@ def _resample_periods(self, obj):
def _take_new_index(obj, indexer, new_index, axis=0):
from pandas.core.api import Series, DataFrame
- from pandas.core.internals import BlockManager
if isinstance(obj, Series):
new_values = com.take_1d(obj.values, indexer)
@@ -286,7 +287,7 @@ def _take_new_index(obj, indexer, new_index, axis=0):
elif isinstance(obj, DataFrame):
if axis == 1:
raise NotImplementedError
- return DataFrame(obj._data.take(indexer,new_index=new_index,axis=1))
+ return DataFrame(obj._data.take(indexer, new_index=new_index, axis=1))
else:
raise NotImplementedError
diff --git a/pandas/tseries/tests/test_converter.py b/pandas/tseries/tests/test_converter.py
index c3bb7d82dfb6d..7cb84b5134a9a 100644
--- a/pandas/tseries/tests/test_converter.py
+++ b/pandas/tseries/tests/test_converter.py
@@ -11,7 +11,7 @@
try:
import pandas.tseries.converter as converter
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no pandas.tseries.converter, skipping")
def test_timtetonum_accepts_unicode():
diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py
index d17b42c1e23c9..cb17375266edf 100644
--- a/pandas/tseries/tests/test_daterange.py
+++ b/pandas/tseries/tests/test_daterange.py
@@ -23,7 +23,7 @@ def _skip_if_no_pytz():
try:
import pytz
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("pytz not installed")
def _skip_if_no_cday():
diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py
index 96888df114950..cfbde75f6ae21 100644
--- a/pandas/tseries/tests/test_plotting.py
+++ b/pandas/tseries/tests/test_plotting.py
@@ -23,7 +23,7 @@ def _skip_if_no_scipy():
try:
import scipy
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("scipy not installed")
@tm.mplskip
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index 620310e32ffcc..c60d4b3fd48d1 100644
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -11,6 +11,8 @@
from pandas.tseries.offsets import Minute, BDay
from pandas.tseries.period import period_range, PeriodIndex, Period
from pandas.tseries.resample import DatetimeIndex, TimeGrouper
+from pandas.tseries.frequencies import MONTHS, DAYS
+
import pandas.tseries.offsets as offsets
import pandas as pd
@@ -28,7 +30,7 @@ def _skip_if_no_pytz():
try:
import pytz
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("pytz not installed")
class TestResample(unittest.TestCase):
@@ -660,9 +662,6 @@ def _simple_pts(start, end, freq='D'):
return TimeSeries(np.random.randn(len(rng)), index=rng)
-from pandas.tseries.frequencies import MONTHS, DAYS
-
-
class TestResamplePeriodIndex(unittest.TestCase):
_multiprocess_can_split_ = True
@@ -1055,6 +1054,7 @@ def test_resample_doesnt_truncate(self):
result = series.resample('D')
self.assertEquals(result.index[0], dates[0])
+
class TestTimeGrouper(unittest.TestCase):
def setUp(self):
@@ -1129,6 +1129,21 @@ def f(x):
result = bingrouped.agg(f)
tm.assert_panel_equal(result, binagg)
+ def test_fails_on_no_datetime_index(self):
+ index_names = ('Int64Index', 'PeriodIndex', 'Index', 'Float64Index',
+ 'MultiIndex')
+ index_funcs = (tm.makeIntIndex, tm.makePeriodIndex,
+ tm.makeUnicodeIndex, tm.makeFloatIndex,
+ lambda m: tm.makeCustomIndex(m, 2))
+ n = 2
+ for name, func in zip(index_names, index_funcs):
+ index = func(n)
+ df = DataFrame({'a': np.random.randn(n)}, index=index)
+ with tm.assertRaisesRegexp(TypeError,
+ "axis must be a DatetimeIndex, "
+ "but got an instance of %r" % name):
+ df.groupby(TimeGrouper('D'))
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 0e5e3d1922ec4..28725a6a9ac56 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -5,6 +5,8 @@
import unittest
import operator
+from distutils.version import LooseVersion
+
import nose
import numpy as np
@@ -49,7 +51,7 @@ def _skip_if_no_pytz():
try:
import pytz
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("pytz not installed")
class TestTimeSeriesDuplicates(unittest.TestCase):
@@ -661,8 +663,8 @@ def test_index_cast_datetime64_other_units(self):
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
- if np.__version__ >= '1.7':
- raise nose.SkipTest("Test requires numpy < 1.7")
+ if np.__version__ >= LooseVersion('1.7'):
+ raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
diff --git a/pandas/tseries/tests/test_timeseries_legacy.py b/pandas/tseries/tests/test_timeseries_legacy.py
index babf60758f751..d1f4f647db0e1 100644
--- a/pandas/tseries/tests/test_timeseries_legacy.py
+++ b/pandas/tseries/tests/test_timeseries_legacy.py
@@ -48,7 +48,7 @@ def _skip_if_no_pytz():
try:
import pytz
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("pytz not installed")
# infortunately, too much has changed to handle these legacy pickles
# class TestLegacySupport(unittest.TestCase):
@@ -59,7 +59,7 @@ class LegacySupport(object):
@classmethod
def setUpClass(cls):
if compat.PY3:
- raise nose.SkipTest
+ raise nose.SkipTest("not compatible with Python >= 3")
pth, _ = os.path.split(os.path.abspath(__file__))
filepath = os.path.join(pth, 'data', 'frame.pickle')
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index 883025bee1ba1..80d85241ae0ff 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -37,7 +37,7 @@ def _skip_if_no_pytz():
try:
import pytz
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("pytz not installed")
try:
import pytz
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 5dda1a9b352d9..39364d21d4aa1 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -30,7 +30,8 @@ def _infer(a, b):
tz = a.tzinfo
if b and b.tzinfo:
if not (tslib.get_timezone(tz) == tslib.get_timezone(b.tzinfo)):
- raise AssertionError()
+ raise AssertionError('Inputs must both have the same timezone,'
+ ' {0} != {1}'.format(tz, b.tzinfo))
return tz
tz = None
if start is not None:
| continuation of #3519 because of a branch rename.
### UPDATE: The exception parsing script has been moved to [a Gist](https://gist.github.com/cpcloud/6745173)
This PR partially addresses #3024.
- all `AssertionError` exceptions now have an informative error message in them
- some `AssertionErrors` have been converted to different `Exception` subclasses, where it makes sense, and there's a corresponding test wherever these were changed.
- all `nose.SkipTest` exceptions now have an informative message
| https://api.github.com/repos/pandas-dev/pandas/pulls/3730 | 2013-05-31T16:42:37Z | 2013-09-28T20:44:42Z | 2013-09-28T20:44:42Z | 2014-06-26T21:36:32Z |
DOC/BLD: squash as many doc-build warnings as possible | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index f428579b78570..4100c4404ece6 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1369,7 +1369,7 @@ For instance:
.. ipython:: python
:suppress:
- reset_printoptions()
+ reset_option('^display\.')
The ``set_printoptions`` function has a number of options for controlling how
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index 2b42288f670bd..7870bdbeb97d3 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -17,7 +17,7 @@ objects. To get started, import numpy and load pandas into your namespace:
from pandas import *
randn = np.random.randn
np.set_printoptions(precision=4, suppress=True)
- set_printoptions(precision=4, max_columns=8)
+ set_option('display.precision', 4, 'display.max_columns', 8)
.. ipython:: python
@@ -571,7 +571,7 @@ R package):
:suppress:
# force a summary to be printed
- set_printoptions(max_rows=5)
+ pd.set_option('display.max_rows', 5)
.. ipython:: python
@@ -582,7 +582,7 @@ R package):
:suppress:
# restore GlobalPrintConfig
- reset_printoptions()
+ pd.reset_option('^display\.')
However, using ``to_string`` will return a string representation of the
DataFrame in tabular form, though it won't always fit the console width:
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 43b512a934558..1c4f5db9a45d0 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -991,9 +991,9 @@ Note that how the index is displayed by be controlled using the
.. ipython:: python
- pd.set_printoptions(multi_sparse=False)
+ pd.set_option('display.multi_sparse', False)
df
- pd.set_printoptions(multi_sparse=True)
+ pd.set_option('display.multi_sparse', True)
Reconstructing the level labels
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/v0.10.1.txt b/doc/source/v0.10.1.txt
index e8435df7b2b0c..3c22e9552c3a2 100644
--- a/doc/source/v0.10.1.txt
+++ b/doc/source/v0.10.1.txt
@@ -67,8 +67,11 @@ Retrieving unique values in an indexable or data column.
.. ipython:: python
- store.unique('df','index')
- store.unique('df','string')
+ import warnings
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore', category=DeprecationWarning)
+ store.unique('df','index')
+ store.unique('df','string')
You can now store ``datetime64`` in data columns
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 9e8a69a32d454..bca38ba55e205 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -81,7 +81,7 @@ def test_eng_float_formatter(self):
fmt.set_eng_float_format(accuracy=0)
repr(self.frame)
- fmt.reset_printoptions()
+ fmt.reset_option('^display.')
def test_repr_tuples(self):
buf = StringIO()
@@ -719,11 +719,11 @@ def test_repr_corner(self):
def test_frame_info_encoding(self):
index = ['\'Til There Was You (1997)',
'ldum klaka (Cold Fever) (1994)']
- fmt.set_printoptions(max_rows=1)
+ fmt.set_option('display.max_rows', 1)
df = DataFrame(columns=['a', 'b', 'c'], index=index)
repr(df)
repr(df.T)
- fmt.set_printoptions(max_rows=200)
+ fmt.set_option('display.max_rows', 200)
def test_large_frame_repr(self):
def wrap_rows_options(f):
@@ -1026,9 +1026,9 @@ def test_to_string_no_index(self):
assert(df_s == expected)
def test_to_string_float_formatting(self):
- fmt.reset_printoptions()
- fmt.set_printoptions(precision=6, column_space=12,
- notebook_repr_html=False)
+ fmt.reset_option('^display.')
+ fmt.set_option('display.precision', 6, 'display.column_space',
+ 12, 'display.notebook_repr_html', False)
df = DataFrame({'x': [0, 0.25, 3456.000, 12e+45, 1.64e+6,
1.7e+8, 1.253456, np.pi, -1e6]})
@@ -1057,7 +1057,7 @@ def test_to_string_float_formatting(self):
'1 0.253')
assert(df_s == expected)
- fmt.reset_printoptions()
+ fmt.reset_option('^display.')
self.assertEqual(get_option("display.precision"), 7)
df = DataFrame({'x': [1e9, 0.2512]})
@@ -1149,7 +1149,7 @@ def test_to_string_index_formatter(self):
self.assertEqual(rs, xp)
def test_to_string_left_justify_cols(self):
- fmt.reset_printoptions()
+ fmt.reset_option('^display.')
df = DataFrame({'x': [3234, 0.253]})
df_s = df.to_string(justify='left')
expected = (' x \n'
@@ -1158,7 +1158,7 @@ def test_to_string_left_justify_cols(self):
assert(df_s == expected)
def test_to_string_format_na(self):
- fmt.reset_printoptions()
+ fmt.reset_option('^display.')
df = DataFrame({'A': [np.nan, -1, -2.1234, 3, 4],
'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})
result = df.to_string()
@@ -1420,13 +1420,13 @@ def test_to_html_index(self):
def test_repr_html(self):
self.frame._repr_html_()
- fmt.set_printoptions(max_rows=1, max_columns=1)
+ fmt.set_option('display.max_rows', 1, 'display.max_columns', 1)
self.frame._repr_html_()
- fmt.set_printoptions(notebook_repr_html=False)
+ fmt.set_option('display.notebook_repr_html', False)
self.frame._repr_html_()
- fmt.reset_printoptions()
+ fmt.reset_option('^display.')
def test_fake_qtconsole_repr_html(self):
def get_ipython():
@@ -1437,11 +1437,11 @@ def get_ipython():
repstr = self.frame._repr_html_()
self.assert_(repstr is not None)
- fmt.set_printoptions(max_rows=5, max_columns=2)
+ fmt.set_option('display.max_rows', 5, 'display.max_columns', 2)
repstr = self.frame._repr_html_()
self.assert_('class' in repstr) # info fallback
- fmt.reset_printoptions()
+ fmt.reset_option('^display.')
def test_to_html_with_classes(self):
df = pandas.DataFrame()
@@ -1751,7 +1751,7 @@ def test_eng_float_formatter(self):
'3 1E+06')
self.assertEqual(result, expected)
- fmt.reset_printoptions()
+ fmt.reset_option('^display.')
def compare(self, formatter, input, output):
formatted_input = formatter(input)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 39452ece7a33d..fa6579ca61358 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -3927,16 +3927,16 @@ def test_repr_unsortable(self):
index=np.arange(50))
foo = repr(unsortable)
- fmt.set_printoptions(precision=3, column_space=10)
+ fmt.set_option('display.precision', 3, 'display.column_space', 10)
repr(self.frame)
- fmt.set_printoptions(max_rows=10, max_columns=2)
+ fmt.set_option('display.max_rows', 10, 'display.max_columns', 2)
repr(self.frame)
- fmt.set_printoptions(max_rows=1000, max_columns=1000)
+ fmt.set_option('display.max_rows', 1000, 'display.max_columns', 1000)
repr(self.frame)
- fmt.reset_printoptions()
+ fmt.reset_option('^display\.')
warnings.filters = warn_filters
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index da8c900e903c2..5926f5d51abfd 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -1371,12 +1371,12 @@ def test_format_sparse_config(self):
category=FutureWarning,
module=".*format")
# #1538
- pd.set_printoptions(multi_sparse=False)
+ pd.set_option('display.multi_sparse', False)
result = self.index.format()
self.assertEqual(result[1], 'foo two')
- pd.reset_printoptions()
+ pd.reset_option("^display\.")
warnings.filters = warn_filters
| https://api.github.com/repos/pandas-dev/pandas/pulls/3725 | 2013-05-31T01:00:22Z | 2013-05-31T13:21:02Z | 2013-05-31T13:21:02Z | 2014-07-12T14:11:36Z | |
DOC: older version of bs4 for 64bit as well | diff --git a/doc/source/install.rst b/doc/source/install.rst
index 407746e3cb000..2a0f67fe8d9e6 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -75,14 +75,14 @@ Dependencies
Recommended Dependencies
~~~~~~~~~~~~~~~~~~~~~~~~
- * `numexpr <http://code.google.com/p/numexpr/>`__: for accelerating certain numerical operations.
+ * `numexpr <http://code.google.com/p/numexpr/>`__: for accelerating certain numerical operations.
``numexpr`` uses multiple cores as well as smart chunking and caching to achieve large speedups.
* `bottleneck <http://berkeleyanalytics.com/bottleneck>`__: for accelerating certain types of ``nan``
evaluations. ``bottleneck`` uses specialized cython routines to achieve large speedups.
.. note::
- You are highly encouraged to install these libraries, as they provide large speedups, especially
+ You are highly encouraged to install these libraries, as they provide large speedups, especially
if working with large data sets.
@@ -105,9 +105,9 @@ Optional Dependencies
.. warning::
- If you are on a 32-bit machine you need to install an older version of
- Beautiful Soup. Version 4.0.2 of BeautifulSoup has been tested on Ubuntu
- 12.04.02 32-bit.
+ You need to install an older version of Beautiful Soup:
+ - Version 4.1.3 and 4.0.2 have been confirmed for 64-bit Ubuntu/Debian
+ - Version 4.0.2 have been confirmed for 32-bit Ubuntu
* Any recent version of ``html5lib`` is okay.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3724 | 2013-05-31T00:08:29Z | 2013-05-31T00:10:59Z | 2013-05-31T00:10:59Z | 2014-07-16T08:10:52Z | |
DOC/BLD: fix annoying sphinx bugs | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 994a57247e50b..69f38bf0c7c61 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1348,8 +1348,8 @@ def iterpairs(seq):
-------
iterator returning overlapping pairs of elements
- Example
- -------
+ Examples
+ --------
>>> iterpairs([1, 2, 3, 4])
[(1, 2), (2, 3), (3, 4)
"""
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f590585eea9fa..9c0a2843370f4 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1374,9 +1374,8 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
mode='w', nanRep=None, encoding=None, quoting=None,
line_terminator='\n', chunksize=None,
tupleize_cols=True, **kwds):
- """
- Write DataFrame to a comma-separated values (csv) file
-
+ r"""Write DataFrame to a comma-separated values (csv) file
+
Parameters
----------
path_or_buf : string or file handle / StringIO
@@ -1390,8 +1389,8 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
cols : sequence, optional
Columns to write
header : boolean or list of string, default True
- Write out column names. If a list of string is given it is
- assumed to be aliases for the column names
+ Write out column names. If a list of string is given it is assumed
+ to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, or False, default None
@@ -1400,21 +1399,23 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
sequence should be given if the DataFrame uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R
- nanRep : deprecated, use na_rep
- mode : Python write mode, default 'w'
+ nanRep : None
+ deprecated, use na_rep
+ mode : str
+ Python write mode, default 'w'
encoding : string, optional
a string representing the encoding to use if the contents are
non-ascii, for python versions prior to 3
- line_terminator: string, default '\n'
+ line_terminator : string, default '\\n'
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL
- chunksize : rows to write at a time
+ chunksize : int or None
+ rows to write at a time
tupleize_cols : boolean, default True
write multi_index columns as a list of tuples (if True)
or new (expanded format) if False)
-
"""
if nanRep is not None: # pragma: no cover
import warnings
@@ -2401,27 +2402,31 @@ def xs(self, key, axis=0, level=None, copy=True):
_xs = xs
def lookup(self, row_labels, col_labels):
- """
- Label-based "fancy indexing" function for DataFrame. Given equal-length
- arrays of row and column labels, return an array of the values
- corresponding to each (row, col) pair.
+ """Label-based "fancy indexing" function for DataFrame. Given
+ equal-length arrays of row and column labels, return an array of the
+ values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
+ The row labels to use for lookup
col_labels : sequence
+ The column labels to use for lookup
Notes
-----
Akin to
- result = []
- for row, col in zip(row_labels, col_labels):
- result.append(df.get_value(row, col))
+ .. code-block:: python
- Example
- -------
+ result = []
+ for row, col in zip(row_labels, col_labels):
+ result.append(df.get_value(row, col))
+
+ Examples
+ --------
values : ndarray
+ The found values
"""
from itertools import izip
@@ -3483,12 +3488,16 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
Parameters
----------
to_replace : str, regex, list, dict, Series, numeric, or None
+
* str or regex:
+
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
+
* list of str, regex, or numeric:
+
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
@@ -3496,7 +3505,9 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str and regex rules apply as above.
+
* dict:
+
- Nested dictionaries, e.g., {'a': {'b': nan}}, are read as
follows: look in column 'a' for the value 'b' and replace it
with nan. You can nest regular expressions as well. Note that
@@ -3505,11 +3516,14 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
- Keys map to column names and values map to substitution
values. You can treat this as a special case of passing two
lists except that you are specifying the column to search in.
+
* None:
+
- This means that the ``regex`` argument must be a string,
compiled regular expression, or list, dict, ndarray or Series
of such elements. If `value` is also ``None`` then this
**must** be a nested dictionary or ``Series``.
+
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to use to fill holes (e.g. 0), alternately a dict of values
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 122355581956d..d409adfd71158 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1374,8 +1374,8 @@ def aggregate(self, func_or_funcs, *args, **kwargs):
-----
agg is an alias for aggregate. Use it.
- Example
- -------
+ Examples
+ --------
>>> series
bar 1.0
baz 2.0
@@ -1523,8 +1523,8 @@ def transform(self, func, *args, **kwargs):
func : function
To apply to each group. Should return a Series with the same index
- Example
- -------
+ Examples
+ --------
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
Returns
@@ -1906,7 +1906,7 @@ def transform(self, func, *args, **kwargs):
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
- Example
+ Examples
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index ea102cb6803d7..c23056ce76a62 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -9,10 +9,6 @@
from itertools import izip
import numpy as np
-from pandas.core.index import Index, MultiIndex
-from pandas.core.frame import DataFrame
-import pandas.core.common as com
-from pandas.util import py3compat
from pandas.io.parsers import TextParser
from pandas.tseries.period import Period
import json
@@ -21,8 +17,7 @@ def read_excel(path_or_buf, sheetname, header=0, skiprows=None, skip_footer=0,
index_col=None, parse_cols=None, parse_dates=False,
date_parser=None, na_values=None, thousands=None, chunksize=None,
kind=None, **kwds):
- """
- Read Excel table into DataFrame
+ """Read an Excel table into a pandas DataFrame
Parameters
----------
@@ -38,23 +33,30 @@ def read_excel(path_or_buf, sheetname, header=0, skiprows=None, skip_footer=0,
Column to use as the row labels of the DataFrame. Pass None if
there is no such column
parse_cols : int or list, default None
- If None then parse all columns,
- If int then indicates last column to be parsed
- If list of ints then indicates list of column numbers to be parsed
- If string then indicates comma separated list of column names and
- column ranges (e.g. "A:E" or "A,C,E:F")
+ * If None then parse all columns,
+ * If int then indicates last column to be parsed
+ * If list of ints then indicates list of column numbers to be parsed
+ * If string then indicates comma separated list of column names and
+ column ranges (e.g. "A:E" or "A,C,E:F")
na_values : list-like, default None
List of additional strings to recognize as NA/NaN
Returns
-------
parsed : DataFrame
+ DataFrame from the passed in Excel file
"""
return ExcelFile(path_or_buf,kind=kind).parse(sheetname=sheetname,
- header=0, skiprows=None, skip_footer=0,
- index_col=None, parse_cols=None, parse_dates=False,
- date_parser=None, na_values=None, thousands=None, chunksize=None,
- kind=None, **kwds)
+ header=0, skiprows=None,
+ skip_footer=0,
+ index_col=None,
+ parse_cols=None,
+ parse_dates=False,
+ date_parser=None,
+ na_values=None,
+ thousands=None,
+ chunksize=None, kind=None,
+ **kwds)
class ExcelFile(object):
"""
@@ -90,8 +92,7 @@ def parse(self, sheetname, header=0, skiprows=None, skip_footer=0,
index_col=None, parse_cols=None, parse_dates=False,
date_parser=None, na_values=None, thousands=None, chunksize=None,
**kwds):
- """
- Read Excel table into DataFrame
+ """Read an Excel table into DataFrame
Parameters
----------
@@ -107,17 +108,19 @@ def parse(self, sheetname, header=0, skiprows=None, skip_footer=0,
Column to use as the row labels of the DataFrame. Pass None if
there is no such column
parse_cols : int or list, default None
- If None then parse all columns,
- If int then indicates last column to be parsed
- If list of ints then indicates list of column numbers to be parsed
- If string then indicates comma separated list of column names and
- column ranges (e.g. "A:E" or "A,C,E:F")
+ * If None then parse all columns
+ * If int then indicates last column to be parsed
+ * If list of ints then indicates list of column numbers to be
+ parsed
+ * If string then indicates comma separated list of column names and
+ column ranges (e.g. "A:E" or "A,C,E:F")
na_values : list-like, default None
List of additional strings to recognize as NA/NaN
Returns
-------
parsed : DataFrame
+ DataFrame parsed from the Excel file
"""
# has_index_names: boolean, default False
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 0ae835c81d870..046263a9cb63c 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -386,9 +386,6 @@ def select(self, key, where=None, start=None, stop=None, columns=None, iterator=
Parameters
----------
key : object
-
- Optional Parameters
- -------------------
where : list of Term (or convertable) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
@@ -421,9 +418,6 @@ def select_as_coordinates(self, key, where=None, start=None, stop=None, **kwargs
Parameters
----------
key : object
-
- Optional Parameters
- -------------------
where : list of Term (or convertable) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
@@ -551,9 +545,6 @@ def remove(self, key, where=None, start=None, stop=None):
----------
key : string
Node to remove or delete rows from
-
- Optional Parameters
- -------------------
where : list of Term (or convertable) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
@@ -602,9 +593,6 @@ def append(self, key, value, columns=None, **kwargs):
----------
key : object
value : {Series, DataFrame, Panel, Panel4D}
-
- Optional Parameters
- -------------------
data_columns : list of columns to create as data columns, or True to use all columns
min_itemsize : dict of columns that specify minimum string sizes
nan_rep : string to use as string nan represenation
@@ -3276,30 +3264,29 @@ def _need_convert(kind):
return False
class Term(object):
- """ create a term object that holds a field, op, and value
+ """create a term object that holds a field, op, and value
- Parameters
- ----------
- field : dict, string term expression, or the field to operate (must be a valid index/column type of DataFrame/Panel)
- op : a valid op (defaults to '=') (optional)
- >, >=, <, <=, =, != (not equal) are allowed
- value : a value or list of values (required)
- queryables : a kinds map (dict of column name -> kind), or None i column is non-indexable
+ Parameters
+ ----------
+ field : dict, string term expression, or the field to operate (must be a valid index/column type of DataFrame/Panel)
+ op : a valid op (defaults to '=') (optional)
+ >, >=, <, <=, =, != (not equal) are allowed
+ value : a value or list of values (required)
+ queryables : a kinds map (dict of column name -> kind), or None i column is non-indexable
- Returns
- -------
- a Term object
-
- Examples
- --------
- Term(dict(field = 'index', op = '>', value = '20121114'))
- Term('index', '20121114')
- Term('index', '>', '20121114')
- Term('index', ['20121114','20121114'])
- Term('index', datetime(2012,11,14))
- Term('major_axis>20121114')
- Term('minor_axis', ['A','B'])
+ Returns
+ -------
+ a Term object
+ Examples
+ --------
+ >>> Term(dict(field = 'index', op = '>', value = '20121114'))
+ >>> Term('index', '20121114')
+ >>> Term('index', '>', '20121114')
+ >>> Term('index', ['20121114','20121114'])
+ >>> Term('index', datetime(2012,11,14))
+ >>> Term('major_axis>20121114')
+ >>> Term('minor_axis', ['A','B'])
"""
_ops = ['<=', '<', '>=', '>', '!=', '==', '=']
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 3b66eba31fca1..5985a8a898b27 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -394,9 +394,10 @@ def to_offset(freqstr):
"""
Return DateOffset object from string representation
- Example
- -------
- to_offset('5Min') -> Minute(5)
+ Examples
+ --------
+ >>> to_offset('5Min')
+ Minute(5)
"""
if freqstr is None:
return None
@@ -444,8 +445,8 @@ def _base_and_stride(freqstr):
"""
Return base freq and stride info from string representation
- Example
- -------
+ Examples
+ --------
_freq_and_stride('5Min') -> 'Min', 5
"""
groups = opattern.match(freqstr)
@@ -478,8 +479,8 @@ def get_offset(name):
"""
Return DateOffset object associated with rule name
- Example
- -------
+ Examples
+ --------
get_offset('EOM') --> BMonthEnd(1)
"""
if name not in _dont_uppercase:
@@ -512,8 +513,8 @@ def get_offset_name(offset):
"""
Return rule name associated with a DateOffset object
- Example
- -------
+ Examples
+ --------
get_offset_name(BMonthEnd(1)) --> 'EOM'
"""
name = _offset_names.get(offset)
| closes #3721. cc @jreback
| https://api.github.com/repos/pandas-dev/pandas/pulls/3722 | 2013-05-30T23:13:59Z | 2013-05-30T23:56:42Z | 2013-05-30T23:56:42Z | 2014-07-17T00:18:56Z |
DOC: clean up io docs and fix up links | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 2e59bf6533205..fea8b95bb2bcf 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -38,9 +38,8 @@ Pickling
load
save
-File IO
-~~~~~~~
-
+Flat File IO
+^^^^^^^^^^^^
.. currentmodule:: pandas.io.parsers
.. autosummary::
@@ -62,9 +61,13 @@ File IO
:toctree: generated/
read_stata
+ read_fwf
+ read_clipboard
.. currentmodule:: pandas.io.html
+HTML IO
+^^^^^^^
.. autosummary::
:toctree: generated/
@@ -80,6 +83,46 @@ SQL
read_sql
+Excel IO
+^^^^^^^^
+.. currentmodule:: pandas.io.parsers
+
+.. autosummary::
+ :toctree: generated/
+
+ ExcelFile.parse
+
+SQL IO
+^^^^^^
+.. currentmodule:: pandas.io.sql
+
+.. autosummary::
+ :toctree: generated/
+
+ read_frame
+ write_frame
+
+.. currentmodule:: pandas.io
+
+.. autosummary::
+ :toctree: generated/
+
+ sql
+
+STATA IO
+^^^^^^^^
+.. currentmodule:: pandas.io.stata
+
+.. autosummary::
+ :toctree: generated/
+
+ read_stata
+ StataReader.data
+ StataReader.data_label
+ StataReader.value_labels
+ StataReader.variable_labels
+ StataWriter.write_file
+
HDFStore: PyTables (HDF5)
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -94,6 +137,17 @@ HDFStore: PyTables (HDF5)
HDFStore.get
HDFStore.select
+Top-level Missing Data
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. currentmodule:: pandas.core.common
+
+.. autosummary::
+ :toctree: generated/
+
+ isnull
+ notnull
+
Standard moving window functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 5bf3075f2688e..a8d5cf4ab2f60 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -667,9 +667,9 @@ should pass the ``escapechar`` option:
Files with Fixed Width Columns
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-While `read_csv` reads delimited data, the :func:`~pandas.io.parsers.read_fwf`
+While ``read_csv`` reads delimited data, the :func:`~pandas.io.parsers.read_fwf`
function works with data files that have known and fixed column widths.
-The function parameters to `read_fwf` are largely the same as `read_csv` with
+The function parameters to ``read_fwf`` are largely the same as `read_csv` with
two extra parameters:
- ``colspecs``: a list of pairs (tuples), giving the extents of the
@@ -2123,23 +2123,30 @@ Writing to STATA format
.. _io.StataWriter:
-The method ``to_stata`` will write a DataFrame into a .dta file.
+The method :func:`~pandas.io.stata.StataWriter.write_file` of
+:class:`~pandas.io.stata.StataWriter` will write a DataFrame into a .dta file.
The format version of this file is always the latest one, 115.
.. ipython:: python
- df = DataFrame(randn(10,2),columns=list('AB'))
- df.to_stata('stata.dta')
+ from pandas.io.stata import StataWriter
+ df = DataFrame(randn(10, 2), columns=list('AB'))
+ writer = StataWriter('stata.dta', df)
+ writer.write_file()
Reading from STATA format
~~~~~~~~~~~~~~~~~~~~~~~~~
-.. _io.StataReader:
+.. _io.statareader:
.. versionadded:: 0.11.1
The top-level function ``read_stata`` will read a dta format file
and return a DataFrame:
+The class :class:`~pandas.io.stata.StataReader` will read the header of the
+given dta file at initialization. Its method
+:func:`~pandas.io.stata.StataReader.data` will read the observations,
+converting them to a DataFrame which is returned:
.. ipython:: python
@@ -2153,6 +2160,7 @@ also be retrieved by the function ``variable_labels``, which requires data to be
called before (see ``pandas.io.stata.StataReader``).
The StataReader supports .dta Formats 104, 105, 108, 113-115.
+Alternatively, the function :func:`~pandas.io.stata.read_stata` can be used
.. ipython:: python
:suppress:
diff --git a/pandas/core/common.py b/pandas/core/common.py
index ee8b3bbbda647..994a57247e50b 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -48,17 +48,19 @@ class AmbiguousIndexError(PandasError, KeyError):
_INT64_DTYPE = np.dtype(np.int64)
def isnull(obj):
- '''
- Detect missing values (NaN in numeric arrays, None/NaN in object arrays)
+ """Detect missing values (NaN in numeric arrays, None/NaN in object arrays)
Parameters
----------
- arr: ndarray or object value
+ arr : ndarray or object value
+ Object to check for null-ness
Returns
-------
- boolean ndarray or boolean
- '''
+ isnulled : array-like of bool or bool
+ Array or bool indicating whether an object is null or if an array is
+ given which of the element is null.
+ """
return _isnull(obj)
@@ -187,18 +189,20 @@ def _isnull_ndarraylike_old(obj):
def notnull(obj):
- '''
- Replacement for numpy.isfinite / -numpy.isnan which is suitable
- for use on object arrays.
+ """Replacement for numpy.isfinite / -numpy.isnan which is suitable for use
+ on object arrays.
Parameters
----------
- arr: ndarray or object value
+ arr : ndarray or object value
+ Object to check for *not*-null-ness
Returns
-------
- boolean ndarray or boolean
- '''
+ isnulled : array-like of bool or bool
+ Array or bool indicating whether an object is *not* null or if an array
+ is given which of the element is *not* null.
+ """
res = isnull(obj)
if np.isscalar(res):
return not res
diff --git a/pandas/io/__init__.py b/pandas/io/__init__.py
index e69de29bb2d1d..a984c40cdc098 100644
--- a/pandas/io/__init__.py
+++ b/pandas/io/__init__.py
@@ -0,0 +1,2 @@
+import sql
+import stata
| closes #3718.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3720 | 2013-05-30T16:09:36Z | 2013-05-30T16:41:20Z | 2013-05-30T16:41:20Z | 2014-07-16T08:10:50Z |
BLD: switch MAJOR and MICRO | diff --git a/setup.py b/setup.py
index bbad3e87a3fa0..2e7fd778578fd 100755
--- a/setup.py
+++ b/setup.py
@@ -183,9 +183,9 @@ def build_extensions(self):
'Topic :: Scientific/Engineering',
]
-MAJOR = 1
+MAJOR = 0
MINOR = 11
-MICRO = 0
+MICRO = 1
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
| cc @y-p
| https://api.github.com/repos/pandas-dev/pandas/pulls/3716 | 2013-05-30T14:47:20Z | 2013-05-30T14:56:07Z | 2013-05-30T14:56:07Z | 2014-07-16T08:10:48Z |
BUG: restore 10.1 expand_repr behaviour, only for < max_cols, if wider then term GH3706 | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 27aa68ee39d8e..9e276e01dd723 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -703,10 +703,13 @@ def __unicode__(self):
self.to_string(buf=buf)
else:
width, _ = fmt.get_console_size()
- max_rows = get_option("display.max_rows")
- if (get_option("display.expand_frame_repr")
- and fits_vertical):
- # and len(self.columns) < max_rows)
+ max_columns = get_option("display.max_columns")
+ expand_repr = get_option("display.expand_frame_repr")
+ # within max_cols and max_rows, but cols exceed width
+ # of terminal, then use expand_repr
+ if (fits_vertical and
+ expand_repr and
+ len(self.columns) <= max_columns):
self.to_string(buf=buf, line_width=width)
else:
max_info_rows = get_option('display.max_info_rows')
| #3706
| https://api.github.com/repos/pandas-dev/pandas/pulls/3713 | 2013-05-30T05:09:21Z | 2013-05-30T05:10:56Z | 2013-05-30T05:10:56Z | 2014-07-16T08:10:45Z |
API: deprecate unused DataFrame.replace arguments | diff --git a/RELEASE.rst b/RELEASE.rst
index 1d63a7f53954d..e611b330b08f0 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -110,6 +110,8 @@ pandas 0.11.1
- added ``pandas.io.api`` for i/o imports
- removed ``Excel`` support to ``pandas.io.excel``
- added top-level ``pd.read_sql`` and ``to_sql`` DataFrame methods
+ - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
+ deprecated
**Bug Fixes**
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 9113c74c6813b..c025450c44cca 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -83,6 +83,8 @@ API changes
- ``DataFrame.interpolate()`` is now deprecated. Please use
``DataFrame.fillna()`` and ``DataFrame.replace()`` instead. (GH3582_,
GH3675_, GH3676_)
+ - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
+ deprecated
- Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column
to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3ad8de077f1ea..1dfeae997451a 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3481,9 +3481,9 @@ def bfill(self, axis=0, inplace=False, limit=None):
return self.fillna(method='bfill', axis=axis, inplace=inplace,
limit=limit)
- def replace(self, to_replace=None, value=None, method='pad', axis=0,
- inplace=False, limit=None, regex=False, infer_types=False):
- """Replace values given in 'to_replace' with 'value' or using 'method'.
+ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
+ regex=False, infer_types=False, method=None, axis=None):
+ """Replace values given in 'to_replace' with 'value'.
Parameters
----------
@@ -3521,13 +3521,6 @@ def replace(self, to_replace=None, value=None, method='pad', axis=0,
specifying which value to use for each column (columns not in the
dict will not be filled). Regular expressions, strings and lists or
dicts of such objects are also allowed.
- method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad'
- Method to use for filling holes in reindexed Series
- pad / ffill: propagate last valid observation forward to next valid
- backfill / bfill: use NEXT valid observation to fill gap
- axis : {0, 1}, default 0
- 0: fill column-by-column
- 1: fill row-by-row
inplace : boolean, default False
If True, fill the DataFrame in place. Note: this will modify any
other views on this DataFrame, like if you took a no-copy slice of
@@ -3580,10 +3573,17 @@ def replace(self, to_replace=None, value=None, method='pad', axis=0,
if not isinstance(regex, bool) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is "
"not a bool")
- self._consolidate_inplace()
+ if method is not None:
+ from warnings import warn
+ warn('the "method" argument is deprecated and will be removed in'
+ 'v0.12; this argument has no effect')
- axis = self._get_axis_number(axis)
- method = com._clean_fill_method(method)
+ if axis is not None:
+ from warnings import warn
+ warn('the "axis" argument is deprecated and will be removed in'
+ 'v0.12; this argument has no effect')
+
+ self._consolidate_inplace()
if value is None:
if not isinstance(to_replace, (dict, Series)):
@@ -3615,8 +3615,8 @@ def replace(self, to_replace=None, value=None, method='pad', axis=0,
else:
to_replace, value = keys, values
- return self.replace(to_replace, value, method=method, axis=axis,
- inplace=inplace, limit=limit, regex=regex,
+ return self.replace(to_replace, value, inplace=inplace,
+ limit=limit, regex=regex,
infer_types=infer_types)
else:
if not len(self.columns):
@@ -3629,7 +3629,7 @@ def replace(self, to_replace=None, value=None, method='pad', axis=0,
for c, src in to_replace.iteritems():
if c in value and c in self:
new_data = new_data.replace(src, value[c],
- filter=[ c ],
+ filter=[c],
inplace=inplace,
regex=regex)
@@ -3638,7 +3638,7 @@ def replace(self, to_replace=None, value=None, method='pad', axis=0,
for k, src in to_replace.iteritems():
if k in self:
new_data = new_data.replace(src, value,
- filter = [ k ],
+ filter=[k],
inplace=inplace,
regex=regex)
else:
@@ -3667,9 +3667,8 @@ def replace(self, to_replace=None, value=None, method='pad', axis=0,
"regular expression or a list or dict of "
"strings or regular expressions, you "
"passed a {0}".format(type(regex)))
- return self.replace(regex, value, method=method, axis=axis,
- inplace=inplace, limit=limit, regex=True,
- infer_types=infer_types)
+ return self.replace(regex, value, inplace=inplace, limit=limit,
+ regex=True, infer_types=infer_types)
else:
# dest iterable dict-like
@@ -3679,7 +3678,7 @@ def replace(self, to_replace=None, value=None, method='pad', axis=0,
for k, v in value.iteritems():
if k in self:
new_data = new_data.replace(to_replace, v,
- filter=[ k ],
+ filter=[k],
inplace=inplace,
regex=regex)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 4e892f884e541..1de643985d893 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -6360,8 +6360,7 @@ def test_replace_inplace(self):
res = tsframe.replace(nan, 0, inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
- self.assertRaises(TypeError, self.tsframe.replace, nan, method='pad',
- inplace=True)
+ self.assertRaises(TypeError, self.tsframe.replace, nan, inplace=True)
# mixed type
self.mixed_frame['foo'][5:20] = nan
@@ -6953,21 +6952,18 @@ def test_interpolate(self):
pass
def test_replace_value_is_none(self):
- self.assertRaises(TypeError, self.tsframe.replace, nan, method='pad')
+ self.assertRaises(TypeError, self.tsframe.replace, nan)
orig_value = self.tsframe.iloc[0, 0]
orig2 = self.tsframe.iloc[1, 0]
self.tsframe.iloc[0, 0] = nan
self.tsframe.iloc[1, 0] = 1
- result = self.tsframe.replace(to_replace={nan: 0}, method='pad',
- axis=1)
- expected = self.tsframe.T.replace(
- to_replace={nan: 0}, method='pad').T
+ result = self.tsframe.replace(to_replace={nan: 0})
+ expected = self.tsframe.T.replace(to_replace={nan: 0}).T
assert_frame_equal(result, expected)
- result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8},
- method='bfill')
+ result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8})
tsframe = self.tsframe.copy()
tsframe.iloc[0, 0] = 0
tsframe.iloc[1, 0] = -1e8
@@ -7088,25 +7084,6 @@ def test_replace_input_formats(self):
expected.replace(to_rep[i], -1, inplace=True)
assert_frame_equal(result, expected)
- def test_replace_axis(self):
- self.tsframe['A'][:5] = nan
- self.tsframe['A'][-5:] = nan
-
- zero_filled = self.tsframe.replace(nan, 0, axis=1)
- assert_frame_equal(zero_filled, self.tsframe.fillna(0, axis=1))
-
- self.assertRaises(TypeError, self.tsframe.replace, method='pad',
- axis=1)
-
- # mixed type
- self.mixed_frame['foo'][5:20] = nan
- self.mixed_frame['A'][-10:] = nan
-
- result = self.mixed_frame.replace(np.nan, -1e8, axis=1)
- expected = self.mixed_frame.fillna(value=-1e8, axis=1)
- assert_frame_equal(result, expected)
-
-
def test_replace_limit(self):
pass
| @jreback i forgot to do this as per our conversation
| https://api.github.com/repos/pandas-dev/pandas/pulls/3712 | 2013-05-30T00:11:09Z | 2013-05-30T16:41:40Z | 2013-05-30T16:41:40Z | 2014-07-16T08:10:43Z |
Excel output in non-ascii encodings | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 68a6c9e261c97..f43adedb61a04 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1470,7 +1470,7 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
def to_excel(self, excel_writer, sheet_name='sheet1', na_rep='',
float_format=None, cols=None, header=True, index=True,
- index_label=None, startrow=0, startcol=0):
+ index_label=None, startrow=0, startcol=0, encoding = 'ascii'):
"""
Write DataFrame to a excel sheet
@@ -1478,6 +1478,7 @@ def to_excel(self, excel_writer, sheet_name='sheet1', na_rep='',
----------
excel_writer : string or ExcelWriter object
File path or existing ExcelWriter
+ encoding: Ecoding used for the worksheet
sheet_name : string, default 'sheet1'
Name of sheet which will contain DataFrame
na_rep : string, default ''
@@ -1512,7 +1513,7 @@ def to_excel(self, excel_writer, sheet_name='sheet1', na_rep='',
from pandas.io.parsers import ExcelWriter
need_save = False
if isinstance(excel_writer, basestring):
- excel_writer = ExcelWriter(excel_writer)
+ excel_writer = ExcelWriter(excel_writer, encoding = encoding)
need_save = True
formatter = fmt.ExcelFormatter(self,
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 68db97b7a3c53..7d2ebec0b151f 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1996,7 +1996,6 @@ class ExcelFile(object):
"""
def __init__(self, path_or_buf, kind=None, **kwds):
self.kind = kind
-
import xlrd # throw an ImportError if we need to
ver = tuple(map(int,xlrd.__VERSION__.split(".")[:2]))
if ver < (0, 9):
@@ -2009,7 +2008,7 @@ def __init__(self, path_or_buf, kind=None, **kwds):
self.book = xlrd.open_workbook(path_or_buf)
else:
data = path_or_buf.read()
- self.book = xlrd.open_workbook(file_contents=data)
+ self.book = xlrd.open_workbook(file_contents = data)
def __repr__(self):
return object.__repr__(self)
@@ -2264,12 +2263,13 @@ class ExcelWriter(object):
path : string
Path to xls file
"""
- def __init__(self, path):
+ def __init__(self, path, encoding = 'ascii'):
self.use_xlsx = True
+ self.encoding = encoding
if path.endswith('.xls'):
self.use_xlsx = False
import xlwt
- self.book = xlwt.Workbook()
+ self.book = xlwt.Workbook(encoding = self.encoding)
self.fm_datetime = xlwt.easyxf(
num_format_str='YYYY-MM-DD HH:MM:SS')
self.fm_date = xlwt.easyxf(num_format_str='YYYY-MM-DD')
diff --git a/pandas/io/tests/data/excel_test_ascii.xls b/pandas/io/tests/data/excel_test_ascii.xls
new file mode 100644
index 0000000000000..2d0c77333ae2e
Binary files /dev/null and b/pandas/io/tests/data/excel_test_ascii.xls differ
diff --git a/pandas/io/tests/data/excel_test_noascii.xls b/pandas/io/tests/data/excel_test_noascii.xls
new file mode 100644
index 0000000000000..484071e3a0878
Binary files /dev/null and b/pandas/io/tests/data/excel_test_noascii.xls differ
diff --git a/pandas/io/tests/data/excel_writer_ascii.xls b/pandas/io/tests/data/excel_writer_ascii.xls
new file mode 100644
index 0000000000000..5d80b069f49e3
Binary files /dev/null and b/pandas/io/tests/data/excel_writer_ascii.xls differ
diff --git a/pandas/io/tests/data/excel_writer_noascii.xls b/pandas/io/tests/data/excel_writer_noascii.xls
new file mode 100644
index 0000000000000..6edd1e7cec250
Binary files /dev/null and b/pandas/io/tests/data/excel_writer_noascii.xls differ
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index 8a145517d3b5a..9faee58eea954 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -639,7 +639,11 @@ def test_to_excel_float_format(self):
[12.32, 123123.20, 321321.20]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
tm.assert_frame_equal(rs, xp)
-
+
+
+
+
+
def test_to_excel_unicode_filename(self):
_skip_if_no_excelsuite()
@@ -858,7 +862,9 @@ def roundtrip(df, header=True, parser_hdr=0):
res = roundtrip(DataFrame([0]), False, None)
self.assertEqual(res.shape, (1, 2))
self.assertTrue(res.ix[0, 0] is not np.nan)
+
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
diff --git a/pandas/io/tests/test_excel_encoding.py b/pandas/io/tests/test_excel_encoding.py
new file mode 100644
index 0000000000000..a390d032a161f
--- /dev/null
+++ b/pandas/io/tests/test_excel_encoding.py
@@ -0,0 +1,213 @@
+# pylint: disable=E1101
+# -*- coding: utf-8 -*-
+
+
+from pandas.util.py3compat import StringIO, BytesIO, PY3
+from datetime import datetime
+from os.path import split as psplit
+import csv
+import os
+import sys
+import re
+import unittest
+
+import nose
+
+from numpy import nan
+import numpy as np
+
+from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
+import pandas.io.parsers as parsers
+from pandas.io.parsers import (read_csv, read_table, read_fwf,
+ ExcelFile, TextFileReader, TextParser)
+from pandas.util.testing import (assert_almost_equal,
+ assert_series_equal,
+ network,
+ ensure_clean)
+import pandas.util.testing as tm
+import pandas as pd
+
+import pandas.lib as lib
+from pandas.util import py3compat
+from pandas.lib import Timestamp
+from pandas.tseries.index import date_range
+import pandas.tseries.tools as tools
+
+from numpy.testing.decorators import slow
+
+from pandas._parser import OverflowError
+
+from pandas.io.parsers import (ExcelFile, ExcelWriter, read_csv)
+
+
+def _skip_if_no_xlrd():
+ try:
+ import xlrd
+ ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2]))
+ if ver < (0, 9):
+ raise nose.SkipTest('xlrd not installed, skipping')
+ except ImportError:
+ raise nose.SkipTest('xlrd not installed, skipping')
+
+
+def _skip_if_no_xlwt():
+ try:
+ import xlwt
+ except ImportError:
+ raise nose.SkipTest('xlwt not installed, skipping')
+
+
+def _skip_if_no_openpyxl():
+ try:
+ import openpyxl
+ except ImportError:
+ raise nose.SkipTest('openpyxl not installed, skipping')
+
+
+def _skip_if_no_excelsuite():
+ _skip_if_no_xlrd()
+ _skip_if_no_xlwt()
+ _skip_if_no_openpyxl()
+
+
+_seriesd = tm.getSeriesData()
+_tsd = tm.getTimeSeriesData()
+_frame = DataFrame(_seriesd)[:10]
+_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10]
+_tsframe = tm.makeTimeDataFrame()[:5]
+_mixed_frame = _frame.copy()
+_mixed_frame['foo'] = 'bar'
+
+
+class ExcelTests(unittest.TestCase):
+
+ def setUp(self):
+ self.dirpath = tm.get_data_path()
+ self.xls_ta = os.path.join(self.dirpath, 'excel_test_ascii.xls')
+ self.xls_tna = os.path.join(self.dirpath, 'excel_test_noascii.xls')
+ self.xls_wa = os.path.join(self.dirpath, 'excel_writer_ascii.xls')
+ self.xls_wna = os.path.join(self.dirpath, 'excel_writer_noascii.xls')
+
+ def test_excel_output_encoding(self):
+ _skip_if_no_xlrd()
+ _skip_if_no_xlwt()
+
+ # TESTS IF DataFrame.to_excel() WORKS WITH ENCODING PARAMETER MAKING POSSIBLE TO
+ # WORK WITH ENCODINGS OTHER TAN ASCII
+
+ #FIRST WITH ONLY ASCII
+
+ data_ascii = {
+ 'index' : ['A', 'B', 'C', 'C', 'B', 'A'],
+ 'columns' : ['One', 'One', 'One', 'Two', 'Two', 'Two'],
+ 'values' : [1., 2., 3., 3., 2., 1.]
+ }
+
+ original_ascii = DataFrame(data_ascii)
+
+ original_ascii.to_excel(self.xls_ta, sheet_name='DataFrame_TEST')
+
+ get_xls_ascii = ExcelFile(self.xls_ta)
+
+ saved_ascii = get_xls_ascii.parse('DataFrame_TEST', index_col=None, na_values=['NA'])
+
+ # NOW WITH NON-ASCII CHARS AND SUPPLYING THE PARAMETER encoding TO DataFrame.to_excel()
+
+ data_noascii = {
+ 'index' : ['Año', 'Baldío', 'Trócola', 'Mínimo', 'Barça', 'Cigüeña'],
+ 'columns' : ['Año', 'Narices', 'Búlgaro', 'Libélula', 'Cínico', '1º'],
+ 'values' : ['Céfiro', 'Tarugo', 'Déspota', 'Camión', 'Añejo', 'º']
+ }
+
+ original_noascii = DataFrame(data_noascii)
+
+ original_noascii.to_excel(self.xls_tna, sheet_name='DataFrame_TEST', encoding='utf8')
+
+ get_xls_noascii = ExcelFile(self.xls_tna, encoding = 'uft8')
+
+ #saved_noascii = get_xls_noascii.parse('DataFrame_TEST', index_col=None, na_values=['NA'])
+
+ saved_noascii = get_xls_noascii.parse('DataFrame_TEST', index_col=None, na_values=['NA'])
+
+ print original_noascii,saved_noascii
+
+ tm.assert_frame_equal(original_ascii, saved_ascii)
+ tm.assert_frame_equal(original_noascii, saved_noascii)
+
+
+ # TESTS IF CLASS ExcelWriter WORKS WITH ENCODING PARAMETER MAKING POSSIBLE TO
+ # WORK WITH ENCODINGS OTHER TAN ASCII
+
+ #FIRST WITH ONLY ASCII
+
+ data_ascii_1 = {
+ 'index' : ['A', 'B', 'C', 'C', 'B', 'A'],
+ 'columns' : ['One', 'One', 'One', 'Two', 'Two', 'Two'],
+ 'values' : [1., 2., 3., 3., 2., 1.]
+ }
+
+ data_ascii_2 = {
+ 'index' : ['A', 'B', 'C', 'C', 'B', 'A'],
+ 'columns' : ['One', 'One', 'One', 'Two', 'Two', 'Two'],
+ 'values' : [1., 2., 3., 3., 2., 1.]
+ }
+
+ excel_writer_ascii=ExcelWriter(self.xls_wa)
+
+ original_ascii_1 = DataFrame(data_ascii_1)
+
+ original_ascii_2 = DataFrame(data_ascii_2)
+
+ original_ascii_1.to_excel(excel_writer_ascii, sheet_name = 'DataFrame_TEST')
+
+ original_ascii_2.to_excel(excel_writer_ascii, sheet_name = 'DataFrame_TEST_2')
+
+ excel_writer_ascii.save()
+
+ get_xls_writer_ascii = ExcelFile(self.xls_wa)
+
+ saved_ascii_1 = get_xls_writer_ascii.parse('DataFrame_TEST', index_col = None, na_values = ['NA'])
+
+ saved_ascii_2 = get_xls_writer_ascii.parse('DataFrame_TEST_2', index_col = None, na_values = ['NA'])
+
+ # NOW WITH NON-ASCII CHARS AND SUPPLYING THE PARAMETER encoding TO class ExcelWriter
+
+ data_noascii_1 = {
+ 'index' : ['Puño', 'Mísero', 'Brújula', 'Pájaro', 'Barça', 'Cigüeña'],
+ 'columns' : ['Años', 'Nariz', 'Bígaro', 'Céfiro', '2º', '2€'],
+ 'values' : ['Tímido', 'Variado', 'Efímero', 'Trágico', 'Compañero', '5º']
+ }
+
+ data_noascii_2 = {
+ 'index' : ['Año', 'Baldío', 'Trócola', 'Mínimo', 'Barça', 'Cigüeña'],
+ 'columns' : ['Año', 'Narices', 'Búlgaro', 'Libélula', 'Cínico', '1º'],
+ 'values' : ['Céfiro', 'Tarugo', 'Déspota', 'Camión', 'Añejo', 'º']
+ }
+
+ excel_writer_noascii=ExcelWriter(self.xls_wna,encoding = 'utf8')
+
+ original_noascii_1 = DataFrame(data_noascii_1)
+
+ original_noascii_2 = DataFrame(data_noascii_2)
+
+ original_noascii_1.to_excel(excel_writer_noascii, sheet_name = 'DataFrame_TEST')
+
+ original_noascii_2.to_excel(excel_writer_noascii, sheet_name = 'DataFrame_TEST_2')
+
+ excel_writer_noascii.save()
+
+ get_xls_writer_noascii = ExcelFile(self.xls_wna,encoding = 'uft8')
+
+ saved_noascii_1 = get_xls_writer_noascii.parse('DataFrame_TEST', index_col = None, na_values = ['NA'])
+
+ saved_noascii_2 = get_xls_writer_noascii.parse('DataFrame_TEST_2', index_col = None, na_values = ['NA'])
+
+ tm.assert_frame_equal(original_ascii_1, saved_ascii_1)
+ tm.assert_frame_equal(original_ascii_2, saved_ascii_2)
+
+ tm.assert_frame_equal(original_noascii_1, saved_noascii_1)
+ tm.assert_frame_equal(original_noascii_2, saved_noascii_2)
+
+if __name__ == '__main__':
+ nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
+ exit=False)
\ No newline at end of file
diff --git a/tests2.txt b/tests2.txt
new file mode 100644
index 0000000000000..29ae9d8948d41
--- /dev/null
+++ b/tests2.txt
@@ -0,0 +1,658 @@
+............................................................../usr/lib64/python2.7/site-packages/numpy/core/numeric.py:1977: UnicodeWarning: Unicode equal comparison failed to convert both arguments to Unicode - interpreting them as being unequal
+ return bool(logical_and.reduce(equal(a1,a2).ravel()))
+/home/antares/pandas/pandas/util/testing.py:129: UnicodeWarning: Unicode equal comparison failed to convert both arguments to Unicode - interpreting them as being unequal
+ assert a == b, "%s != %s" % (a, b)
+E....SSSS.SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS.EEEEEEEEEEEEEEEEEE..EEEEEEEEEEEEEEEEEE.......................................S........................................................................................................................................................................................................................................................................................................SSSSSSSSSSS.................SS...................................................................................................................................................................................S.......................SSSSSSS.........................................F............................................S.......................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................S......................................................S.........................................................................................................................................................................................................................................................................................SSS......................................................................................................
.......................................................................................................................................S............................S.........................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................S................................................................................................................................................................................S...S............SSS.........S............SS...........SS.....SSSS..........................................................................................................................S................................................................................................................................................S.................................................................................................................
+======================================================================
+ERROR: test_excel_output_encoding (pandas.io.tests.test_excel_encoding.ExcelTests)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_excel_encoding.py", line 135, in test_excel_output_encoding
+ tm.assert_frame_equal(original_noascii, saved_noascii)
+ File "/home/antares/pandas/pandas/util/testing.py", line 238, in assert_frame_equal
+ check_less_precise=check_less_precise)
+ File "/home/antares/pandas/pandas/util/testing.py", line 197, in assert_series_equal
+ assert_almost_equal(left.values, right.values, check_less_precise)
+ File "/home/antares/pandas/pandas/util/testing.py", line 141, in assert_almost_equal
+ assert_almost_equal(a[i], b[i], check_less_precise)
+ File "/home/antares/pandas/pandas/util/testing.py", line 129, in assert_almost_equal
+ assert a == b, "%s != %s" % (a, b)
+UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 1: ordinal not in range(128)
+-------------------- >> begin captured stdout << ---------------------
+ columns index values
+0 A\xf1o A\xf1o C\xe9firo
+1 Narices Bald\xedo Tarugo
+2 B\xfalgaro Tr\xf3cola D\xe9spota
+3 Lib\xe9lula M\xednimo Cami\xf3n
+4 C\xednico Bar\xe7a A\xf1ejo
+5 1\xba Cig\xfce\xf1a \xba columns index values
+0 A\xf1o A\xf1o C\xe9firo
+1 Narices Bald\xedo Tarugo
+2 B\xfalgaro Tr\xf3cola D\xe9spota
+3 Lib\xe9lula M\xednimo Cami\xf3n
+4 C\xednico Bar\xe7a A\xf1ejo
+5 1\xba Cig\xfce\xf1a \xba
+
+--------------------- >> end captured stdout << ----------------------
+
+======================================================================
+ERROR: test_bad_url_protocol (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 288, in test_bad_url_protocol
+ '.*Water.*')
+ File "/usr/lib64/python2.7/unittest/case.py", line 471, in assertRaises
+ callableObj(*args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_banklist (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 116, in test_banklist
+ attrs={'id': 'table'})
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_banklist_header (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 131, in test_banklist_header
+ attrs={'id': 'table'}, infer_types=False)[0]
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_banklist_no_match (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 167, in test_banklist_no_match
+ dfs = self.run_read_html(self.banklist_data, attrs={'id': 'table'})
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_banklist_url (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 99, in test_banklist_url
+ attrs={"id": 'table'})
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_file_like (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 279, in test_file_like
+ df1 = self.run_read_html(f, '.*Water.*', infer_types=False)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_file_url (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 294, in test_file_url
+ attrs={'id': 'table'})
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_gold_canyon (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 147, in test_gold_canyon
+ attrs={'id': 'table'}, infer_types=False)[0]
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_header_and_index (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 241, in test_header_and_index
+ index_col=0)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_index (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 235, in test_index
+ df1 = self.run_read_html(self.spam_data, '.*Water.*', index_col=0)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_infer_types (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 247, in test_infer_types
+ infer_types=False)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_invalid_table_attrs (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 304, in test_invalid_table_attrs
+ attrs={'id': 'tasdfable'})
+ File "/usr/lib64/python2.7/unittest/case.py", line 471, in assertRaises
+ callableObj(*args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_multiindex_header (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 312, in test_multiindex_header
+ df = self._bank_data(header=[0, 1])[0]
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 308, in _bank_data
+ attrs={'id': 'table'}, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_multiindex_header_index (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 322, in test_multiindex_header_index
+ df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 308, in _bank_data
+ attrs={'id': 'table'}, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_multiindex_header_index_skiprows (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 333, in test_multiindex_header_index_skiprows
+ df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 308, in _bank_data
+ attrs={'id': 'table'}, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_multiindex_header_skiprows (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 328, in test_multiindex_header_skiprows
+ df = self._bank_data(header=[0, 1], skiprows=1)[0]
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 308, in _bank_data
+ attrs={'id': 'table'}, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_multiindex_index (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 317, in test_multiindex_index
+ df = self._bank_data(index_col=[0, 1])[0]
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 308, in _bank_data
+ attrs={'id': 'table'}, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_multiple_matches (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 360, in test_multiple_matches
+ attrs={'class': 'wikitable'})
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_pythonxy_plugins_table (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 367, in test_pythonxy_plugins_table
+ attrs={'class': 'wikitable'})
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_regex_idempotency (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 341, in test_regex_idempotency
+ attrs={'id': 'table'})
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_skiprows_int (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 178, in test_skiprows_int
+ df1 = self.run_read_html(self.spam_data, '.*Water.*', skiprows=1)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_skiprows_invalid (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 232, in test_skiprows_invalid
+ '.*Water.*', skiprows='asdf')
+ File "/usr/lib64/python2.7/unittest/case.py", line 471, in assertRaises
+ callableObj(*args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_skiprows_list (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 190, in test_skiprows_list
+ df1 = self.run_read_html(self.spam_data, '.*Water.*', skiprows=[1, 2])
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_skiprows_ndarray (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 225, in test_skiprows_ndarray
+ skiprows=np.arange(2))
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_skiprows_set (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 197, in test_skiprows_set
+ skiprows=set([1, 2]))
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_skiprows_slice (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 203, in test_skiprows_slice
+ df1 = self.run_read_html(self.spam_data, '.*Water.*', skiprows=1)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_skiprows_slice_long (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 217, in test_skiprows_slice_long
+ skiprows=slice(2, 5))
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_skiprows_slice_short (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 210, in test_skiprows_slice_short
+ skiprows=slice(2))
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_skiprows_xrange (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 184, in test_skiprows_xrange
+ df1 = [self.run_read_html(self.spam_data, '.*Water.*').pop()[2:]]
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_spam (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 152, in test_spam
+ infer_types=False)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_spam_header (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 172, in test_spam_header
+ df = self.run_read_html(self.spam_data, '.*Water.*', header=0)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_spam_no_match (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 162, in test_spam_no_match
+ dfs = self.run_read_html(self.spam_data)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_spam_url (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 108, in test_spam_url
+ df1 = self.run_read_html(url, '.*Water.*')
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_string (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 272, in test_string
+ df1 = self.run_read_html(data, '.*Water.*', infer_types=False)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_string_io (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 264, in test_string_io
+ df1 = self.run_read_html(data1, '.*Water.*', infer_types=False)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+ERROR: test_to_html_compat (pandas.io.tests.test_html.TestBs4LxmlParser)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 80, in test_to_html_compat
+ index_col=0)[0]
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 387, in run_read_html
+ return _run_read_html(parser, *args, **kwargs)
+ File "/home/antares/pandas/pandas/io/tests/test_html.py", line 71, in _run_read_html
+ infer_types, attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 717, in _parse
+ p = parser(io, re.compile(match), attrs)
+ File "/home/antares/pandas/pandas/io/html.py", line 363, in __init__
+ from bs4 import SoupStrainer
+ImportError: No module named bs4
+
+======================================================================
+FAIL: testWLS (pandas.stats.tests.test_ols.TestOLS)
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "/home/antares/pandas/pandas/stats/tests/test_ols.py", line 109, in testWLS
+ self._check_wls(X, Y, weights)
+ File "/home/antares/pandas/pandas/stats/tests/test_ols.py", line 132, in _check_wls
+ self.checkMovingOLS('rolling', x, y, weights=weights)
+ File "/home/antares/pandas/pandas/stats/tests/test_ols.py", line 206, in checkMovingOLS
+ result_index=n)
+ File "/home/antares/pandas/pandas/stats/tests/test_ols.py", line 245, in compare
+ assert_almost_equal(ref, res)
+ File "/home/antares/pandas/pandas/util/testing.py", line 141, in assert_almost_equal
+ assert_almost_equal(a[i], b[i], check_less_precise)
+ File "/home/antares/pandas/pandas/util/testing.py", line 170, in assert_almost_equal
+ 1, a / b, decimal=decimal, err_msg=err_msg(a, b), verbose=False)
+ File "/usr/lib64/python2.7/site-packages/numpy/testing/utils.py", line 468, in assert_almost_equal
+ raise AssertionError(msg)
+AssertionError:
+Arrays are not almost equal to 5 decimals expected 0.60439 but got 0.55051
+-------------------- >> begin captured stdout << ---------------------
+Make sure you're using statsmodels 0.5.0.dev-cec4f26 or later.
+
+--------------------- >> end captured stdout << ----------------------
+
+----------------------------------------------------------------------
+Ran 3355 tests in 139.394s
+
+FAILED (SKIP=89, errors=37, failures=1)
| First, I would like to thank the developers for this excellent work.
My patch is about the possibility of specify the workbook encoding when making output to excel xls files. By default, pandas hasn't it, so say-so non ascii language speakers weren't able to save files with _special_ characters, receiving an error like, for instance:
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 2: ordinal not in range(128)
My change makes possible to specify both in ExcelWriter and DataFrame.to_excel the parameter encoding which is passed by both the ExcelWriter **init** and DataFrame.to_excel functions to the appropiate xlwt function to make the trick.
I'm sorry my explanations aren't very proffesional, but I think they are enough for this purpose.
Also I have to apologize in the sense that I've run the tests with nosetests and I lack of knowledge about tests interpretation, but I have checked the log and no error comes from the modules/files I've worked with.
Best regards,
Jorge Tornero
| https://api.github.com/repos/pandas-dev/pandas/pulls/3710 | 2013-05-29T23:06:10Z | 2013-09-30T10:27:38Z | null | 2014-06-24T12:36:06Z |
improve error message when xlrd import fails | diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 68db97b7a3c53..0dde47e6065e4 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2000,7 +2000,7 @@ def __init__(self, path_or_buf, kind=None, **kwds):
import xlrd # throw an ImportError if we need to
ver = tuple(map(int,xlrd.__VERSION__.split(".")[:2]))
if ver < (0, 9):
- raise ImportError("pandas requires xlrd >= 0.9.0 for excel support")
+ raise ImportError("pandas requires xlrd >= 0.9.0 for excel support, current version "+xlrd.__VERSION__)
self.path_or_buf = path_or_buf
self.tmpfile = None
| https://api.github.com/repos/pandas-dev/pandas/pulls/3709 | 2013-05-29T23:02:12Z | 2013-05-29T23:47:53Z | 2013-05-29T23:47:53Z | 2013-06-10T19:20:35Z | |
BUG: allow DataFrame.from_records to accept empty recarrays | diff --git a/RELEASE.rst b/RELEASE.rst
index 76aac0d73466c..38a8b42fcde6f 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -173,6 +173,7 @@ pandas 0.11.1
- ``read_html()`` now only allows a single backend: ``html5lib`` (GH3616_)
- ``convert_objects`` with ``convert_dates='coerce'`` was parsing some single-letter strings
into today's date
+ - ``DataFrame.from_records`` did not accept empty recarrays (GH3682_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -246,6 +247,7 @@ pandas 0.11.1
.. _GH3582: https://github.com/pydata/pandas/issues/3582
.. _GH3676: https://github.com/pydata/pandas/issues/3676
.. _GH3675: https://github.com/pydata/pandas/issues/3675
+.. _GH3682: https://github.com/pydata/pandas/issues/3682
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index ffa2cc6dc7cab..c7f590d6ebbe8 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -208,6 +208,8 @@ Bug Fixes
to replace all occurrences of the string ``'.'`` with ``NaN``.
+ - ``DataFrame.from_records`` did not accept empty recarrays (GH3682_)
+
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
on GitHub for a complete list.
@@ -247,3 +249,4 @@ on GitHub for a complete list.
.. _GH3582: https://github.com/pydata/pandas/issues/3582
.. _GH3676: https://github.com/pydata/pandas/issues/3676
.. _GH3675: https://github.com/pydata/pandas/issues/3675
+.. _GH3682: https://github.com/pydata/pandas/issues/3682
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 68a6c9e261c97..68edceb29e6b2 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5756,7 +5756,11 @@ def _to_arrays(data, columns, coerce_float=False, dtype=None):
return arrays, columns
- if len(data) == 0:
+ if not len(data):
+ if isinstance(data, np.ndarray):
+ columns = data.dtype.names
+ if columns is not None:
+ return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float,
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index fddbbf93552b3..3711a814cc273 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -3522,6 +3522,18 @@ def test_from_records_empty(self):
expected = DataFrame(columns=['a','b','b'])
assert_frame_equal(result, expected)
+ def test_from_records_empty_with_nonempty_fields_gh3682(self):
+ a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])
+ df = DataFrame.from_records(a, index='id')
+ assert_array_equal(df.index, Index([1], name='id'))
+ self.assertEqual(df.index.name, 'id')
+ assert_array_equal(df.columns, Index(['value']))
+
+ b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])
+ df = DataFrame.from_records(b, index='id')
+ assert_array_equal(df.index, Index([], name='id'))
+ self.assertEqual(df.index.name, 'id')
+
def test_to_records_floats(self):
df = DataFrame(np.random.rand(10, 10))
df.to_records()
| closes #3682.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3708 | 2013-05-29T14:23:51Z | 2013-05-30T00:46:35Z | 2013-05-30T00:46:35Z | 2014-07-06T13:17:47Z |
DOC: document read_html and to_html | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 92747f9906da2..5bf3075f2688e 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -938,8 +938,106 @@ Reading HTML Content
.. versionadded:: 0.11.1
-The toplevel :func:`~pandas.io.parsers.read_html` function can accept an HTML
+The toplevel :func:`~pandas.io.html.read_html` function can accept an HTML
string/file/url and will parse HTML tables into list of pandas DataFrames.
+Let's look at a few examples.
+
+Read a URL with no options
+
+.. ipython:: python
+
+ url = 'http://www.fdic.gov/bank/individual/failed/banklist.html'
+ dfs = read_html(url)
+ dfs
+
+.. note::
+
+ ``read_html`` returns a ``list`` of ``DataFrame`` objects, even if there is
+ only a single table contained in the HTML content
+
+Read a URL and match a table that contains specific text
+
+.. ipython:: python
+
+ match = 'Metcalf Bank'
+ df_list = read_html(url, match=match)
+ len(dfs)
+ dfs[0]
+
+Specify a header row (by default ``<th>`` elements are used to form the column
+index); if specified, the header row is taken from the data minus the parsed
+header elements (``<th>`` elements).
+
+.. ipython:: python
+
+ dfs = read_html(url, header=0)
+ len(dfs)
+ dfs[0]
+
+Specify an index column
+
+.. ipython:: python
+
+ dfs = read_html(url, index_col=0)
+ len(dfs)
+ dfs[0]
+ dfs[0].index.name
+
+Specify a number of rows to skip
+
+.. ipython:: python
+
+ dfs = read_html(url, skiprows=0)
+ len(dfs)
+ dfs[0]
+
+Specify a number of rows to skip using a list (``xrange`` (Python 2 only) works
+as well)
+
+.. ipython:: python
+
+ dfs = read_html(url, skiprows=range(2))
+ len(dfs)
+ dfs[0]
+
+Don't infer numeric and date types
+
+.. ipython:: python
+
+ dfs = read_html(url, infer_types=False)
+ len(dfs)
+ dfs[0]
+
+Specify an HTML attribute
+
+.. ipython:: python
+
+ dfs = read_html(url)
+ len(dfs)
+ dfs[0]
+
+Use some combination of the above
+
+.. ipython:: python
+
+ dfs = read_html(url, match='Metcalf Bank', index_col=0)
+ len(dfs)
+ dfs[0]
+
+Read in pandas ``to_html`` output (with some loss of floating point precision)
+
+.. ipython:: python
+
+ df = DataFrame(randn(2, 2))
+ s = df.to_html(float_format='{0:.40g}'.format)
+ dfin = read_html(s, index_col=0)
+ df
+ dfin[0]
+ df.index
+ df.columns
+ dfin[0].index
+ dfin[0].columns
+ np.allclose(df, dfin[0])
Writing to HTML files
@@ -947,9 +1045,134 @@ Writing to HTML files
.. _io.html:
-DataFrame object has an instance method ``to_html`` which renders the contents
-of the DataFrame as an html table. The function arguments are as in the method
-``to_string`` described above.
+``DataFrame`` objects have an instance method ``to_html`` which renders the
+contents of the ``DataFrame`` as an HTML table. The function arguments are as
+in the method ``to_string`` described above.
+
+.. note::
+
+ Not all of the possible options for ``DataFrame.to_html`` are shown here for
+ brevity's sake. See :func:`~pandas.DataFrame.to_html` for the full set of
+ options.
+
+.. ipython:: python
+ :suppress:
+
+ def write_html(df, filename, *args, **kwargs):
+ static = os.path.abspath(os.path.join('source', '_static'))
+ with open(os.path.join(static, filename + '.html'), 'w') as f:
+ df.to_html(f, *args, **kwargs)
+
+.. ipython:: python
+
+ df = DataFrame(randn(2, 2))
+ df
+ print df.to_html() # raw html
+
+.. ipython:: python
+ :suppress:
+
+ write_html(df, 'basic')
+
+HTML:
+
+.. raw:: html
+ :file: _static/basic.html
+
+The ``columns`` argument will limit the columns shown
+
+.. ipython:: python
+
+ print df.to_html(columns=[0])
+
+.. ipython:: python
+ :suppress:
+
+ write_html(df, 'columns', columns=[0])
+
+HTML:
+
+.. raw:: html
+ :file: _static/columns.html
+
+``float_format`` takes a Python callable to control the precision of floating
+point values
+
+.. ipython:: python
+
+ print df.to_html(float_format='{0:.10f}'.format)
+
+.. ipython:: python
+ :suppress:
+
+ write_html(df, 'float_format', float_format='{0:.10f}'.format)
+
+HTML:
+
+.. raw:: html
+ :file: _static/float_format.html
+
+``bold_rows`` will make the row labels bold by default, but you can turn that
+off
+
+.. ipython:: python
+
+ print df.to_html(bold_rows=False)
+
+.. ipython:: python
+ :suppress:
+
+ write_html(df, 'nobold', bold_rows=False)
+
+.. raw:: html
+ :file: _static/nobold.html
+
+The ``classes`` argument provides the ability to give the resulting HTML
+table CSS classes. Note that these classes are *appended* to the existing
+``'dataframe'`` class.
+
+.. ipython:: python
+
+ print df.to_html(classes=['awesome_table_class', 'even_more_awesome_class'])
+
+Finally, the ``escape`` argument allows you to control whether the
+"<", ">" and "&" characters escaped in the resulting HTML (by default it is
+``True``). So to get the HTML without escaped characters pass ``escape=False``
+
+.. ipython:: python
+
+ df = DataFrame({'a': list('&<>'), 'b': randn(3)})
+
+
+.. ipython:: python
+ :suppress:
+
+ write_html(df, 'escape')
+ write_html(df, 'noescape', escape=False)
+
+Escaped:
+
+.. ipython:: python
+
+ print df.to_html()
+
+.. raw:: html
+ :file: _static/escape.html
+
+Not escaped:
+
+.. ipython:: python
+
+ print df.to_html(escape=False)
+
+.. raw:: html
+ :file: _static/noescape.html
+
+.. note::
+
+ Some browsers may not show a difference in the rendering of the previous two
+ HTML tables.
+
Clipboard
---------
diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index a9fc412a6b8e3..5ff436f6d0d50 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -357,7 +357,7 @@ Replace the '.' with ``nan`` (str -> str)
:suppress:
from numpy.random import rand, randn
- nan = np.nan
+ from numpy import nan
from pandas import DataFrame
.. ipython:: python
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ea8dee51565ac..3ad8de077f1ea 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1598,6 +1598,7 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
classes=None, escape=True):
"""
to_html-specific options
+
bold_rows : boolean, default True
Make the row labels bold in the output
classes : str or list or tuple, default None
@@ -1605,7 +1606,7 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
escape : boolean, default True
Convert the characters <, >, and & to HTML-safe sequences.
- Render a DataFrame to an html table.
+ Render a DataFrame as an HTML table.
"""
import warnings
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 915c30ecc3c40..9b2f292d30f47 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -18,7 +18,7 @@
import numpy as np
-from pandas import DataFrame, MultiIndex
+from pandas import DataFrame, MultiIndex, Index, Series, isnull
from pandas.io.parsers import _is_url
@@ -398,7 +398,6 @@ def _parse_tables(self, doc, match, attrs):
if not tables:
raise AssertionError("No tables found matching "
"'{0}'".format(match.pattern))
- #import ipdb; ipdb.set_trace()
return tables
def _setup_build_doc(self):
@@ -560,6 +559,17 @@ def _parse_raw_tfoot(self, table):
table.xpath(expr)]
+def _maybe_convert_index_type(index):
+ try:
+ index = index.astype(int)
+ except (TypeError, ValueError):
+ if not isinstance(index, MultiIndex):
+ s = Series(index, name=index.name)
+ index = Index(s.convert_objects(convert_numeric=True),
+ name=index.name)
+ return index
+
+
def _data_to_frame(data, header, index_col, infer_types, skiprows):
"""Parse a BeautifulSoup table into a DataFrame.
@@ -620,6 +630,12 @@ def _data_to_frame(data, header, index_col, infer_types, skiprows):
raise ValueError('Labels {0} not found when trying to skip'
' rows'.format(it))
+ # convert to numbers/dates where possible
+ # must be sequential since dates trump numbers if both args are given
+ if infer_types:
+ df = df.convert_objects(convert_numeric=True)
+ df = df.convert_objects(convert_dates='coerce')
+
if header is not None:
header_rows = df.iloc[header]
@@ -632,11 +648,6 @@ def _data_to_frame(data, header, index_col, infer_types, skiprows):
df = df.drop(df.index[header])
- # convert to numbers/dates where possible
- # must be sequential since dates trump numbers if both args are given
- if infer_types:
- df = df.convert_objects(convert_numeric=True)
-
if index_col is not None:
cols = df.columns[index_col]
@@ -648,12 +659,16 @@ def _data_to_frame(data, header, index_col, infer_types, skiprows):
# drop by default
df.set_index(cols, inplace=True)
if df.index.nlevels == 1:
- if not (df.index.name or df.index.name is None):
+ if isnull(df.index.name) or not df.index.name:
df.index.name = None
else:
names = [name or None for name in df.index.names]
df.index = MultiIndex.from_tuples(df.index.values, names=names)
+ if infer_types:
+ df.index = _maybe_convert_index_type(df.index)
+ df.columns = _maybe_convert_index_type(df.columns)
+
return df
| https://api.github.com/repos/pandas-dev/pandas/pulls/3704 | 2013-05-28T21:56:08Z | 2013-05-30T16:36:50Z | 2013-05-30T16:36:50Z | 2014-07-16T08:10:33Z | |
ENH: allow to_html and to_latex to take a path for their first argument | diff --git a/RELEASE.rst b/RELEASE.rst
index 5293b858b72a3..71d8054283b57 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -179,6 +179,8 @@ pandas 0.11.1
into today's date
- ``DataFrame.from_records`` did not accept empty recarrays (GH3682_)
- ``DataFrame.to_csv`` will succeed with the deprecated option ``nanRep``, @tdsmith
+ - ``DataFrame.to_html`` and ``DataFrame.to_latex`` now accept a path for
+ their first argument (GH3702_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -255,6 +257,7 @@ pandas 0.11.1
.. _GH3676: https://github.com/pydata/pandas/issues/3676
.. _GH3675: https://github.com/pydata/pandas/issues/3675
.. _GH3682: https://github.com/pydata/pandas/issues/3682
+.. _GH3702: https://github.com/pydata/pandas/issues/3702
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index bd4a7c49fbb4d..ae400a199b372 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -233,6 +233,9 @@ Bug Fixes
- ``DataFrame.from_records`` did not accept empty recarrays (GH3682_)
+ - ``DataFrame.to_html`` and ``DataFrame.to_latex`` now accept a path for
+ their first argument (GH3702_)
+
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
on GitHub for a complete list.
@@ -274,3 +277,4 @@ on GitHub for a complete list.
.. _GH3675: https://github.com/pydata/pandas/issues/3675
.. _GH3682: https://github.com/pydata/pandas/issues/3682
.. _GH3679: https://github.com/pydata/pandas/issues/3679
+.. _GH3702: https://github.com/pydata/pandas/issues/3702
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 7327f3b1b2175..40d80e91f0264 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -364,21 +364,31 @@ def get_col_type(dtype):
raise AssertionError(('column_format must be str or unicode, not %s'
% type(column_format)))
- self.buf.write('\\begin{tabular}{%s}\n' % column_format)
- self.buf.write('\\toprule\n')
-
- nlevels = frame.index.nlevels
- for i, row in enumerate(izip(*strcols)):
- if i == nlevels:
- self.buf.write('\\midrule\n') # End of header
- crow = [(x.replace('_', '\\_')
- .replace('%', '\\%')
- .replace('&', '\\&') if x else '{}') for x in row]
- self.buf.write(' & '.join(crow))
- self.buf.write(' \\\\\n')
-
- self.buf.write('\\bottomrule\n')
- self.buf.write('\\end{tabular}\n')
+ def write(buf, frame, column_format, strcols):
+ buf.write('\\begin{tabular}{%s}\n' % column_format)
+ buf.write('\\toprule\n')
+
+ nlevels = frame.index.nlevels
+ for i, row in enumerate(izip(*strcols)):
+ if i == nlevels:
+ buf.write('\\midrule\n') # End of header
+ crow = [(x.replace('_', '\\_')
+ .replace('%', '\\%')
+ .replace('&', '\\&') if x else '{}') for x in row]
+ buf.write(' & '.join(crow))
+ buf.write(' \\\\\n')
+
+ buf.write('\\bottomrule\n')
+ buf.write('\\end{tabular}\n')
+
+ if hasattr(self.buf, 'write'):
+ write(self.buf, frame, column_format, strcols)
+ elif isinstance(self.buf, basestring):
+ with open(self.buf, 'w') as f:
+ write(f, frame, column_format, strcols)
+ else:
+ raise TypeError('buf is not a file name and it has no write '
+ 'method')
def _format_col(self, i):
formatter = self._get_formatter(i)
@@ -392,7 +402,14 @@ def to_html(self, classes=None):
Render a DataFrame to a html table.
"""
html_renderer = HTMLFormatter(self, classes=classes)
- html_renderer.write_result(self.buf)
+ if hasattr(self.buf, 'write'):
+ html_renderer.write_result(self.buf)
+ elif isinstance(self.buf, basestring):
+ with open(self.buf, 'w') as f:
+ html_renderer.write_result(f)
+ else:
+ raise TypeError('buf is not a file name and it has no write '
+ ' method')
def _get_formatted_column_labels(self):
from pandas.core.index import _sparsify
@@ -574,7 +591,6 @@ def write_result(self, buf):
indent = self._write_body(indent)
self.write('</table>', indent)
-
_put_lines(buf, self.elements)
def _write_header(self, indent):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index a04e931cf07e3..ab8a48f4b8eb9 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1222,7 +1222,11 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None,
if buf is None:
return the_repr
else:
- print >> buf, the_repr
+ try:
+ buf.write(the_repr)
+ except AttributeError:
+ with open(buf, 'w') as f:
+ f.write(the_repr)
def _get_repr(self, name=False, print_header=False, length=True, dtype=True,
na_rep='NaN', float_format=None):
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index fb1465f3cdc7b..9e8a69a32d454 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -1216,6 +1216,26 @@ def test_to_html(self):
frame = DataFrame(index=np.arange(200))
frame.to_html()
+ def test_to_html_filename(self):
+ biggie = DataFrame({'A': randn(200),
+ 'B': tm.makeStringIndex(200)},
+ index=range(200))
+
+ biggie['A'][:20] = nan
+ biggie['B'][:20] = nan
+ with tm.ensure_clean('test.html') as path:
+ biggie.to_html(path)
+ with open(path, 'r') as f:
+ s = biggie.to_html()
+ s2 = f.read()
+ self.assertEqual(s, s2)
+
+ frame = DataFrame(index=np.arange(200))
+ with tm.ensure_clean('test.html') as path:
+ frame.to_html(path)
+ with open(path, 'r') as f:
+ self.assertEqual(frame.to_html(), f.read())
+
def test_to_html_with_no_bold(self):
x = DataFrame({'x': randn(5)})
ashtml = x.to_html(bold_rows=False)
@@ -1474,6 +1494,13 @@ def test_dict_entries(self):
self.assertTrue("'a': 1" in val)
self.assertTrue("'b': 2" in val)
+ def test_to_latex_filename(self):
+ with tm.ensure_clean('test.tex') as path:
+ self.frame.to_latex(path)
+
+ with open(path, 'r') as f:
+ self.assertEqual(self.frame.to_latex(), f.read())
+
def test_to_latex(self):
# it works!
self.frame.to_latex()
| https://api.github.com/repos/pandas-dev/pandas/pulls/3702 | 2013-05-28T17:33:25Z | 2013-05-30T16:34:48Z | 2013-05-30T16:34:48Z | 2014-06-20T04:17:09Z | |
BUG: DataFrame.to_csv fails silently if nanRep not None | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 68a6c9e261c97..cf57c869f4b05 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1455,18 +1455,16 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
FutureWarning)
na_rep = nanRep
-
- else:
- formatter = fmt.CSVFormatter(self, path_or_buf,
- line_terminator=line_terminator,
- sep=sep, encoding=encoding,
- quoting=quoting,na_rep=na_rep,
- float_format=float_format, cols=cols,
- header=header, index=index,
- index_label=index_label,mode=mode,
- chunksize=chunksize,engine=kwds.get("engine"),
- tupleize_cols=tupleize_cols)
- formatter.save()
+ formatter = fmt.CSVFormatter(self, path_or_buf,
+ line_terminator=line_terminator,
+ sep=sep, encoding=encoding,
+ quoting=quoting,na_rep=na_rep,
+ float_format=float_format, cols=cols,
+ header=header, index=index,
+ index_label=index_label,mode=mode,
+ chunksize=chunksize,engine=kwds.get("engine"),
+ tupleize_cols=tupleize_cols)
+ formatter.save()
def to_excel(self, excel_writer, sheet_name='sheet1', na_rep='',
float_format=None, cols=None, header=True, index=True,
| The nanRep argument to DataFrame.to_csv() is deprecated and na_rep should be used instead. .to_csv should succeed with a warning if nanRep is specified. Before this fix, to_csv threw a warning but did not save the file.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3701 | 2013-05-28T05:14:38Z | 2013-05-30T01:10:17Z | null | 2014-07-16T08:10:29Z |
DOC: use na_rep not nanRep in .to_csv() | diff --git a/doc/source/io.rst b/doc/source/io.rst
index e192eea0d2b12..a1ba88c0d798b 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -871,7 +871,7 @@ allows storing the contents of the object as a comma-separated-values file. The
function takes a number of arguments. Only the first is required.
- ``path``: A string path to the file to write
- - ``nanRep``: A string representation of a missing value (default '')
+ - ``na_rep``: A string representation of a missing value (default '')
- ``cols``: Columns to write (default None)
- ``header``: Whether to write out the column names (default True)
- ``index``: whether to write row (index) names (default True)
| nanRep works but is deprecated and emits a FutureWarning.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3700 | 2013-05-28T05:03:16Z | 2013-05-30T00:53:56Z | 2013-05-30T00:53:56Z | 2015-08-15T22:13:32Z |
ENH/API: implement __nonzero__ for NDFrame | diff --git a/RELEASE.rst b/RELEASE.rst
index e611b330b08f0..9283bada2d720 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -112,6 +112,7 @@ pandas 0.11.1
- added top-level ``pd.read_sql`` and ``to_sql`` DataFrame methods
- the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
deprecated
+ - Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_)
**Bug Fixes**
@@ -266,6 +267,8 @@ pandas 0.11.1
.. _GH3675: https://github.com/pydata/pandas/issues/3675
.. _GH3682: https://github.com/pydata/pandas/issues/3682
.. _GH3702: https://github.com/pydata/pandas/issues/3702
+.. _GH3691: https://github.com/pydata/pandas/issues/3691
+.. _GH3696: https://github.com/pydata/pandas/issues/3696
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index c025450c44cca..289c011f7a7a9 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -88,6 +88,8 @@ API changes
- Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column
to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_)
+ - Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_)
+
- IO api
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1dfeae997451a..8dc1a921eecad 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -595,14 +595,6 @@ def shape(self):
#----------------------------------------------------------------------
# Class behavior
-
- @property
- def empty(self):
- return not (len(self.columns) > 0 and len(self.index) > 0)
-
- def __nonzero__(self):
- raise ValueError("Cannot call bool() on DataFrame.")
-
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index aa574219a259e..7dd0315d7d90e 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -559,6 +559,13 @@ def __repr__(self):
def values(self):
return self._data.as_matrix()
+ @property
+ def empty(self):
+ return not all(len(ax) > 0 for ax in self.axes)
+
+ def __nonzero__(self):
+ return not self.empty
+
@property
def ndim(self):
return self._data.ndim
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 1de643985d893..39452ece7a33d 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -10379,9 +10379,15 @@ def test_index_namedtuple(self):
df = DataFrame([(1, 2), (3, 4)], index=index, columns=["A", "B"])
self.assertEqual(df.ix[IndexType("foo", "bar")]["A"], 1)
- def test_bool_raises_value_error_1069(self):
+ def test_bool_empty_nonzero(self):
df = DataFrame([1, 2, 3])
- self.failUnlessRaises(ValueError, lambda: bool(df))
+ self.assertTrue(bool(df))
+ self.assertFalse(df.empty)
+ df = DataFrame(index=['a', 'b'], columns=['c', 'd']).dropna()
+ self.assertFalse(bool(df))
+ self.assertFalse(bool(df.T))
+ self.assertTrue(df.empty)
+ self.assertTrue(df.T.empty)
def test_any_all(self):
self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)
| closes #3691.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3696 | 2013-05-27T01:42:23Z | 2013-05-30T16:45:00Z | 2013-05-30T16:45:00Z | 2014-06-25T21:08:44Z |
Test to verify/fix behavior in #3503 | diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index fddbbf93552b3..207d08e795e37 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -23,6 +23,7 @@
from pandas.core.api import (DataFrame, Index, Series, notnull, isnull,
MultiIndex, DatetimeIndex, Timestamp, Period)
from pandas import date_range
+import pandas as pd
from pandas.io.parsers import read_csv
from pandas.util.testing import (assert_almost_equal,
@@ -4037,7 +4038,7 @@ def test_div(self):
### this is technically wrong as the integer portion is coerced to float ###
expected = DataFrame({ 'first' : Series([1,1,1,1],dtype='float64'), 'second' : Series([np.inf,np.inf,np.inf,1]) })
assert_frame_equal(result,expected)
-
+
result2 = DataFrame(p.values.astype('float64')/p.values,index=p.index,columns=p.columns).fillna(np.inf)
assert_frame_equal(result2,expected)
@@ -4723,7 +4724,7 @@ def _check_df(df,cols=None):
if isinstance(obj_df,Series):
assert_series_equal(obj_df,obj_rs)
else:
- assert_frame_equal(obj_df,obj_rs,check_names=False)
+ assert_frame_equal(obj_df,obj_rs,check_names=False)
# wrote in the same order
else:
@@ -4990,9 +4991,9 @@ def test_to_csv_multiindex(self):
def _make_frame(names=None):
if names is True:
names = ['first','second']
- return DataFrame(np.random.randint(0,10,size=(3,3)),
- columns=MultiIndex.from_tuples([('bah', 'foo'),
- ('bah', 'bar'),
+ return DataFrame(np.random.randint(0,10,size=(3,3)),
+ columns=MultiIndex.from_tuples([('bah', 'foo'),
+ ('bah', 'bar'),
('ban', 'baz')],
names=names),
dtype='int64')
@@ -5069,12 +5070,12 @@ def _make_frame(names=None):
raise AssertionError("failure in read_csv header=range(3)")
try:
- read_csv(path,tupleize_cols=False,header=range(7),index_col=0)
+ read_csv(path,tupleize_cols=False,header=range(7),index_col=0)
except (Exception), detail:
if not str(detail).startswith('Passed header=[0,1,2,3,4,5,6], len of 7, but only 6 lines in file'):
raise AssertionError("failure in read_csv header=range(7)")
- for i in [3,4,5,6,7]:
+ for i in [3,4,5,6,7]:
self.assertRaises(Exception, read_csv, path, tupleize_cols=False, header=range(i), index_col=0)
self.assertRaises(Exception, read_csv, path, tupleize_cols=False, header=[0,2], index_col=0)
@@ -5168,7 +5169,7 @@ def test_to_csv_dups_cols(self):
with ensure_clean() as filename:
df.to_csv(filename)
result = read_csv(filename,index_col=0)
-
+
# date cols
for i in ['0.4','1.4','2.4']:
result[i] = to_datetime(result[i])
@@ -5281,6 +5282,14 @@ def test_to_csv_quoting(self):
self.assertEqual(result, expected)
+ # quoting windows line terminators, presents with encoding?
+ # #3503
+ text = 'a,b,c\n1,"test \r\n",3\n'
+ df = pd.read_csv(StringIO(text))
+ buf = StringIO()
+ df.to_csv(buf, encoding='utf-8', index=False)
+ self.assertEqual(buf.getvalue(), text)
+
def test_to_csv_unicodewriter_quoting(self):
import csv
@@ -8540,7 +8549,7 @@ def test_combine_first_mixed_bug(self):
result = df1.combine_first(df2)[2]
expected = Series([True,True,False])
- assert_series_equal(result,expected)
+ assert_series_equal(result,expected)
# GH 3593, converting datetime64[ns] incorrecly
df0 = DataFrame({"a":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})
@@ -9942,11 +9951,11 @@ def test_columns_with_dups(self):
df.iloc[:,i]
# dup columns across dtype GH 2079/2194
- vals = [[1, -1, 2.], [2, -2, 3.]]
- rs = DataFrame(vals, columns=['A', 'A', 'B'])
- xp = DataFrame(vals)
- xp.columns = ['A', 'A', 'B']
- assert_frame_equal(rs, xp)
+ vals = [[1, -1, 2.], [2, -2, 3.]]
+ rs = DataFrame(vals, columns=['A', 'A', 'B'])
+ xp = DataFrame(vals)
+ xp.columns = ['A', 'A', 'B']
+ assert_frame_equal(rs, xp)
def test_cast_internals(self):
casted = DataFrame(self.frame._data, dtype=int)
| Was not able to reproduce the issue. #3503
| https://api.github.com/repos/pandas-dev/pandas/pulls/3694 | 2013-05-25T23:36:33Z | 2013-06-02T20:38:25Z | 2013-06-02T20:38:25Z | 2014-07-16T08:10:25Z |
CLN: added io.api for i/o importing functions | diff --git a/RELEASE.rst b/RELEASE.rst
index 5293b858b72a3..5b512814d0fec 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -35,6 +35,7 @@ pandas 0.11.1
GH3606_)
- Support for reading Amazon S3 files. (GH3504_)
- Added module for reading and writing Stata files: pandas.io.stata (GH1512_)
+ includes ``to_stata`` DataFrame method, and a ``read_stata`` top-level reader
- Added support for writing in ``to_csv`` and reading in ``read_csv``,
multi-index columns. The ``header`` option in ``read_csv`` now accepts a
list of the rows from which to read the index. Added the option,
@@ -104,6 +105,11 @@ pandas 0.11.1
does not control triggering of summary, similar to < 0.11.0.
- Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column
to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_)
+ - io API changes
+
+ - added ``pandas.io.api`` for i/o imports
+ - removed ``Excel`` support to ``pandas.io.excel``
+ - added top-level ``pd.read_sql`` and ``to_sql`` DataFrame methods
**Bug Fixes**
diff --git a/doc/source/10min.rst b/doc/source/10min.rst
index 2eda474d7954f..19bacdc81bdf9 100644
--- a/doc/source/10min.rst
+++ b/doc/source/10min.rst
@@ -699,8 +699,7 @@ Reading from an excel file
.. ipython:: python
- xls = ExcelFile('foo.xlsx')
- xls.parse('sheet1', index_col=None, na_values=['NA'])
+ read_excel('foo.xlsx', 'sheet1', index_col=None, na_values=['NA'])
.. ipython:: python
:suppress:
diff --git a/doc/source/api.rst b/doc/source/api.rst
index c5b83e4af6999..2e59bf6533205 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -48,7 +48,20 @@ File IO
read_table
read_csv
- ExcelFile.parse
+
+.. currentmodule:: pandas.io.excel
+
+.. autosummary::
+ :toctree: generated/
+
+ read_excel
+
+.. currentmodule:: pandas.io.stata
+
+.. autosummary::
+ :toctree: generated/
+
+ read_stata
.. currentmodule:: pandas.io.html
@@ -57,15 +70,29 @@ File IO
read_html
+SQL
+~~~
+
+.. currentmodule:: pandas.io.sql
+
+.. autosummary::
+ :toctree: generated/
+
+ read_sql
+
HDFStore: PyTables (HDF5)
~~~~~~~~~~~~~~~~~~~~~~~~~
+
.. currentmodule:: pandas.io.pytables
.. autosummary::
:toctree: generated/
+ read_hdf
HDFStore.put
+ HDFStore.append
HDFStore.get
+ HDFStore.select
Standard moving window functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -532,9 +559,11 @@ Serialization / IO / Conversion
DataFrame.load
DataFrame.save
DataFrame.to_csv
+ DataFrame.to_hdf
DataFrame.to_dict
DataFrame.to_excel
DataFrame.to_html
+ DataFrame.to_stata
DataFrame.to_records
DataFrame.to_sparse
DataFrame.to_string
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 8aac415721f9a..7f6b54667765d 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -32,25 +32,25 @@ Selection
The :ref:`indexing <indexing>` docs.
-`Boolean Rows Indexing
+Indexing using both row labels and conditionals, see
+`here
<http://stackoverflow.com/questions/14725068/pandas-using-row-labels-in-boolean-indexing>`__
- Indexing using both row labels and conditionals
-`Using loc and iloc in selections
+Use loc for label-oriented slicing and iloc positional slicing, see
+`here
<https://github.com/pydata/pandas/issues/2904>`__
- Use loc for label-oriented slicing and iloc positional slicing
-`Extending a panel along the minor axis
+Extend a panel frame by transposing, adding a new dimension, and transposing back to the original dimensions, see
+`here
<http://stackoverflow.com/questions/15364050/extending-a-pandas-panel-frame-along-the-minor-axis>`__
- Extend a panel frame by transposing, adding a new dimension, and transposing back to the original dimensions
-`Boolean masking in a panel
+Mask a panel by using ``np.where`` and then reconstructing the panel with the new masked values
+`here
<http://stackoverflow.com/questions/14650341/boolean-mask-in-pandas-panel>`__
- Mask a panel by using ``np.where`` and then reconstructing the panel with the new masked values
-`Selecting via the complement
+Using ``~`` to take the complement of a boolean array, see
+`here
<http://stackoverflow.com/questions/14986510/picking-out-elements-based-on-complement-of-indices-in-python-pandas>`__
- ``~`` can be used to take the complement of a boolean array
`Efficiently creating columns using applymap
<http://stackoverflow.com/questions/16575868/efficiently-creating-additional-columns-in-a-pandas-dataframe-using-map>`__
diff --git a/doc/source/io.rst b/doc/source/io.rst
index a1ba88c0d798b..92747f9906da2 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -9,6 +9,7 @@
import csv
from StringIO import StringIO
import pandas as pd
+ ExcelWriter = pd.ExcelWriter
import numpy as np
np.random.seed(123456)
@@ -27,6 +28,18 @@
IO Tools (Text, CSV, HDF5, ...)
*******************************
+The Pandas I/O api is a set of top level ``reader`` functions accessed like ``pd.read_csv()`` that generally return a ``pandas``
+object. The corresponding ``writer`` functions are object methods that are accessed like ``df.to_csv()``
+
+.. csv-table::
+ :widths: 12, 15, 15, 15, 15
+ :delim: ;
+
+ Reader; ``read_csv``; ``read_excel``; ``read_hdf``; ``read_sql``
+ Writer; ``to_csv``; ``to_excel``; ``to_hdf``; ``to_sql``
+ Reader; ``read_html``; ``read_stata``; ``read_clipboard`` ;
+ Writer; ``to_html``; ``to_stata``; ``to_clipboard`` ;
+
.. _io.read_csv_table:
CSV & Text files
@@ -971,29 +984,33 @@ And then import the data directly to a DataFrame by calling:
Excel files
-----------
-The ``ExcelFile`` class can read an Excel 2003 file using the ``xlrd`` Python
+The ``read_excel`` method can read an Excel 2003 file using the ``xlrd`` Python
module and use the same parsing code as the above to convert tabular data into
a DataFrame. See the :ref:`cookbook<cookbook.excel>` for some
advanced strategies
-To use it, create the ``ExcelFile`` object:
+.. note::
-.. code-block:: python
+ The prior method of accessing Excel is now deprecated as of 0.11.1,
+ this will work but will be removed in a future version.
- xls = ExcelFile('path_to_file.xls')
+ .. code-block:: python
-Then use the ``parse`` instance method with a sheetname, then use the same
-additional arguments as the parsers above:
+ from pandas.io.parsers import ExcelFile
+ xls = ExcelFile('path_to_file.xls')
+ xls.parse('Sheet1', index_col=None, na_values=['NA'])
-.. code-block:: python
+ Replaced by
+
+ .. code-block:: python
- xls.parse('Sheet1', index_col=None, na_values=['NA'])
+ read_excel('path_to_file.xls', 'Sheet1', index_col=None, na_values=['NA'])
To read sheets from an Excel 2007 file, you can pass a filename with a ``.xlsx``
extension, in which case the ``openpyxl`` module will be used to read the file.
It is often the case that users will insert columns to do temporary computations
-in Excel and you may not want to read in those columns. `ExcelFile.parse` takes
+in Excel and you may not want to read in those columns. `read_excel` takes
a `parse_cols` keyword to allow you to specify a subset of columns to parse.
If `parse_cols` is an integer, then it is assumed to indicate the last column
@@ -1001,14 +1018,14 @@ to be parsed.
.. code-block:: python
- xls.parse('Sheet1', parse_cols=2, index_col=None, na_values=['NA'])
+ read_excel('path_to_file.xls', 'Sheet1', parse_cols=2, index_col=None, na_values=['NA'])
If `parse_cols` is a list of integers, then it is assumed to be the file column
indices to be parsed.
.. code-block:: python
- xls.parse('Sheet1', parse_cols=[0, 2, 3], index_col=None, na_values=['NA'])
+ read_excel('path_to_file.xls', Sheet1', parse_cols=[0, 2, 3], index_col=None, na_values=['NA'])
To write a DataFrame object to a sheet of an Excel file, you can use the
``to_excel`` instance method. The arguments are largely the same as ``to_csv``
@@ -1883,16 +1900,13 @@ Writing to STATA format
.. _io.StataWriter:
-The function :func:'~pandas.io.StataWriter.write_file' will write a DataFrame
-into a .dta file. The format version of this file is always the latest one,
-115.
+The method ``to_stata`` will write a DataFrame into a .dta file.
+The format version of this file is always the latest one, 115.
.. ipython:: python
- from pandas.io.stata import StataWriter
df = DataFrame(randn(10,2),columns=list('AB'))
- writer = StataWriter('stata.dta',df)
- writer.write_file()
+ df.to_stata('stata.dta')
Reading from STATA format
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1901,24 +1915,21 @@ Reading from STATA format
.. versionadded:: 0.11.1
-The class StataReader will read the header of the given dta file at
-initialization. Its function :func:'~pandas.io.StataReader.data' will
-read the observations, converting them to a DataFrame which is returned:
+The top-level function ``read_stata`` will read a dta format file
+and return a DataFrame:
.. ipython:: python
- from pandas.io.stata import StataReader
- reader = StataReader('stata.dta')
- reader.data()
+ pd.read_stata('stata.dta')
-The parameter convert_categoricals indicates wheter value labels should be
-read and used to create a Categorical variable from them. Value labels can
-also be retrieved by the function variable_labels, which requires data to be
-called before.
+Currently the ``index`` is retrieved as a column on read back.
-The StataReader supports .dta Formats 104, 105, 108, 113-115.
+The parameter ``convert_categoricals`` indicates wheter value labels should be
+read and used to create a ``Categorical`` variable from them. Value labels can
+also be retrieved by the function ``variable_labels``, which requires data to be
+called before (see ``pandas.io.stata.StataReader``).
-Alternatively, the function :func:'~pandas.io.read_stata' can be used
+The StataReader supports .dta Formats 104, 105, 108, 113-115.
.. ipython:: python
:suppress:
diff --git a/doc/source/v0.10.0.txt b/doc/source/v0.10.0.txt
index 0c5497868efe2..51075a61bec4d 100644
--- a/doc/source/v0.10.0.txt
+++ b/doc/source/v0.10.0.txt
@@ -1,5 +1,10 @@
.. _whatsnew_0100:
+.. ipython:: python
+ :suppress:
+
+ from StringIO import StringIO
+
v0.10.0 (December 17, 2012)
---------------------------
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index bd4a7c49fbb4d..7cedb62693c73 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -6,6 +6,19 @@ v0.11.1 (??)
This is a minor release from 0.11.0 and includes several new features and
enhancements along with a large number of bug fixes.
+The I/O api is now much more consistent with the following top-level reading
+functions available, e.g. ``pd.read_csv``, and the counterpart writers are
+available as object methods, e.g. ``df.to_csv``
+
+.. csv-table::
+ :widths: 12, 15, 15, 15, 15
+ :delim: ;
+
+ Reader; ``read_csv``; ``read_excel``; ``read_hdf``; ``read_sql``
+ Writer; ``to_csv``; ``to_excel``; ``to_hdf``; ``to_sql``
+ Reader; ``read_html``; ``read_stata``; ``read_clipboard`` ;
+ Writer; ``to_html``; ``to_stata``; ``to_clipboard`` ;
+
API changes
~~~~~~~~~~~
@@ -74,6 +87,31 @@ API changes
- Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column
to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_)
+ - IO api
+
+ - added top-level function ``read_excel`` to replace the following,
+ The original API is deprecated and will be removed in a future version
+
+ .. code-block:: python
+
+ from pandas.io.parsers import ExcelFile
+ xls = ExcelFile('path_to_file.xls')
+ xls.parse('Sheet1', index_col=None, na_values=['NA'])
+
+ With
+
+ .. code-block:: python
+
+ import pandas as pd
+ pd.read_excel('path_to_file.xls', 'Sheet1', index_col=None, na_values=['NA'])
+
+ - added top-level function ``read_sql`` that is equivalent to the following
+
+ .. code-block:: python
+
+ from pandas.io.sql import read_frame
+ read_frame(....)
+
Enhancements
~~~~~~~~~~~~
@@ -109,6 +147,8 @@ Enhancements
a list or tuple.
- Added module for reading and writing Stata files: pandas.io.stata (GH1512_)
+ accessable via ``read_stata`` top-level function for reading,
+ and ``to_stata`` DataFrame method for writing
- ``DataFrame.replace()`` now allows regular expressions on contained
``Series`` with object dtype. See the examples section in the regular docs
@@ -218,7 +258,7 @@ Bug Fixes
.. ipython :: python
df = DataFrame({'a': list('ab..'), 'b': [1, 2, 3, 4]})
- df.replace(regex=r'\s*\.\s*', value=nan)
+ df.replace(regex=r'\s*\.\s*', value=np.nan)
to replace all occurrences of the string ``'.'`` with zero or more
instances of surrounding whitespace with ``NaN``.
@@ -227,7 +267,7 @@ Bug Fixes
.. ipython :: python
- df.replace('.', nan)
+ df.replace('.', np.nan)
to replace all occurrences of the string ``'.'`` with ``NaN``.
diff --git a/pandas/__init__.py b/pandas/__init__.py
index bf5bcc81bc21e..da4c146da3cfd 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -28,12 +28,8 @@
from pandas.sparse.api import *
from pandas.stats.api import *
from pandas.tseries.api import *
+from pandas.io.api import *
-from pandas.io.parsers import (read_csv, read_table, read_clipboard,
- read_fwf, to_clipboard, ExcelFile,
- ExcelWriter)
-from pandas.io.pytables import HDFStore, Term, get_store, read_hdf
-from pandas.io.html import read_html
from pandas.util.testing import debug
from pandas.tools.describe import value_range
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 9e276e01dd723..ea8dee51565ac 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1300,35 +1300,6 @@ def from_csv(cls, path, header=0, sep=',', index_col=0,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding,tupleize_cols=False)
- @classmethod
- def from_dta(dta, path, parse_dates=True, convert_categoricals=True, encoding=None, index_col=None):
- """
- Read Stata file into DataFrame
-
- Parameters
- ----------
- path : string file path or file handle / StringIO
- parse_dates : boolean, default True
- Convert date variables to DataFrame time values
- convert_categoricals : boolean, default True
- Read value labels and convert columns to Categorical/Factor variables
- encoding : string, None or encoding, default None
- Encoding used to parse the files. Note that Stata doesn't
- support unicode. None defaults to cp1252.
- index_col : int or sequence, default None
- Column to use for index. If a sequence is given, a MultiIndex
- is used. Different default from read_table
-
- Notes
- -----
-
- Returns
- -------
- y : DataFrame
- """
- from pandas.io.stata import read_stata
- return read_stata(path, parse_dates=parse_dates, convert_categoricals=convert_categoricals, encoding=encoding, index=index_col)
-
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame
@@ -1510,7 +1481,7 @@ def to_excel(self, excel_writer, sheet_name='sheet1', na_rep='',
>>> df2.to_excel(writer,'sheet2')
>>> writer.save()
"""
- from pandas.io.parsers import ExcelWriter
+ from pandas.io.excel import ExcelWriter
need_save = False
if isinstance(excel_writer, basestring):
excel_writer = ExcelWriter(excel_writer)
@@ -1529,6 +1500,57 @@ def to_excel(self, excel_writer, sheet_name='sheet1', na_rep='',
if need_save:
excel_writer.save()
+ def to_stata(self, fname, convert_dates=None, write_index=True, encoding="latin-1",
+ byteorder=None):
+ """
+ A class for writing Stata binary dta files from array-like objects
+
+ Parameters
+ ----------
+ fname : file path or buffer
+ Where to save the dta file.
+ convert_dates : dict
+ Dictionary mapping column of datetime types to the stata internal
+ format that you want to use for the dates. Options are
+ 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either a
+ number or a name.
+ encoding : str
+ Default is latin-1. Note that Stata does not support unicode.
+ byteorder : str
+ Can be ">", "<", "little", or "big". The default is None which uses
+ `sys.byteorder`
+
+ Examples
+ --------
+ >>> writer = StataWriter('./data_file.dta', data)
+ >>> writer.write_file()
+
+ Or with dates
+
+ >>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})
+ >>> writer.write_file()
+ """
+ from pandas.io.stata import StataWriter
+ writer = StataWriter(fname,self,convert_dates=convert_dates, encoding=encoding, byteorder=byteorder)
+ writer.write_file()
+
+ def to_sql(self, name, con, flavor='sqlite', if_exists='fail', **kwargs):
+ """
+ Write records stored in a DataFrame to a SQL database.
+
+ Parameters
+ ----------
+ name: name of SQL table
+ conn: an open SQL database connection object
+ flavor: {'sqlite', 'mysql', 'oracle'}, default 'sqlite'
+ if_exists: {'fail', 'replace', 'append'}, default 'fail'
+ fail: If table exists, do nothing.
+ replace: If table exists, drop it, recreate it, and insert data.
+ append: If table exists, insert data. Create if does not exist.
+ """
+ from pandas.io.sql import write_frame
+ write_frame(self, name, con, flavor=flavor, if_exists=if_exists, **kwargs)
+
@Appender(fmt.docstring_to_string, indents=1)
def to_string(self, buf=None, columns=None, col_space=None, colSpace=None,
header=True, index=True, na_rep='NaN', formatters=None,
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 4a80e2f65fd71..aa574219a259e 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -491,6 +491,10 @@ def to_hdf(self, path_or_buf, key, **kwargs):
from pandas.io import pytables
return pytables.to_hdf(path_or_buf, key, self, **kwargs)
+ def to_clipboard(self):
+ from pandas.io import parsers
+ parsers.to_clipboard(self)
+
# install the indexerse
for _name, _indexer in indexing.get_indexers_list():
PandasObject._create_indexer(_name,_indexer)
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index fa1305d27058e..0a099661c58f1 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -596,7 +596,7 @@ def to_excel(self, path, na_rep=''):
na_rep : string, default ''
Missing data representation
"""
- from pandas.io.parsers import ExcelWriter
+ from pandas.io.excel import ExcelWriter
writer = ExcelWriter(path)
for item, df in self.iteritems():
name = str(item)
diff --git a/pandas/io/api.py b/pandas/io/api.py
new file mode 100644
index 0000000000000..e4c0c8c0c77f0
--- /dev/null
+++ b/pandas/io/api.py
@@ -0,0 +1,11 @@
+"""
+Data IO api
+"""
+
+from pandas.io.parsers import (read_csv, read_table, read_clipboard,
+ read_fwf, to_clipboard)
+from pandas.io.excel import ExcelFile, ExcelWriter, read_excel
+from pandas.io.pytables import HDFStore, Term, get_store, read_hdf
+from pandas.io.html import read_html
+from pandas.io.sql import read_sql
+from pandas.io.stata import read_stata
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
new file mode 100644
index 0000000000000..ea102cb6803d7
--- /dev/null
+++ b/pandas/io/excel.py
@@ -0,0 +1,462 @@
+"""
+Module parse to/from Excel
+"""
+
+#----------------------------------------------------------------------
+# ExcelFile class
+
+import datetime
+from itertools import izip
+import numpy as np
+
+from pandas.core.index import Index, MultiIndex
+from pandas.core.frame import DataFrame
+import pandas.core.common as com
+from pandas.util import py3compat
+from pandas.io.parsers import TextParser
+from pandas.tseries.period import Period
+import json
+
+def read_excel(path_or_buf, sheetname, header=0, skiprows=None, skip_footer=0,
+ index_col=None, parse_cols=None, parse_dates=False,
+ date_parser=None, na_values=None, thousands=None, chunksize=None,
+ kind=None, **kwds):
+ """
+ Read Excel table into DataFrame
+
+ Parameters
+ ----------
+ sheetname : string
+ Name of Excel sheet
+ header : int, default 0
+ Row to use for the column labels of the parsed DataFrame
+ skiprows : list-like
+ Rows to skip at the beginning (0-indexed)
+ skip_footer : int, default 0
+ Rows at the end to skip (0-indexed)
+ index_col : int, default None
+ Column to use as the row labels of the DataFrame. Pass None if
+ there is no such column
+ parse_cols : int or list, default None
+ If None then parse all columns,
+ If int then indicates last column to be parsed
+ If list of ints then indicates list of column numbers to be parsed
+ If string then indicates comma separated list of column names and
+ column ranges (e.g. "A:E" or "A,C,E:F")
+ na_values : list-like, default None
+ List of additional strings to recognize as NA/NaN
+
+ Returns
+ -------
+ parsed : DataFrame
+ """
+ return ExcelFile(path_or_buf,kind=kind).parse(sheetname=sheetname,
+ header=0, skiprows=None, skip_footer=0,
+ index_col=None, parse_cols=None, parse_dates=False,
+ date_parser=None, na_values=None, thousands=None, chunksize=None,
+ kind=None, **kwds)
+
+class ExcelFile(object):
+ """
+ Class for parsing tabular excel sheets into DataFrame objects.
+ Uses xlrd. See ExcelFile.parse for more documentation
+
+ Parameters
+ ----------
+ path : string or file-like object
+ Path to xls or xlsx file
+ """
+ def __init__(self, path_or_buf, kind=None, **kwds):
+ self.kind = kind
+
+ import xlrd # throw an ImportError if we need to
+ ver = tuple(map(int,xlrd.__VERSION__.split(".")[:2]))
+ if ver < (0, 9):
+ raise ImportError("pandas requires xlrd >= 0.9.0 for excel support, current version "+xlrd.__VERSION__)
+
+ self.path_or_buf = path_or_buf
+ self.tmpfile = None
+
+ if isinstance(path_or_buf, basestring):
+ self.book = xlrd.open_workbook(path_or_buf)
+ else:
+ data = path_or_buf.read()
+ self.book = xlrd.open_workbook(file_contents=data)
+
+ def __repr__(self):
+ return object.__repr__(self)
+
+ def parse(self, sheetname, header=0, skiprows=None, skip_footer=0,
+ index_col=None, parse_cols=None, parse_dates=False,
+ date_parser=None, na_values=None, thousands=None, chunksize=None,
+ **kwds):
+ """
+ Read Excel table into DataFrame
+
+ Parameters
+ ----------
+ sheetname : string
+ Name of Excel sheet
+ header : int, default 0
+ Row to use for the column labels of the parsed DataFrame
+ skiprows : list-like
+ Rows to skip at the beginning (0-indexed)
+ skip_footer : int, default 0
+ Rows at the end to skip (0-indexed)
+ index_col : int, default None
+ Column to use as the row labels of the DataFrame. Pass None if
+ there is no such column
+ parse_cols : int or list, default None
+ If None then parse all columns,
+ If int then indicates last column to be parsed
+ If list of ints then indicates list of column numbers to be parsed
+ If string then indicates comma separated list of column names and
+ column ranges (e.g. "A:E" or "A,C,E:F")
+ na_values : list-like, default None
+ List of additional strings to recognize as NA/NaN
+
+ Returns
+ -------
+ parsed : DataFrame
+ """
+
+ # has_index_names: boolean, default False
+ # True if the cols defined in index_col have an index name and are
+ # not in the header
+ has_index_names = False # removed as new argument of API function
+
+ skipfooter = kwds.pop('skipfooter', None)
+ if skipfooter is not None:
+ skip_footer = skipfooter
+
+ return self._parse_excel(sheetname, header=header,
+ skiprows=skiprows, index_col=index_col,
+ has_index_names=has_index_names,
+ parse_cols=parse_cols,
+ parse_dates=parse_dates,
+ date_parser=date_parser,
+ na_values=na_values,
+ thousands=thousands,
+ chunksize=chunksize,
+ skip_footer=skip_footer)
+
+ def _should_parse(self, i, parse_cols):
+
+ def _range2cols(areas):
+ """
+ Convert comma separated list of column names and column ranges to a
+ list of 0-based column indexes.
+
+ >>> _range2cols('A:E')
+ [0, 1, 2, 3, 4]
+ >>> _range2cols('A,C,Z:AB')
+ [0, 2, 25, 26, 27]
+ """
+ def _excel2num(x):
+ "Convert Excel column name like 'AB' to 0-based column index"
+ return reduce(lambda s, a: s * 26 + ord(a) - ord('A') + 1, x.upper().strip(), 0) - 1
+
+ cols = []
+ for rng in areas.split(','):
+ if ':' in rng:
+ rng = rng.split(':')
+ cols += range(_excel2num(rng[0]), _excel2num(rng[1]) + 1)
+ else:
+ cols.append(_excel2num(rng))
+ return cols
+
+ if isinstance(parse_cols, int):
+ return i <= parse_cols
+ elif isinstance(parse_cols, basestring):
+ return i in _range2cols(parse_cols)
+ else:
+ return i in parse_cols
+
+ def _parse_excel(self, sheetname, header=0, skiprows=None,
+ skip_footer=0, index_col=None, has_index_names=None,
+ parse_cols=None, parse_dates=False, date_parser=None,
+ na_values=None, thousands=None, chunksize=None):
+ from xlrd import (xldate_as_tuple, XL_CELL_DATE,
+ XL_CELL_ERROR, XL_CELL_BOOLEAN)
+
+ datemode = self.book.datemode
+ sheet = self.book.sheet_by_name(sheetname)
+
+ data = []
+ should_parse = {}
+ for i in range(sheet.nrows):
+ row = []
+ for j, (value, typ) in enumerate(izip(sheet.row_values(i),
+ sheet.row_types(i))):
+ if parse_cols is not None and j not in should_parse:
+ should_parse[j] = self._should_parse(j, parse_cols)
+
+ if parse_cols is None or should_parse[j]:
+ if typ == XL_CELL_DATE:
+ dt = xldate_as_tuple(value, datemode)
+ # how to produce this first case?
+ if dt[0] < datetime.MINYEAR: # pragma: no cover
+ value = datetime.time(*dt[3:])
+ else:
+ value = datetime.datetime(*dt)
+ elif typ == XL_CELL_ERROR:
+ value = np.nan
+ elif typ == XL_CELL_BOOLEAN:
+ value = bool(value)
+ row.append(value)
+
+ data.append(row)
+
+ if header is not None:
+ data[header] = _trim_excel_header(data[header])
+
+ parser = TextParser(data, header=header, index_col=index_col,
+ has_index_names=has_index_names,
+ na_values=na_values,
+ thousands=thousands,
+ parse_dates=parse_dates,
+ date_parser=date_parser,
+ skiprows=skiprows,
+ skip_footer=skip_footer,
+ chunksize=chunksize)
+
+ return parser.read()
+
+ @property
+ def sheet_names(self):
+ return self.book.sheet_names()
+
+
+def _trim_excel_header(row):
+ # trim header row so auto-index inference works
+ # xlrd uses '' , openpyxl None
+ while len(row) > 0 and (row[0] == '' or row[0] is None):
+ row = row[1:]
+ return row
+
+
+class CellStyleConverter(object):
+ """
+ Utility Class which converts a style dict to xlrd or openpyxl style
+ """
+
+ @staticmethod
+ def to_xls(style_dict, num_format_str=None):
+ """
+ converts a style_dict to an xlwt style object
+ Parameters
+ ----------
+ style_dict: style dictionary to convert
+ """
+ import xlwt
+
+ def style_to_xlwt(item, firstlevel=True, field_sep=',', line_sep=';'):
+ """helper wich recursively generate an xlwt easy style string
+ for example:
+
+ hstyle = {"font": {"bold": True},
+ "border": {"top": "thin",
+ "right": "thin",
+ "bottom": "thin",
+ "left": "thin"},
+ "align": {"horiz": "center"}}
+ will be converted to
+ font: bold on; \
+ border: top thin, right thin, bottom thin, left thin; \
+ align: horiz center;
+ """
+ if hasattr(item, 'items'):
+ if firstlevel:
+ it = ["%s: %s" % (key, style_to_xlwt(value, False))
+ for key, value in item.items()]
+ out = "%s " % (line_sep).join(it)
+ return out
+ else:
+ it = ["%s %s" % (key, style_to_xlwt(value, False))
+ for key, value in item.items()]
+ out = "%s " % (field_sep).join(it)
+ return out
+ else:
+ item = "%s" % item
+ item = item.replace("True", "on")
+ item = item.replace("False", "off")
+ return item
+
+ if style_dict:
+ xlwt_stylestr = style_to_xlwt(style_dict)
+ style = xlwt.easyxf(xlwt_stylestr, field_sep=',', line_sep=';')
+ else:
+ style = xlwt.XFStyle()
+ if num_format_str is not None:
+ style.num_format_str = num_format_str
+
+ return style
+
+ @staticmethod
+ def to_xlsx(style_dict):
+ """
+ converts a style_dict to an openpyxl style object
+ Parameters
+ ----------
+ style_dict: style dictionary to convert
+ """
+
+ from openpyxl.style import Style
+ xls_style = Style()
+ for key, value in style_dict.items():
+ for nk, nv in value.items():
+ if key == "borders":
+ (xls_style.borders.__getattribute__(nk)
+ .__setattr__('border_style', nv))
+ else:
+ xls_style.__getattribute__(key).__setattr__(nk, nv)
+
+ return xls_style
+
+
+def _conv_value(val):
+ # convert value for excel dump
+ if isinstance(val, np.int64):
+ val = int(val)
+ elif isinstance(val, np.bool8):
+ val = bool(val)
+ elif isinstance(val, Period):
+ val = "%s" % val
+
+ return val
+
+
+class ExcelWriter(object):
+ """
+ Class for writing DataFrame objects into excel sheets, uses xlwt for xls,
+ openpyxl for xlsx. See DataFrame.to_excel for typical usage.
+
+ Parameters
+ ----------
+ path : string
+ Path to xls file
+ """
+ def __init__(self, path):
+ self.use_xlsx = True
+ if path.endswith('.xls'):
+ self.use_xlsx = False
+ import xlwt
+ self.book = xlwt.Workbook()
+ self.fm_datetime = xlwt.easyxf(
+ num_format_str='YYYY-MM-DD HH:MM:SS')
+ self.fm_date = xlwt.easyxf(num_format_str='YYYY-MM-DD')
+ else:
+ from openpyxl.workbook import Workbook
+ self.book = Workbook() # optimized_write=True)
+ # open pyxl 1.6.1 adds a dummy sheet remove it
+ if self.book.worksheets:
+ self.book.remove_sheet(self.book.worksheets[0])
+ self.path = path
+ self.sheets = {}
+ self.cur_sheet = None
+
+ def save(self):
+ """
+ Save workbook to disk
+ """
+ self.book.save(self.path)
+
+ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
+ """
+ Write given formated cells into Excel an excel sheet
+
+ Parameters
+ ----------
+ cells : generator
+ cell of formated data to save to Excel sheet
+ sheet_name : string, default None
+ Name of Excel sheet, if None, then use self.cur_sheet
+ startrow: upper left cell row to dump data frame
+ startcol: upper left cell column to dump data frame
+ """
+ if sheet_name is None:
+ sheet_name = self.cur_sheet
+ if sheet_name is None: # pragma: no cover
+ raise Exception('Must pass explicit sheet_name or set '
+ 'cur_sheet property')
+ if self.use_xlsx:
+ self._writecells_xlsx(cells, sheet_name, startrow, startcol)
+ else:
+ self._writecells_xls(cells, sheet_name, startrow, startcol)
+
+ def _writecells_xlsx(self, cells, sheet_name, startrow, startcol):
+
+ from openpyxl.cell import get_column_letter
+
+ if sheet_name in self.sheets:
+ wks = self.sheets[sheet_name]
+ else:
+ wks = self.book.create_sheet()
+ wks.title = sheet_name
+ self.sheets[sheet_name] = wks
+
+ for cell in cells:
+ colletter = get_column_letter(startcol + cell.col + 1)
+ xcell = wks.cell("%s%s" % (colletter, startrow + cell.row + 1))
+ xcell.value = _conv_value(cell.val)
+ if cell.style:
+ style = CellStyleConverter.to_xlsx(cell.style)
+ for field in style.__fields__:
+ xcell.style.__setattr__(field,
+ style.__getattribute__(field))
+
+ if isinstance(cell.val, datetime.datetime):
+ xcell.style.number_format.format_code = "YYYY-MM-DD HH:MM:SS"
+ elif isinstance(cell.val, datetime.date):
+ xcell.style.number_format.format_code = "YYYY-MM-DD"
+
+ # merging requires openpyxl latest (works on 1.6.1)
+ # todo add version check
+ if cell.mergestart is not None and cell.mergeend is not None:
+ cletterstart = get_column_letter(startcol + cell.col + 1)
+ cletterend = get_column_letter(startcol + cell.mergeend + 1)
+
+ wks.merge_cells('%s%s:%s%s' % (cletterstart,
+ startrow + cell.row + 1,
+ cletterend,
+ startrow + cell.mergestart + 1))
+
+ def _writecells_xls(self, cells, sheet_name, startrow, startcol):
+ if sheet_name in self.sheets:
+ wks = self.sheets[sheet_name]
+ else:
+ wks = self.book.add_sheet(sheet_name)
+ self.sheets[sheet_name] = wks
+
+ style_dict = {}
+
+ for cell in cells:
+ val = _conv_value(cell.val)
+
+ num_format_str = None
+ if isinstance(cell.val, datetime.datetime):
+ num_format_str = "YYYY-MM-DD HH:MM:SS"
+ if isinstance(cell.val, datetime.date):
+ num_format_str = "YYYY-MM-DD"
+
+ stylekey = json.dumps(cell.style)
+ if num_format_str:
+ stylekey += num_format_str
+
+ if stylekey in style_dict:
+ style = style_dict[stylekey]
+ else:
+ style = CellStyleConverter.to_xls(cell.style, num_format_str)
+ style_dict[stylekey] = style
+
+ if cell.mergestart is not None and cell.mergeend is not None:
+ wks.write_merge(startrow + cell.row,
+ startrow + cell.mergestart,
+ startcol + cell.col,
+ startcol + cell.mergeend,
+ val, style)
+ else:
+ wks.write(startrow + cell.row,
+ startcol + cell.col,
+ val, style)
+
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 0dde47e6065e4..249afe0755445 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1981,409 +1981,17 @@ def _make_reader(self, f):
self.data = FixedWidthReader(f, self.colspecs, self.delimiter)
-#----------------------------------------------------------------------
-# ExcelFile class
-
-class ExcelFile(object):
- """
- Class for parsing tabular excel sheets into DataFrame objects.
- Uses xlrd. See ExcelFile.parse for more documentation
-
- Parameters
- ----------
- path : string or file-like object
- Path to xls or xlsx file
- """
- def __init__(self, path_or_buf, kind=None, **kwds):
- self.kind = kind
-
- import xlrd # throw an ImportError if we need to
- ver = tuple(map(int,xlrd.__VERSION__.split(".")[:2]))
- if ver < (0, 9):
- raise ImportError("pandas requires xlrd >= 0.9.0 for excel support, current version "+xlrd.__VERSION__)
-
- self.path_or_buf = path_or_buf
- self.tmpfile = None
-
- if isinstance(path_or_buf, basestring):
- self.book = xlrd.open_workbook(path_or_buf)
- else:
- data = path_or_buf.read()
- self.book = xlrd.open_workbook(file_contents=data)
-
- def __repr__(self):
- return object.__repr__(self)
-
- def parse(self, sheetname, header=0, skiprows=None, skip_footer=0,
- index_col=None, parse_cols=None, parse_dates=False,
- date_parser=None, na_values=None, thousands=None, chunksize=None,
- **kwds):
- """
- Read Excel table into DataFrame
-
- Parameters
- ----------
- sheetname : string
- Name of Excel sheet
- header : int, default 0
- Row to use for the column labels of the parsed DataFrame
- skiprows : list-like
- Rows to skip at the beginning (0-indexed)
- skip_footer : int, default 0
- Rows at the end to skip (0-indexed)
- index_col : int, default None
- Column to use as the row labels of the DataFrame. Pass None if
- there is no such column
- parse_cols : int or list, default None
- If None then parse all columns,
- If int then indicates last column to be parsed
- If list of ints then indicates list of column numbers to be parsed
- If string then indicates comma separated list of column names and
- column ranges (e.g. "A:E" or "A,C,E:F")
- na_values : list-like, default None
- List of additional strings to recognize as NA/NaN
-
- Returns
- -------
- parsed : DataFrame
- """
-
- # has_index_names: boolean, default False
- # True if the cols defined in index_col have an index name and are
- # not in the header
- has_index_names = False # removed as new argument of API function
-
- skipfooter = kwds.pop('skipfooter', None)
- if skipfooter is not None:
- skip_footer = skipfooter
-
- return self._parse_excel(sheetname, header=header,
- skiprows=skiprows, index_col=index_col,
- has_index_names=has_index_names,
- parse_cols=parse_cols,
- parse_dates=parse_dates,
- date_parser=date_parser,
- na_values=na_values,
- thousands=thousands,
- chunksize=chunksize,
- skip_footer=skip_footer)
-
- def _should_parse(self, i, parse_cols):
-
- def _range2cols(areas):
- """
- Convert comma separated list of column names and column ranges to a
- list of 0-based column indexes.
-
- >>> _range2cols('A:E')
- [0, 1, 2, 3, 4]
- >>> _range2cols('A,C,Z:AB')
- [0, 2, 25, 26, 27]
- """
- def _excel2num(x):
- "Convert Excel column name like 'AB' to 0-based column index"
- return reduce(lambda s, a: s * 26 + ord(a) - ord('A') + 1, x.upper().strip(), 0) - 1
-
- cols = []
- for rng in areas.split(','):
- if ':' in rng:
- rng = rng.split(':')
- cols += range(_excel2num(rng[0]), _excel2num(rng[1]) + 1)
- else:
- cols.append(_excel2num(rng))
- return cols
-
- if isinstance(parse_cols, int):
- return i <= parse_cols
- elif isinstance(parse_cols, basestring):
- return i in _range2cols(parse_cols)
- else:
- return i in parse_cols
-
- def _parse_excel(self, sheetname, header=0, skiprows=None,
- skip_footer=0, index_col=None, has_index_names=None,
- parse_cols=None, parse_dates=False, date_parser=None,
- na_values=None, thousands=None, chunksize=None):
- from xlrd import (xldate_as_tuple, XL_CELL_DATE,
- XL_CELL_ERROR, XL_CELL_BOOLEAN)
-
- datemode = self.book.datemode
- sheet = self.book.sheet_by_name(sheetname)
-
- data = []
- should_parse = {}
- for i in range(sheet.nrows):
- row = []
- for j, (value, typ) in enumerate(izip(sheet.row_values(i),
- sheet.row_types(i))):
- if parse_cols is not None and j not in should_parse:
- should_parse[j] = self._should_parse(j, parse_cols)
-
- if parse_cols is None or should_parse[j]:
- if typ == XL_CELL_DATE:
- dt = xldate_as_tuple(value, datemode)
- # how to produce this first case?
- if dt[0] < datetime.MINYEAR: # pragma: no cover
- value = datetime.time(*dt[3:])
- else:
- value = datetime.datetime(*dt)
- elif typ == XL_CELL_ERROR:
- value = np.nan
- elif typ == XL_CELL_BOOLEAN:
- value = bool(value)
- row.append(value)
-
- data.append(row)
-
- if header is not None:
- data[header] = _trim_excel_header(data[header])
-
- parser = TextParser(data, header=header, index_col=index_col,
- has_index_names=has_index_names,
- na_values=na_values,
- thousands=thousands,
- parse_dates=parse_dates,
- date_parser=date_parser,
- skiprows=skiprows,
- skip_footer=skip_footer,
- chunksize=chunksize)
-
- return parser.read()
-
- @property
- def sheet_names(self):
- return self.book.sheet_names()
-
-
-def _trim_excel_header(row):
- # trim header row so auto-index inference works
- # xlrd uses '' , openpyxl None
- while len(row) > 0 and (row[0] == '' or row[0] is None):
- row = row[1:]
- return row
-
-
-class CellStyleConverter(object):
- """
- Utility Class which converts a style dict to xlrd or openpyxl style
- """
-
- @staticmethod
- def to_xls(style_dict, num_format_str=None):
- """
- converts a style_dict to an xlwt style object
- Parameters
- ----------
- style_dict: style dictionary to convert
- """
- import xlwt
-
- def style_to_xlwt(item, firstlevel=True, field_sep=',', line_sep=';'):
- """helper wich recursively generate an xlwt easy style string
- for example:
-
- hstyle = {"font": {"bold": True},
- "border": {"top": "thin",
- "right": "thin",
- "bottom": "thin",
- "left": "thin"},
- "align": {"horiz": "center"}}
- will be converted to
- font: bold on; \
- border: top thin, right thin, bottom thin, left thin; \
- align: horiz center;
- """
- if hasattr(item, 'items'):
- if firstlevel:
- it = ["%s: %s" % (key, style_to_xlwt(value, False))
- for key, value in item.items()]
- out = "%s " % (line_sep).join(it)
- return out
- else:
- it = ["%s %s" % (key, style_to_xlwt(value, False))
- for key, value in item.items()]
- out = "%s " % (field_sep).join(it)
- return out
- else:
- item = "%s" % item
- item = item.replace("True", "on")
- item = item.replace("False", "off")
- return item
-
- if style_dict:
- xlwt_stylestr = style_to_xlwt(style_dict)
- style = xlwt.easyxf(xlwt_stylestr, field_sep=',', line_sep=';')
- else:
- style = xlwt.XFStyle()
- if num_format_str is not None:
- style.num_format_str = num_format_str
-
- return style
-
- @staticmethod
- def to_xlsx(style_dict):
- """
- converts a style_dict to an openpyxl style object
- Parameters
- ----------
- style_dict: style dictionary to convert
- """
-
- from openpyxl.style import Style
- xls_style = Style()
- for key, value in style_dict.items():
- for nk, nv in value.items():
- if key == "borders":
- (xls_style.borders.__getattribute__(nk)
- .__setattr__('border_style', nv))
- else:
- xls_style.__getattribute__(key).__setattr__(nk, nv)
-
- return xls_style
-
-
-def _conv_value(val):
- # convert value for excel dump
- if isinstance(val, np.int64):
- val = int(val)
- elif isinstance(val, np.bool8):
- val = bool(val)
- elif isinstance(val, Period):
- val = "%s" % val
-
- return val
-
-
-class ExcelWriter(object):
- """
- Class for writing DataFrame objects into excel sheets, uses xlwt for xls,
- openpyxl for xlsx. See DataFrame.to_excel for typical usage.
-
- Parameters
- ----------
- path : string
- Path to xls file
- """
+from pandas.io import excel
+class ExcelWriter(excel.ExcelWriter):
def __init__(self, path):
- self.use_xlsx = True
- if path.endswith('.xls'):
- self.use_xlsx = False
- import xlwt
- self.book = xlwt.Workbook()
- self.fm_datetime = xlwt.easyxf(
- num_format_str='YYYY-MM-DD HH:MM:SS')
- self.fm_date = xlwt.easyxf(num_format_str='YYYY-MM-DD')
- else:
- from openpyxl.workbook import Workbook
- self.book = Workbook() # optimized_write=True)
- # open pyxl 1.6.1 adds a dummy sheet remove it
- if self.book.worksheets:
- self.book.remove_sheet(self.book.worksheets[0])
- self.path = path
- self.sheets = {}
- self.cur_sheet = None
-
- def save(self):
- """
- Save workbook to disk
- """
- self.book.save(self.path)
+ from warnings import warn
+ warn("ExcelWriter can now be imported from: pandas.io.excel", FutureWarning)
+ super(ExcelWriter, self).__init__(path)
- def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
- """
- Write given formated cells into Excel an excel sheet
-
- Parameters
- ----------
- cells : generator
- cell of formated data to save to Excel sheet
- sheet_name : string, default None
- Name of Excel sheet, if None, then use self.cur_sheet
- startrow: upper left cell row to dump data frame
- startcol: upper left cell column to dump data frame
- """
- if sheet_name is None:
- sheet_name = self.cur_sheet
- if sheet_name is None: # pragma: no cover
- raise Exception('Must pass explicit sheet_name or set '
- 'cur_sheet property')
- if self.use_xlsx:
- self._writecells_xlsx(cells, sheet_name, startrow, startcol)
- else:
- self._writecells_xls(cells, sheet_name, startrow, startcol)
-
- def _writecells_xlsx(self, cells, sheet_name, startrow, startcol):
-
- from openpyxl.cell import get_column_letter
-
- if sheet_name in self.sheets:
- wks = self.sheets[sheet_name]
- else:
- wks = self.book.create_sheet()
- wks.title = sheet_name
- self.sheets[sheet_name] = wks
-
- for cell in cells:
- colletter = get_column_letter(startcol + cell.col + 1)
- xcell = wks.cell("%s%s" % (colletter, startrow + cell.row + 1))
- xcell.value = _conv_value(cell.val)
- if cell.style:
- style = CellStyleConverter.to_xlsx(cell.style)
- for field in style.__fields__:
- xcell.style.__setattr__(field,
- style.__getattribute__(field))
-
- if isinstance(cell.val, datetime.datetime):
- xcell.style.number_format.format_code = "YYYY-MM-DD HH:MM:SS"
- elif isinstance(cell.val, datetime.date):
- xcell.style.number_format.format_code = "YYYY-MM-DD"
-
- # merging requires openpyxl latest (works on 1.6.1)
- # todo add version check
- if cell.mergestart is not None and cell.mergeend is not None:
- cletterstart = get_column_letter(startcol + cell.col + 1)
- cletterend = get_column_letter(startcol + cell.mergeend + 1)
-
- wks.merge_cells('%s%s:%s%s' % (cletterstart,
- startrow + cell.row + 1,
- cletterend,
- startrow + cell.mergestart + 1))
-
- def _writecells_xls(self, cells, sheet_name, startrow, startcol):
- if sheet_name in self.sheets:
- wks = self.sheets[sheet_name]
- else:
- wks = self.book.add_sheet(sheet_name)
- self.sheets[sheet_name] = wks
-
- style_dict = {}
-
- for cell in cells:
- val = _conv_value(cell.val)
-
- num_format_str = None
- if isinstance(cell.val, datetime.datetime):
- num_format_str = "YYYY-MM-DD HH:MM:SS"
- if isinstance(cell.val, datetime.date):
- num_format_str = "YYYY-MM-DD"
-
- stylekey = json.dumps(cell.style)
- if num_format_str:
- stylekey += num_format_str
+class ExcelFile(excel.ExcelFile):
+ def __init__(self, path_or_buf, kind=None, **kwds):
+ from warnings import warn
+ warn("ExcelFile can now be imported from: pandas.io.excel", FutureWarning)
+ super(ExcelFile, self).__init__(path_or_buf, kind=kind, **kwds)
- if stylekey in style_dict:
- style = style_dict[stylekey]
- else:
- style = CellStyleConverter.to_xls(cell.style, num_format_str)
- style_dict[stylekey] = style
-
- if cell.mergestart is not None and cell.mergeend is not None:
- wks.write_merge(startrow + cell.row,
- startrow + cell.mergestart,
- startcol + cell.col,
- startcol + cell.mergeend,
- val, style)
- else:
- wks.write(startrow + cell.row,
- startcol + cell.col,
- val, style)
+
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index b54a30d95bb54..4a1cac8a60e30 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -167,7 +167,7 @@ def read_frame(sql, con, index_col=None, coerce_float=True, params=None):
return result
frame_query = read_frame
-
+read_sql = read_frame
def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs):
"""
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 3fc246c2ffbc7..f1257f505ca9b 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -37,10 +37,8 @@ def read_stata(filepath_or_buffer, convert_dates=True, convert_categoricals=True
return reader.data(convert_dates, convert_categoricals, index)
-
_date_formats = ["%tc", "%tC", "%td", "%tw", "%tm", "%tq", "%th", "%ty"]
-
def _stata_elapsed_date_to_datetime(date, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
diff --git a/pandas/io/tests/test_cparser.py b/pandas/io/tests/test_cparser.py
index 0c5b168ee8de5..23503f74f25f2 100644
--- a/pandas/io/tests/test_cparser.py
+++ b/pandas/io/tests/test_cparser.py
@@ -18,7 +18,7 @@
from pandas import DataFrame, Series, Index, isnull, MultiIndex
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
- ExcelFile, TextParser)
+ TextParser)
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
assert_series_equal, network)
import pandas.lib as lib
diff --git a/pandas/io/tests/test_date_converters.py b/pandas/io/tests/test_date_converters.py
index 9396581f74326..396912c0f5f54 100644
--- a/pandas/io/tests/test_date_converters.py
+++ b/pandas/io/tests/test_date_converters.py
@@ -15,7 +15,7 @@
from pandas import DataFrame, Series, Index, isnull
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
- ExcelFile, TextParser)
+ TextParser)
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
assert_series_equal, network)
import pandas.lib as lib
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index 8a145517d3b5a..00a695f3013cd 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -17,7 +17,8 @@
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
- ExcelFile, TextFileReader, TextParser)
+ TextParser, TextFileReader)
+from pandas.io.excel import ExcelFile, ExcelWriter, read_excel
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
network,
@@ -35,9 +36,6 @@
from pandas._parser import OverflowError
-from pandas.io.parsers import (ExcelFile, ExcelWriter, read_csv)
-
-
def _skip_if_no_xlrd():
try:
import xlrd
@@ -275,19 +273,16 @@ def _check_extension(self, ext):
# test roundtrip
self.frame.to_excel(path, 'test1')
- reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=0)
+ recons = read_excel(path, 'test1', index_col=0)
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(path, 'test1', index=False)
- reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=None)
+ recons = read_excel(path, 'test1', index_col=None)
recons.index = self.frame.index
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(path, 'test1', na_rep='NA')
- reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=0, na_values=['NA'])
+ recons = read_excel(path, 'test1', index_col=0, na_values=['NA'])
tm.assert_frame_equal(self.frame, recons)
def test_excel_roundtrip_xls_mixed(self):
@@ -668,7 +663,7 @@ def test_to_excel_unicode_filename(self):
tm.assert_frame_equal(rs, xp)
def test_to_excel_styleconverter(self):
- from pandas.io.parsers import CellStyleConverter
+ from pandas.io.excel import CellStyleConverter
try:
import xlwt
@@ -859,6 +854,23 @@ def roundtrip(df, header=True, parser_hdr=0):
self.assertEqual(res.shape, (1, 2))
self.assertTrue(res.ix[0, 0] is not np.nan)
+ def test_deprecated_from_parsers(self):
+
+ # since 0.11.1 changed the import path
+ import warnings
+
+ with warnings.catch_warnings() as w:
+ warnings.filterwarnings(action='ignore', category=FutureWarning)
+
+ _skip_if_no_xlrd()
+ from pandas.io.parsers import ExcelFile as xf
+ xf(self.xls1)
+
+ _skip_if_no_xlwt()
+ with ensure_clean('test.xls') as path:
+ from pandas.io.parsers import ExcelWriter as xw
+ xw(path)
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index 51062b2ab706f..9f5d796763fb0 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -130,23 +130,21 @@ def test_read_dta4(self):
def test_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss', 'int_miss', 'long_miss'])
+ original.index.name = 'index'
with ensure_clean(self.dta5) as path:
- writer = StataWriter(path, original, None, False)
- writer.write_file()
-
+ original.to_stata(path, None, False)
written_and_read_again = self.read_dta(path)
- tm.assert_frame_equal(written_and_read_again, original)
+ tm.assert_frame_equal(written_and_read_again.set_index('index'), original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
+ original.index.name = 'index'
with ensure_clean(self.dta6) as path:
- writer = StataWriter(path, original, None, False)
- writer.write_file()
-
+ original.to_stata(path, None, False)
written_and_read_again = self.read_dta(path)
- tm.assert_frame_equal(written_and_read_again, original)
+ tm.assert_frame_equal(written_and_read_again.set_index('index'), original)
@nose.tools.nottest
def test_read_dta7(self):
@@ -184,6 +182,10 @@ def test_read_dta9(self):
decimal=3
)
+ def test_stata_doc_examples(self):
+ with ensure_clean(self.dta5) as path:
+ df = DataFrame(np.random.randn(10,2),columns=list('AB'))
+ df.to_stata('path')
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 3640025bbf95c..58b7ac272401f 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1367,7 +1367,7 @@ def test_to_excel(self):
import xlwt
import xlrd
import openpyxl
- from pandas.io.parsers import ExcelFile
+ from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest
diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py
index 5981640b4159c..a2e08bc744ab0 100644
--- a/pandas/tests/test_panel4d.py
+++ b/pandas/tests/test_panel4d.py
@@ -15,7 +15,6 @@
import pandas.core.common as com
import pandas.core.panel as panelmod
from pandas.util import py3compat
-from pandas.io.parsers import (ExcelFile, ExcelWriter)
from pandas.util.testing import (assert_panel_equal,
assert_panel4d_equal,
| closes most of #3411 (all except the deprecation of `from_csv`)
- moved excel functionaility out of io.parsers to io.excel
added read_excel top-level function
aliases from pandas.io.excel
- added read_stata top-level function, to_stata DataFrame method
aliases from pandas.io.stata
removed read_dta (replace by read_stata)
- added read_sql top-level function, to_sql DataFrame method
aliases from pandas.io.sql
DOC: doc updates for all the above and intro section to io.rst
| https://api.github.com/repos/pandas-dev/pandas/pulls/3693 | 2013-05-25T00:40:50Z | 2013-05-30T16:32:27Z | 2013-05-30T16:32:27Z | 2014-07-16T08:10:24Z |
BUG: allow insertion/deletion of columns in non-unique column DataFrames | diff --git a/RELEASE.rst b/RELEASE.rst
index 38a8b42fcde6f..710e3cbbb2b81 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -102,6 +102,8 @@ pandas 0.11.1
GH3675_, GH3676_).
- Deprecated display.height, display.width is now only a formatting option
does not control triggering of summary, similar to < 0.11.0.
+ - Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column
+ to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_)
**Bug Fixes**
@@ -133,6 +135,8 @@ pandas 0.11.1
- Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_)
- Concat to produce a non-unique columns when duplicates are across dtypes is fixed (GH3602_)
- Non-unique indexing with a slice via ``loc`` and friends fixed (GH3659_)
+ - Allow insert/delete to non-unique columns (GH3679_)
+ - Extend ``reindex`` to correctly deal with non-unique indices (GH3679_)
- Fixed bug in groupby with empty series referencing a variable before assignment. (GH3510_)
- Fixed bug in mixed-frame assignment with aligned series (GH3492_)
- Fixed bug in selecting month/quarter/year from a series would not select the time element
@@ -242,6 +246,8 @@ pandas 0.11.1
.. _GH3606: https://github.com/pydata/pandas/issues/3606
.. _GH3659: https://github.com/pydata/pandas/issues/3659
.. _GH3649: https://github.com/pydata/pandas/issues/3649
+.. _GH3679: https://github.com/pydata/pandas/issues/3679
+.. _Gh3616: https://github.com/pydata/pandas/issues/3616
.. _GH1818: https://github.com/pydata/pandas/issues/1818
.. _GH3572: https://github.com/pydata/pandas/issues/3572
.. _GH3582: https://github.com/pydata/pandas/issues/3582
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index ac769ed2f1cea..bd4a7c49fbb4d 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -71,6 +71,8 @@ API changes
``DataFrame.fillna()`` and ``DataFrame.replace()`` instead. (GH3582_,
GH3675_, GH3676_)
+ - Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column
+ to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_)
Enhancements
~~~~~~~~~~~~
@@ -209,6 +211,7 @@ Bug Fixes
and handle missing elements like unique indices (GH3561_)
- Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_)
- Concat to produce a non-unique columns when duplicates are across dtypes is fixed (GH3602_)
+ - Allow insert/delete to non-unique columns (GH3679_)
For example you can do
@@ -270,3 +273,4 @@ on GitHub for a complete list.
.. _GH3676: https://github.com/pydata/pandas/issues/3676
.. _GH3675: https://github.com/pydata/pandas/issues/3675
.. _GH3682: https://github.com/pydata/pandas/issues/3682
+.. _GH3679: https://github.com/pydata/pandas/issues/3679
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 68edceb29e6b2..5c2bc3e632a57 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2003,7 +2003,11 @@ def __getitem__(self, key):
return self._getitem_multilevel(key)
else:
# get column
- return self._get_item_cache(key)
+ if self.columns.is_unique:
+ return self._get_item_cache(key)
+
+ # duplicate columns
+ return self._constructor(self._data.get(key))
def _getitem_slice(self, key):
return self._slice(key, axis=0)
@@ -2162,10 +2166,10 @@ def _set_item(self, key, value):
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
- def insert(self, loc, column, value):
+ def insert(self, loc, column, value, allow_duplicates=False):
"""
- Insert column into DataFrame at specified location. Raises Exception if
- column is already contained in the DataFrame
+ Insert column into DataFrame at specified location.
+ if allow_duplicates is False, Raises Exception if column is already contained in the DataFrame
Parameters
----------
@@ -2175,7 +2179,7 @@ def insert(self, loc, column, value):
value : int, Series, or array-like
"""
value = self._sanitize_column(column, value)
- self._data.insert(loc, column, value)
+ self._data.insert(loc, column, value, allow_duplicates=allow_duplicates)
def _sanitize_column(self, key, value):
# Need to make sure new columns (which go into the BlockManager as new
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 3a6913a924c1d..51ebd58c33343 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -940,8 +940,15 @@ def reindex(self, target, method=None, level=None, limit=None):
if self.equals(target):
indexer = None
else:
- indexer = self.get_indexer(target, method=method,
- limit=limit)
+ if self.is_unique:
+ indexer = self.get_indexer(target, method=method,
+ limit=limit)
+ else:
+ if method is not None or limit is not None:
+ raise ValueError("cannot reindex a non-unique index "
+ "with a method or limit")
+ indexer, missing = self.get_indexer_non_unique(target)
+
return target, indexer
def join(self, other, how='left', level=None, return_indexers=False):
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index f7187b7ae5d61..7a7210c479c67 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -457,7 +457,7 @@ def _reindex(keys, level=None):
else:
level = None
- if labels.is_unique:
+ if labels.is_unique and Index(keyarr).is_unique:
return _reindex(keyarr, level=level)
else:
indexer, missing = labels.get_indexer_non_unique(keyarr)
@@ -991,7 +991,6 @@ def _slice(self, indexer, axis=0):
def _setitem_with_indexer(self, indexer, value):
self.obj._set_values(indexer, value)
-
def _check_bool_indexer(ax, key):
# boolean indexing, need to check that the data are aligned, otherwise
# disallowed
@@ -1010,7 +1009,6 @@ def _check_bool_indexer(ax, key):
result = np.asarray(result, dtype=bool)
return result
-
def _is_series(obj):
from pandas.core.series import Series
return isinstance(obj, Series)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index ca04bd3fe26e0..8b711f5e077ce 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -36,7 +36,7 @@ class Block(object):
_can_hold_na = False
_downcast_dtype = None
- def __init__(self, values, items, ref_items, ndim=2, fastpath=False):
+ def __init__(self, values, items, ref_items, ndim=2, fastpath=False, placement=None):
if values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
@@ -45,7 +45,7 @@ def __init__(self, values, items, ref_items, ndim=2, fastpath=False):
raise ValueError('Wrong number of items passed %d, indices imply %d'
% (len(items), len(values)))
- self._ref_locs = None
+ self.set_ref_locs(placement)
self.values = values
self.ndim = ndim
@@ -71,10 +71,16 @@ def ref_locs(self):
self._ref_locs = indexer
return self._ref_locs
+ def reset_ref_locs(self):
+ """ reset the block ref_locs """
+ self._ref_locs = np.empty(len(self.items),dtype='int64')
+
def set_ref_locs(self, placement):
""" explicity set the ref_locs indexer, only necessary for duplicate indicies """
- if placement is not None:
- self._ref_locs = np.array(placement,dtype='int64')
+ if placement is None:
+ self._ref_locs = None
+ else:
+ self._ref_locs = np.array(placement,dtype='int64', copy=True)
def set_ref_items(self, ref_items, maybe_rename=True):
"""
@@ -129,7 +135,7 @@ def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
- return make_block(values, self.items, self.ref_items, klass=self.__class__, fastpath=True)
+ return make_block(values, self.items, self.ref_items, klass=self.__class__, fastpath=True, placement=self._ref_locs)
def merge(self, other):
if not self.ref_items.equals(other.ref_items):
@@ -148,7 +154,8 @@ def reindex_axis(self, indexer, axis=1, fill_value=np.nan, mask_info=None):
raise AssertionError('axis must be at least 1, got %d' % axis)
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
- return make_block(new_values, self.items, self.ref_items, fastpath=True)
+ return make_block(new_values, self.items, self.ref_items, fastpath=True,
+ placement=self._ref_locs)
def reindex_items_from(self, new_ref_items, copy=True):
"""
@@ -162,6 +169,7 @@ def reindex_items_from(self, new_ref_items, copy=True):
reindexed : Block
"""
new_ref_items, indexer = self.items.reindex(new_ref_items)
+
if indexer is None:
new_items = new_ref_items
new_values = self.values.copy() if copy else self.values
@@ -201,31 +209,6 @@ def delete(self, item):
new_values = np.delete(self.values, loc, 0)
return make_block(new_values, new_items, self.ref_items, klass=self.__class__, fastpath=True)
- def split_block_at(self, item):
- """
- Split block into zero or more blocks around columns with given label,
- for "deleting" a column without having to copy data by returning views
- on the original array.
-
- Returns
- -------
- generator of Block
- """
- loc = self.items.get_loc(item)
-
- if type(loc) == slice or type(loc) == int:
- mask = [True] * len(self)
- mask[loc] = False
- else: # already a mask, inverted
- mask = -loc
-
- for s, e in com.split_ranges(mask):
- yield make_block(self.values[s:e],
- self.items[s:e].copy(),
- self.ref_items,
- klass=self.__class__,
- fastpath=True)
-
def fillna(self, value, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
@@ -707,11 +690,12 @@ class ObjectBlock(Block):
is_object = True
_can_hold_na = True
- def __init__(self, values, items, ref_items, ndim=2, fastpath=False):
+ def __init__(self, values, items, ref_items, ndim=2, fastpath=False, placement=None):
if issubclass(values.dtype.type, basestring):
values = np.array(values, dtype=object)
- super(ObjectBlock, self).__init__(values, items, ref_items, ndim=ndim, fastpath=fastpath)
+ super(ObjectBlock, self).__init__(values, items, ref_items,
+ ndim=ndim, fastpath=fastpath, placement=placement)
@property
def is_bool(self):
@@ -736,6 +720,7 @@ def convert(self, convert_dates = True, convert_numeric = True, copy = True):
"""
# attempt to create new type blocks
+ is_unique = self.items.is_unique
blocks = []
for i, c in enumerate(self.items):
values = self.iget(i)
@@ -743,7 +728,8 @@ def convert(self, convert_dates = True, convert_numeric = True, copy = True):
values = com._possibly_convert_objects(values, convert_dates=convert_dates, convert_numeric=convert_numeric)
values = _block_shape(values)
items = self.items.take([i])
- newb = make_block(values, items, self.ref_items, fastpath=True)
+ placement = None if is_unique else [i]
+ newb = make_block(values, items, self.ref_items, fastpath=True, placement=placement)
blocks.append(newb)
return blocks
@@ -857,11 +843,12 @@ def re_replacer(s):
class DatetimeBlock(Block):
_can_hold_na = True
- def __init__(self, values, items, ref_items, ndim=2, fastpath=True):
+ def __init__(self, values, items, ref_items, ndim=2, fastpath=True, placement=None):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
- super(DatetimeBlock, self).__init__(values, items, ref_items, ndim=ndim, fastpath=fastpath)
+ super(DatetimeBlock, self).__init__(values, items, ref_items,
+ ndim=ndim, fastpath=fastpath, placement=placement)
def _gi(self, arg):
return lib.Timestamp(self.values[arg])
@@ -942,8 +929,7 @@ def get_values(self, dtype):
return res.reshape(self.values.shape)
return self.values
-
-def make_block(values, items, ref_items, klass = None, fastpath=False):
+def make_block(values, items, ref_items, klass=None, fastpath=False, placement=None):
if klass is None:
dtype = values.dtype
@@ -977,7 +963,7 @@ def make_block(values, items, ref_items, klass = None, fastpath=False):
if klass is None:
klass = ObjectBlock
- return klass(values, items, ref_items, ndim=values.ndim, fastpath=fastpath)
+ return klass(values, items, ref_items, ndim=values.ndim, fastpath=fastpath, placement=placement)
# TODO: flexible with index=None and/or items=None
@@ -1031,11 +1017,11 @@ def __nonzero__(self):
def ndim(self):
return len(self.axes)
- def set_axis(self, axis, value):
+ def set_axis(self, axis, value, maybe_rename=True, check_axis=True):
cur_axis = self.axes[axis]
value = _ensure_index(value)
- if len(value) != len(cur_axis):
+ if check_axis and len(value) != len(cur_axis):
raise Exception('Length mismatch (%d vs %d)'
% (len(value), len(cur_axis)))
@@ -1049,11 +1035,40 @@ def set_axis(self, axis, value):
# take via ref_locs
for block in self.blocks:
- block.set_ref_items(self.items, maybe_rename=True)
+ block.set_ref_items(self.items, maybe_rename=maybe_rename)
# set/reset ref_locs based on the new index
self._set_ref_locs(labels=value, do_refs=True)
+
+ def _reset_ref_locs(self):
+ """ take the current _ref_locs and reset ref_locs on the blocks
+ to correctly map, ignoring Nones;
+ reset both _items_map and _ref_locs """
+
+ # let's reset the ref_locs in individual blocks
+ if self.items.is_unique:
+ for b in self.blocks:
+ b._ref_locs = None
+ else:
+ for b in self.blocks:
+ b.reset_ref_locs()
+ self._rebuild_ref_locs()
+
+ self._ref_locs = None
+ self._items_map = None
+
+ def _rebuild_ref_locs(self):
+ """ take _ref_locs and set the individual block ref_locs, skipping Nones
+ no effect on a unique index """
+ if self._ref_locs is not None:
+ item_count = 0
+ for v in self._ref_locs:
+ if v is not None:
+ block, item_loc = v
+ block._ref_locs[item_loc] = item_count
+ item_count += 1
+
def _set_ref_locs(self, labels=None, do_refs=False):
"""
if we have a non-unique index on this axis, set the indexers
@@ -1065,61 +1080,50 @@ def _set_ref_locs(self, labels=None, do_refs=False):
"""
- im = None
if labels is None:
labels = self.items
- else:
- _ensure_index(labels)
# we are unique, and coming from a unique
- if labels.is_unique and not do_refs:
+ is_unique = labels.is_unique
+ if is_unique and not do_refs:
- # reset our ref locs
- self._ref_locs = None
- for b in self.blocks:
- b._ref_locs = None
+ if not self.items.is_unique:
+
+ # reset our ref locs
+ self._ref_locs = None
+ for b in self.blocks:
+ b._ref_locs = None
return None
# we are going to a non-unique index
# we have ref_locs on the block at this point
- # or if ref_locs are not set, then we must assume a block
- # ordering
- if not labels.is_unique and do_refs:
+ if (not is_unique and do_refs) or do_refs=='force':
# create the items map
im = getattr(self,'_items_map',None)
if im is None:
im = dict()
- def maybe_create_block(block):
- try:
- return d[block]
- except:
- im[block] = l = [ None ] * len(block.items)
- return l
-
- count_items = 0
for block in self.blocks:
# if we have a duplicate index but
- # _ref_locs have not been set....then
- # have to assume ordered blocks are passed
- num_items = len(block.items)
+ # _ref_locs have not been set
try:
rl = block.ref_locs
except:
- rl = np.arange(num_items) + count_items
+ raise AssertionError("cannot create BlockManager._ref_locs because "
+ "block [%s] with duplicate items [%s] "
+ "does not have _ref_locs set" % (block,labels))
- m = maybe_create_block(block)
+ m = maybe_create_block_in_items_map(im,block)
for i, item in enumerate(block.items):
m[i] = rl[i]
- count_items += num_items
self._items_map = im
# create the _ref_loc map here
- rl = np.empty(len(labels),dtype=object)
+ rl = [ None] * len(labels)
for block, items in im.items():
for i, loc in enumerate(items):
rl[loc] = (block,i)
@@ -1147,20 +1151,13 @@ def get_items_map(self, use_cached=True):
im = dict()
rl = self._set_ref_locs()
- def maybe_create_block(block):
- try:
- return im[block]
- except:
- im[block] = l = [ None ] * len(block.items)
- return l
-
# we have a non-duplicative index
if rl is None:
axis = self.axes[0]
for block in self.blocks:
- m = maybe_create_block(block)
+ m = maybe_create_block_in_items_map(im,block)
for i, item in enumerate(block.items):
m[i] = axis.get_loc(item)
@@ -1170,7 +1167,7 @@ def maybe_create_block(block):
for i, (block, idx) in enumerate(rl):
- m = maybe_create_block(block)
+ m = maybe_create_block_in_items_map(im,block)
m[idx] = i
self._items_map = im
@@ -1445,8 +1442,8 @@ def get_slice(self, slobj, axis=0, raise_on_error=False):
new_items,
new_items,
klass=blk.__class__,
- fastpath=True)
- newb.set_ref_locs(blk._ref_locs)
+ fastpath=True,
+ placement=blk._ref_locs)
new_blocks = [newb]
else:
return self.reindex_items(new_items)
@@ -1469,8 +1466,8 @@ def _slice_blocks(self, slobj, axis):
block.items,
block.ref_items,
klass=block.__class__,
- fastpath=True)
- newb.set_ref_locs(block._ref_locs)
+ fastpath=True,
+ placement=block._ref_locs)
new_blocks.append(newb)
return new_blocks
@@ -1640,13 +1637,41 @@ def consolidate(self):
def _consolidate_inplace(self):
if not self.is_consolidated():
+
self.blocks = _consolidate(self.blocks, self.items)
+
+ # reset our mappings
+ if not self.items.is_unique:
+ self._ref_locs = None
+ self._items_map = None
+ self._set_ref_locs(do_refs=True)
+
self._is_consolidated = True
self._known_consolidated = True
def get(self, item):
- _, block = self._find_block(item)
- return block.get(item)
+ if self.items.is_unique:
+ _, block = self._find_block(item)
+ return block.get(item)
+ else:
+ indexer = self.items.get_loc(item)
+ ref_locs = np.array(self._set_ref_locs())
+
+ # duplicate index but only a single result
+ if com.is_integer(indexer):
+ b, loc = ref_locs[indexer]
+ return b.iget(loc)
+ else:
+
+ # we have a multiple result, potentially across blocks
+ values = [ block.iget(i) for block, i in ref_locs[indexer] ]
+ index = self.items[indexer]
+ axes = [ index ] + self.axes[1:]
+ blocks = form_blocks(values, index, axes)
+ mgr = BlockManager(blocks, axes)
+ mgr._consolidate_inplace()
+ return mgr
+
def iget(self, i):
item = self.items[i]
@@ -1672,18 +1697,23 @@ def get_scalar(self, tup):
return blk.values[full_loc]
def delete(self, item):
- i, _ = self._find_block(item)
- loc = self.items.get_loc(item)
- self._delete_from_block(i, item)
- if com._is_bool_indexer(loc): # dupe keys may return mask
- loc = [i for i, v in enumerate(loc) if v]
+ is_unique = self.items.is_unique
+ loc = self.items.get_loc(item)
+ # dupe keys may return mask
+ loc = _possibly_convert_to_indexer(loc)
+ self._delete_from_all_blocks(loc, item)
+
+ # _ref_locs, and _items_map are good here
new_items = self.items.delete(loc)
-
self.set_items_norename(new_items)
+
self._known_consolidated = False
+ if not is_unique:
+ self._consolidate_inplace()
+
def set(self, item, value):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
@@ -1704,6 +1734,7 @@ def _set_item(item, arr):
block.set(item, arr)
try:
+
loc = self.items.get_loc(item)
if isinstance(loc, int):
_set_item(self.items[loc], value)
@@ -1712,16 +1743,43 @@ def _set_item(item, arr):
if len(value) != len(subset):
raise AssertionError(
'Number of items to set did not match')
- for i, (item, arr) in enumerate(zip(subset, value)):
- _set_item(item, arr[None, :])
+
+ # we are inserting multiple non-unique items as replacements
+ # we are inserting one by one, so the index can go from unique
+ # to non-unique during the loop, need to have _ref_locs defined
+ # at all times
+ if np.isscalar(item) and com.is_list_like(loc):
+
+ # first delete from all blocks
+ self.delete(item)
+
+ loc = _possibly_convert_to_indexer(loc)
+ for i, (l, arr) in enumerate(zip(loc, value)):
+
+ # insert the item
+ self.insert(l, item, arr[None, :], allow_duplicates=True)
+
+ # reset the _ref_locs on individual blocks
+ # rebuild ref_locs
+ if self.items.is_unique:
+ self._reset_ref_locs()
+ self._set_ref_locs(do_refs='force')
+
+ self._rebuild_ref_locs()
+
+
+ else:
+ for i, (item, arr) in enumerate(zip(subset, value)):
+ _set_item(item, arr[None, :])
except KeyError:
# insert at end
self.insert(len(self.items), item, value)
self._known_consolidated = False
- def insert(self, loc, item, value):
- if item in self.items:
+ def insert(self, loc, item, value, allow_duplicates=False):
+
+ if not allow_duplicates and item in self.items:
raise Exception('cannot insert %s, already exists' % item)
try:
@@ -1747,20 +1805,89 @@ def insert(self, loc, item, value):
self._known_consolidated = False
def set_items_norename(self, value):
- value = _ensure_index(value)
- self.axes[0] = value
+ self.set_axis(0, value, maybe_rename=False, check_axis=False)
- for block in self.blocks:
- block.set_ref_items(value, maybe_rename=False)
+ def _delete_from_all_blocks(self, loc, item):
+ """ delete from the items loc the item
+ the item could be in multiple blocks which could
+ change each iteration (as we split blocks) """
+
+ # possibly convert to an indexer
+ loc = _possibly_convert_to_indexer(loc)
+
+ if isinstance(loc, (list,tuple,np.ndarray)):
+ for l in loc:
+ for i, b in enumerate(self.blocks):
+ if item in b.items:
+ self._delete_from_block(i, item)
+
+ else:
+ i, _ = self._find_block(item)
+ self._delete_from_block(i, item)
def _delete_from_block(self, i, item):
"""
Delete and maybe remove the whole block
+
+ Remap the split blocks to their old ranges,
+ so after this function, _ref_locs and _items_map (if used)
+ are correct for the items, None fills holes in _ref_locs
"""
- block = self.blocks.pop(i)
- for b in block.split_block_at(item):
- self.blocks.append(b)
+ block = self.blocks.pop(i)
+ ref_locs = self._set_ref_locs()
+ prev_items_map = self._items_map.pop(block) if ref_locs is not None else None
+
+ # compute the split mask
+ loc = block.items.get_loc(item)
+ if type(loc) == slice or com.is_integer(loc):
+ mask = np.array([True] * len(block))
+ mask[loc] = False
+ else: # already a mask, inverted
+ mask = -loc
+
+ # split the block
+ counter = 0
+ for s, e in com.split_ranges(mask):
+
+ sblock = make_block(block.values[s:e],
+ block.items[s:e].copy(),
+ block.ref_items,
+ klass=block.__class__,
+ fastpath=True)
+ self.blocks.append(sblock)
+
+ # update the _ref_locs/_items_map
+ if ref_locs is not None:
+
+ # fill the item_map out for this sub-block
+ m = maybe_create_block_in_items_map(self._items_map,sblock)
+ for j, itm in enumerate(sblock.items):
+
+ # is this item masked (e.g. was deleted)?
+ while (True):
+
+ if counter > len(mask) or mask[counter]:
+ break
+ else:
+ counter += 1
+
+ # find my mapping location
+ m[j] = prev_items_map[counter]
+ counter += 1
+
+ # set the ref_locs in this block
+ sblock.set_ref_locs(m)
+
+ # reset the ref_locs to the new structure
+ if ref_locs is not None:
+
+ # items_map is now good, with the original locations
+ self._set_ref_locs(do_refs=True)
+
+ # reset the ref_locs based on the now good block._ref_locs
+ self._reset_ref_locs()
+
def _add_new_block(self, item, value, loc=None):
# Do we care about dtype at the moment?
@@ -1771,6 +1898,26 @@ def _add_new_block(self, item, value, loc=None):
self.items, fastpath=True)
self.blocks.append(new_block)
+ # set ref_locs based on the this new block
+ # and add to the ref/items maps
+ if not self.items.is_unique:
+
+ # insert into the ref_locs at the appropriate location
+ # _ref_locs is already long enough,
+ # but may need to shift elements
+ new_block.set_ref_locs([0])
+
+ # need to shift elements to the right
+ if self._ref_locs[loc] is not None:
+ for i in reversed(range(loc+1,len(self._ref_locs))):
+ self._ref_locs[i] = self._ref_locs[i-1]
+
+ self._ref_locs[loc] = (new_block, 0)
+
+ # and reset
+ self._reset_ref_locs()
+ self._set_ref_locs(do_refs=True)
+
def _find_block(self, item):
self._check_have(item)
for i, block in enumerate(self.blocks):
@@ -1827,17 +1974,18 @@ def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=np.nan):
def _reindex_indexer_items(self, new_items, indexer, fill_value):
# TODO: less efficient than I'd like
+ is_unique = self.items.is_unique
item_order = com.take_1d(self.items.values, indexer)
# keep track of what items aren't found anywhere
mask = np.zeros(len(item_order), dtype=bool)
-
new_axes = [new_items] + self.axes[1:]
new_blocks = []
for blk in self.blocks:
blk_indexer = blk.items.get_indexer(item_order)
selector = blk_indexer != -1
+
# update with observed items
mask |= selector
@@ -1997,7 +2145,7 @@ def rename_axis(self, mapper, axis=1):
def rename_items(self, mapper, copydata=True):
new_items = Index([mapper(x) for x in self.items])
- new_items.is_unique
+ is_unique = new_items.is_unique
new_blocks = []
for block in self.blocks:
@@ -2057,7 +2205,8 @@ def create_block_manager_from_blocks(blocks, axes):
# if we are passed values, make the blocks
if len(blocks) == 1 and not isinstance(blocks[0], Block):
- blocks = [ make_block(blocks[0], axes[0], axes[0]) ]
+ placement = None if axes[0].is_unique else np.arange(len(axes[0]))
+ blocks = [ make_block(blocks[0], axes[0], axes[0], placement=placement) ]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
@@ -2077,6 +2226,15 @@ def create_block_manager_from_arrays(arrays, names, axes):
except (ValueError):
construction_error(len(arrays),arrays[0].shape[1:],axes)
+def maybe_create_block_in_items_map(im,block):
+ """ create/return the block in an items_map """
+ try:
+ return im[block]
+ except:
+ im[block] = l = [ None ] * len(block.items)
+ return l
+
+
def form_blocks(arrays, names, axes):
# pre-filter out items if we passed it
@@ -2154,7 +2312,8 @@ def form_blocks(arrays, names, axes):
block_values = np.empty(shape, dtype=object)
block_values.fill(nan)
- na_block = make_block(block_values, extra_items, items)
+ placement = None if is_unique else np.arange(len(extra_items))
+ na_block = make_block(block_values, extra_items, items, placement=placement)
blocks.append(na_block)
return blocks
@@ -2168,9 +2327,9 @@ def _simple_blockify(tuples, ref_items, dtype, is_unique=True):
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
- block = make_block(values, block_items, ref_items)
- if not is_unique:
- block.set_ref_locs(placement)
+ if is_unique:
+ placement=None
+ block = make_block(values, block_items, ref_items, placement=placement)
return [ block ]
def _multi_blockify(tuples, ref_items, dtype = None, is_unique=True):
@@ -2183,9 +2342,9 @@ def _multi_blockify(tuples, ref_items, dtype = None, is_unique=True):
for dtype, tup_block in grouper:
block_items, values, placement = _stack_arrays(list(tup_block), ref_items, dtype)
- block = make_block(values, block_items, ref_items)
- if not is_unique:
- block.set_ref_locs(placement)
+ if is_unique:
+ placement=None
+ block = make_block(values, block_items, ref_items, placement=placement)
new_blocks.append(block)
return new_blocks
@@ -2308,7 +2467,16 @@ def _merge_blocks(blocks, items, dtype=None):
new_values = _vstack([ b.values for b in blocks ], dtype)
new_items = blocks[0].items.append([b.items for b in blocks[1:]])
new_block = make_block(new_values, new_items, items)
- return new_block.reindex_items_from(items)
+
+ # unique, can reindex
+ if items.is_unique:
+ return new_block.reindex_items_from(items)
+
+ # merge the ref_locs
+ new_ref_locs = [ b._ref_locs for b in blocks ]
+ if all([ x is not None for x in new_ref_locs ]):
+ new_block.set_ref_locs(np.concatenate(new_ref_locs))
+ return new_block
def _block_shape(values, ndim=1, shape=None):
@@ -2328,3 +2496,10 @@ def _vstack(to_stack, dtype):
else:
return np.vstack(to_stack)
+
+def _possibly_convert_to_indexer(loc):
+ if com._is_bool_indexer(loc):
+ loc = [i for i, v in enumerate(loc) if v]
+ elif isinstance(loc,slice):
+ loc = range(loc.start,loc.stop)
+ return loc
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 3711a814cc273..e48cdb52ebae5 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -2825,6 +2825,126 @@ def test_constructor_column_duplicates(self):
[('a', [8]), ('a', [5]), ('b', [6])],
columns=['b', 'a', 'a'])
+
+ def test_column_duplicates_operations(self):
+
+ def check(result, expected=None):
+ if expected is not None:
+ assert_frame_equal(result,expected)
+ result.dtypes
+ str(result)
+
+ # assignment
+ # GH 3687
+ arr = np.random.randn(3, 2)
+ idx = range(2)
+ df = DataFrame(arr, columns=['A', 'A'])
+ df.columns = idx
+ expected = DataFrame(arr,columns=idx)
+ check(df,expected)
+
+ idx = date_range('20130101',periods=4,freq='Q-NOV')
+ df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['a','a','a','a'])
+ df.columns = idx
+ expected = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=idx)
+ check(df,expected)
+
+ # insert
+ df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['foo','bar','foo','hello'])
+ df['string'] = 'bah'
+ expected = DataFrame([[1,1,1,5,'bah'],[1,1,2,5,'bah'],[2,1,3,5,'bah']],columns=['foo','bar','foo','hello','string'])
+ check(df,expected)
+
+ # insert same dtype
+ df['foo2'] = 3
+ expected = DataFrame([[1,1,1,5,'bah',3],[1,1,2,5,'bah',3],[2,1,3,5,'bah',3]],columns=['foo','bar','foo','hello','string','foo2'])
+ check(df,expected)
+
+ # set (non-dup)
+ df['foo2'] = 4
+ expected = DataFrame([[1,1,1,5,'bah',4],[1,1,2,5,'bah',4],[2,1,3,5,'bah',4]],columns=['foo','bar','foo','hello','string','foo2'])
+ check(df,expected)
+ df['foo2'] = 3
+
+ # delete (non dup)
+ del df['bar']
+ expected = DataFrame([[1,1,5,'bah',3],[1,2,5,'bah',3],[2,3,5,'bah',3]],columns=['foo','foo','hello','string','foo2'])
+ check(df,expected)
+
+ # try to delete again (its not consolidated)
+ del df['hello']
+ expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])
+ check(df,expected)
+
+ # consolidate
+ df = df.consolidate()
+ expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])
+ check(df,expected)
+
+ # insert
+ df.insert(2,'new_col',5.)
+ expected = DataFrame([[1,1,5.,'bah',3],[1,2,5.,'bah',3],[2,3,5.,'bah',3]],columns=['foo','foo','new_col','string','foo2'])
+ check(df,expected)
+
+ # insert a dup
+ self.assertRaises(Exception, df.insert, 2, 'new_col', 4.)
+ df.insert(2,'new_col',4.,allow_duplicates=True)
+ expected = DataFrame([[1,1,4.,5.,'bah',3],[1,2,4.,5.,'bah',3],[2,3,4.,5.,'bah',3]],columns=['foo','foo','new_col','new_col','string','foo2'])
+ check(df,expected)
+
+ # delete (dup)
+ del df['foo']
+ expected = DataFrame([[4.,5.,'bah',3],[4.,5.,'bah',3],[4.,5.,'bah',3]],columns=['new_col','new_col','string','foo2'])
+ assert_frame_equal(df,expected)
+
+ # dup across dtypes
+ df = DataFrame([[1,1,1.,5],[1,1,2.,5],[2,1,3.,5]],columns=['foo','bar','foo','hello'])
+ check(df)
+
+ df['foo2'] = 7.
+ expected = DataFrame([[1,1,1.,5,7.],[1,1,2.,5,7.],[2,1,3.,5,7.]],columns=['foo','bar','foo','hello','foo2'])
+ check(df,expected)
+
+ result = df['foo']
+ expected = DataFrame([[1,1.],[1,2.],[2,3.]],columns=['foo','foo'])
+ check(result,expected)
+
+ # multiple replacements
+ df['foo'] = 'string'
+ expected = DataFrame([['string',1,'string',5,7.],['string',1,'string',5,7.],['string',1,'string',5,7.]],columns=['foo','bar','foo','hello','foo2'])
+ check(df,expected)
+
+ del df['foo']
+ expected = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','hello','foo2'])
+ check(df,expected)
+
+ # reindex
+ df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])
+ expected = DataFrame([[1],[1],[1]],columns=['bar'])
+ result = df.reindex(columns=['bar'])
+ check(result,expected)
+
+ result1 = DataFrame([[1],[1],[1]],columns=['bar']).reindex(columns=['bar','foo'])
+ result2 = df.reindex(columns=['bar','foo'])
+ check(result2,result1)
+
+ # drop
+ df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])
+ df = df.drop(['a'],axis=1)
+ expected = DataFrame([[1],[1],[1]],columns=['bar'])
+ check(df,expected)
+
+ def test_insert_benchmark(self):
+ # from the vb_suite/frame_methods/frame_insert_columns
+ N = 10
+ K = 5
+ df = DataFrame(index=range(N))
+ new_col = np.random.randn(N)
+ for i in range(K):
+ df[i] = new_col
+ expected = DataFrame(np.repeat(new_col,K).reshape(N,K),index=range(N))
+ assert_frame_equal(df,expected)
+
def test_constructor_single_value(self):
# expecting single value upcasting here
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index e25bd0de769a7..0f3b8c1634416 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -1,7 +1,7 @@
# pylint: disable=W0102
import unittest
-
+import nose
import numpy as np
from pandas import Index, MultiIndex, DataFrame, Series
@@ -173,6 +173,11 @@ def test_delete(self):
self.assertRaises(Exception, self.fblock.delete, 'b')
def test_split_block_at(self):
+
+ # with dup column support this method was taken out
+ # GH3679
+ raise nose.SkipTest
+
bs = list(self.fblock.split_block_at('a'))
self.assertEqual(len(bs), 1)
self.assertTrue(np.array_equal(bs[0].items, ['c', 'e']))
@@ -267,9 +272,21 @@ def test_duplicate_item_failure(self):
for b in blocks:
b.ref_items = items
+ # test trying to create _ref_locs with/o ref_locs set on the blocks
+ self.assertRaises(AssertionError, BlockManager, blocks, [items, np.arange(N)])
+
+ blocks[0].set_ref_locs([0])
+ blocks[1].set_ref_locs([1])
mgr = BlockManager(blocks, [items, np.arange(N)])
mgr.iget(1)
+ # invalidate the _ref_locs
+ for b in blocks:
+ b._ref_locs = None
+ mgr._ref_locs = None
+ mgr._items_map = None
+ self.assertRaises(AssertionError, mgr._set_ref_locs, do_refs=True)
+
def test_contains(self):
self.assert_('a' in self.mgr)
self.assert_('baz' not in self.mgr)
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index f0d8b922be5bf..b19d099790566 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -781,10 +781,10 @@ def _upcast_blocks(blocks):
for block in blocks:
if isinstance(block, IntBlock):
newb = make_block(block.values.astype(float), block.items,
- block.ref_items)
+ block.ref_items, placement=block._ref_locs)
elif isinstance(block, BoolBlock):
newb = make_block(block.values.astype(object), block.items,
- block.ref_items)
+ block.ref_items, placement=block._ref_locs)
else:
newb = block
new_blocks.append(newb)
| closes #3679, #3687
Here's example of various operations
```
In [3]: df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],
columns=['foo','bar','foo','hello'])
In [27]: df.columns.is_unique
Out[27]: False
In [4]: # insert
In [5]: df['string'] = 'bah'
In [6]: df
Out[6]:
foo bar foo hello string
0 1 1 1 5 bah
1 1 1 2 5 bah
2 2 1 3 5 bah
In [7]: # insert same dtype
In [8]: df['foo2'] = 3
In [9]: df
Out[9]:
foo bar foo hello string foo2
0 1 1 1 5 bah 3
1 1 1 2 5 bah 3
2 2 1 3 5 bah 3
In [10]: # delete (non dup)
In [11]: del df['bar']
In [12]: df
Out[12]:
foo foo hello string foo2
0 1 1 5 bah 3
1 1 2 5 bah 3
2 2 3 5 bah 3
In [13]: # try to delete again (its not consolidated)
In [14]: del df['hello']
In [15]: df
Out[15]:
foo foo string foo2
0 1 1 bah 3
1 1 2 bah 3
2 2 3 bah 3
In [16]: # insert
In [17]: df.insert(2,'new_col',5.)
In [18]: df
Out[18]:
foo foo new_col string foo2
0 1 1 5 bah 3
1 1 2 5 bah 3
2 2 3 5 bah 3
```
This is the current default behavior now
```
In [19]: # insert a dup
In [20]: df.insert(2,'new_col',4.)
Exception: cannot insert new_col, already exists
In [21]: # insert a dup
In [22]: df.insert(2,'new_col',4.,allow_duplicates=True)
In [23]: df
Out[23]:
foo foo new_col new_col string foo2
0 1 1 4 5 bah 3
1 1 2 4 5 bah 3
2 2 3 4 5 bah 3
```
```
In [24]: # delete (dup)
In [25]: del df['foo']
In [26]: df
Out[26]:
new_col new_col string foo2
0 4 5 bah 3
1 4 5 bah 3
2 4 5 bah 3
```
Don't try this at home
1) duplicates across dtypes
2) assigning those duplicates
```
In [5]: df = DataFrame([[1,1,1.,5],[1,1,2.,5],[2,1,3.,5]],columns=['foo','bar','foo','hello'])
In [6]: df.dtypes
Out[6]:
foo int64
bar int64
foo float64
hello int64
dtype: object
In [7]: df['foo'] = 'string'
In [8]: df
Out[8]:
foo bar foo hello
0 string 1 string 5
1 string 1 string 5
2 string 1 string 5
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3683 | 2013-05-22T14:44:07Z | 2013-05-30T00:48:23Z | 2013-05-30T00:48:23Z | 2014-06-12T20:29:27Z |
ENH: Add filter methods to SeriesGroupBy, DataFrameGroupBy GH919 | diff --git a/RELEASE.rst b/RELEASE.rst
index b5dd3eef68dea..1e7880016cdee 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -48,6 +48,8 @@ pandas 0.11.1
- Add iterator to ``Series.str`` (GH3638_)
- ``pd.set_option()`` now allows N option, value pairs (GH3667_).
- Added keyword parameters for different types of scatter_matrix subplots
+ - A ``filter`` method on grouped Series or DataFrames returns a subset of
+ the original (GH3680_, GH919_)
**Improvements to existing features**
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index bc2ff9bbe1013..c5e38a72ec3e9 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -41,6 +41,12 @@ following:
- Standardizing data (zscore) within group
- Filling NAs within groups with a value derived from each group
+ - **Filtration**: discard some groups, according to a group-wise computation
+ that evaluates True or False. Some examples:
+
+ - Discarding data that belongs to groups with only a few members
+ - Filtering out data based on the group sum or mean
+
- Some combination of the above: GroupBy will examine the results of the apply
step and try to return a sensibly combined result if it doesn't fit into
either of the above two categories
@@ -489,6 +495,39 @@ and that the transformed data contains no NAs.
grouped_trans.count() # counts after transformation
grouped_trans.size() # Verify non-NA count equals group size
+.. _groupby.filter:
+
+Filtration
+----------
+
+The ``filter`` method returns a subset of the original object. Suppose we
+want to take only elements that belong to groups with a group sum greater
+than 2.
+
+.. ipython:: python
+
+ s = Series([1, 1, 2, 3, 3, 3])
+ s.groupby(s).filter(lambda x: x.sum() > 2)
+
+The argument of ``filter`` must be a function that, applied to the group as a
+whole, returns ``True`` or ``False``.
+
+Another useful operation is filtering out elements that belong to groups
+with only a couple members.
+
+.. ipython:: python
+
+ df = DataFrame({'A': arange(8), 'B': list('aabbbbcc')})
+ df.groupby('B').filter(lambda x: len(x) > 2)
+
+Alternatively, instead of dropping the offending groups, we can return
+like-indexed objects where the groups that do not pass the filter are filled
+with NaNs.
+
+.. ipython:: python
+
+ df.groupby('B').filter(lambda x: len(x) > 2, dropna=False)
+
.. _groupby.dispatch:
Dispatching to instance methods
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index b2fee1acbc4d6..0641ffae542c0 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -237,6 +237,35 @@ Enhancements
pd.get_option('a.b')
pd.get_option('b.c')
+ - The ``filter`` method for group objects returns a subset of the original
+ object. Suppose we want to take only elements that belong to groups with a
+ group sum greater than 2.
+
+ .. ipython:: python
+
+ s = Series([1, 1, 2, 3, 3, 3])
+ s.groupby(s).filter(lambda x: x.sum() > 2)
+
+ The argument of ``filter`` must be a function that, applied to the group as a
+ whole, returns ``True`` or ``False``.
+
+ Another useful operation is filtering out elements that belong to groups
+ with only a couple members.
+
+ .. ipython:: python
+
+ df = DataFrame({'A': arange(8), 'B': list('aabbbbcc')})
+ df.groupby('B').filter(lambda x: len(x) > 2)
+
+ Alternatively, instead of dropping the offending groups, we can return
+ like-indexed objects where the groups that do not pass the filter are
+ filled with NaNs.
+
+ .. ipython:: python
+
+ df.groupby('B').filter(lambda x: len(x) > 2, dropna=False)
+
+
Bug Fixes
~~~~~~~~~
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 64606a6e644f9..0be5d438e5e7c 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1558,6 +1558,42 @@ def transform(self, func, *args, **kwargs):
result = _possibly_downcast_to_dtype(result, dtype)
return self.obj.__class__(result,index=self.obj.index,name=self.obj.name)
+ def filter(self, func, dropna=True, *args, **kwargs):
+ """
+ Return a copy of a Series excluding elements from groups that
+ do not satisfy the boolean criterion specified by func.
+
+ Parameters
+ ----------
+ func : function
+ To apply to each group. Should return True or False.
+ dropna : Drop groups that do not pass the filter. True by default;
+ if False, groups that evaluate False are filled with NaNs.
+
+ Example
+ -------
+ >>> grouped.filter(lambda x: x.mean() > 0)
+
+ Returns
+ -------
+ filtered : Series
+ """
+ if isinstance(func, basestring):
+ wrapper = lambda x: getattr(x, func)(*args, **kwargs)
+ else:
+ wrapper = lambda x: func(x, *args, **kwargs)
+
+ indexers = [self.obj.index.get_indexer(group.index) \
+ if wrapper(group) else [] for _ , group in self]
+
+ if len(indexers) == 0:
+ filtered = self.obj.take([]) # because np.concatenate would fail
+ else:
+ filtered = self.obj.take(np.concatenate(indexers))
+ if dropna:
+ return filtered
+ else:
+ return filtered.reindex(self.obj.index) # Fill with NaNs.
class NDFrameGroupBy(GroupBy):
@@ -1928,47 +1964,22 @@ def transform(self, func, *args, **kwargs):
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
-
- if isinstance(func, basestring):
- fast_path = lambda group: getattr(group, func)(*args, **kwargs)
- slow_path = lambda group: group.apply(lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
- else:
- fast_path = lambda group: func(group, *args, **kwargs)
- slow_path = lambda group: group.apply(lambda x: func(x, *args, **kwargs), axis=self.axis)
+ fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
- # decide on a fast path
if path is None:
-
- path = slow_path
+ # Try slow path and fast path.
try:
- res = slow_path(group)
-
- # if we make it here, test if we can use the fast path
- try:
- res_fast = fast_path(group)
-
- # compare that we get the same results
- if res.shape == res_fast.shape:
- res_r = res.values.ravel()
- res_fast_r = res_fast.values.ravel()
- mask = notnull(res_r)
- if (res_r[mask] == res_fast_r[mask]).all():
- path = fast_path
-
- except:
- pass
+ path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except Exception: # pragma: no cover
res = fast_path(group)
path = fast_path
-
else:
-
res = path(group)
# broadcasting
@@ -1988,6 +1999,35 @@ def transform(self, func, *args, **kwargs):
concatenated.sort_index(inplace=True)
return concatenated
+ def _define_paths(self, func, *args, **kwargs):
+ if isinstance(func, basestring):
+ fast_path = lambda group: getattr(group, func)(*args, **kwargs)
+ slow_path = lambda group: group.apply(lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
+ else:
+ fast_path = lambda group: func(group, *args, **kwargs)
+ slow_path = lambda group: group.apply(lambda x: func(x, *args, **kwargs), axis=self.axis)
+ return fast_path, slow_path
+
+ def _choose_path(self, fast_path, slow_path, group):
+ path = slow_path
+ res = slow_path(group)
+
+ # if we make it here, test if we can use the fast path
+ try:
+ res_fast = fast_path(group)
+
+ # compare that we get the same results
+ if res.shape == res_fast.shape:
+ res_r = res.values.ravel()
+ res_fast_r = res_fast.values.ravel()
+ mask = notnull(res_r)
+ if (res_r[mask] == res_fast_r[mask]).all():
+ path = fast_path
+
+ except:
+ pass
+ return path, res
+
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
@@ -2008,6 +2048,63 @@ def _transform_item_by_item(self, obj, wrapper):
return DataFrame(output, index=obj.index, columns=columns)
+ def filter(self, func, dropna=True, *args, **kwargs):
+ """
+ Return a copy of a DataFrame excluding elements from groups that
+ do not satisfy the boolean criterion specified by func.
+
+ Parameters
+ ----------
+ f : function
+ Function to apply to each subframe. Should return True or False.
+ dropna : Drop groups that do not pass the filter. True by default;
+ if False, groups that evaluate False are filled with NaNs.
+
+ Note
+ ----
+ Each subframe is endowed the attribute 'name' in case you need to know
+ which group you are working on.
+
+ Example
+ --------
+ >>> grouped = df.groupby(lambda x: mapping[x])
+ >>> grouped.filter(lambda x: x['A'].sum() + x['B'].sum() > 0)
+ """
+ from pandas.tools.merge import concat
+
+ indexers = []
+
+ obj = self._obj_with_exclusions
+ gen = self.grouper.get_iterator(obj, axis=self.axis)
+
+ fast_path, slow_path = self._define_paths(func, *args, **kwargs)
+
+ path = None
+ for name, group in gen:
+ object.__setattr__(group, 'name', name)
+
+ if path is None:
+ # Try slow path and fast path.
+ try:
+ path, res = self._choose_path(fast_path, slow_path, group)
+ except Exception: # pragma: no cover
+ res = fast_path(group)
+ path = fast_path
+ else:
+ res = path(group)
+
+ if res:
+ indexers.append(self.obj.index.get_indexer(group.index))
+
+ if len(indexers) == 0:
+ filtered = self.obj.take([]) # because np.concatenate would fail
+ else:
+ filtered = self.obj.take(np.concatenate(indexers))
+ if dropna:
+ return filtered
+ else:
+ return filtered.reindex(self.obj.index) # Fill with NaNs.
+
class DataFrameGroupBy(NDFrameGroupBy):
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index cf62b16a9dd2a..f3a608b82e756 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -2498,6 +2498,155 @@ def test_groupby_with_empty(self):
grouped = series.groupby(grouper)
assert next(iter(grouped), None) is None
+ def test_filter_series(self):
+ import pandas as pd
+ s = pd.Series([1, 3, 20, 5, 22, 24, 7])
+ expected_odd = pd.Series([1, 3, 5, 7], index=[0, 1, 3, 6])
+ expected_even = pd.Series([20, 22, 24], index=[2, 4, 5])
+ grouper = s.apply(lambda x: x % 2)
+ grouped = s.groupby(grouper)
+ assert_series_equal(
+ grouped.filter(lambda x: x.mean() < 10), expected_odd)
+ assert_series_equal(
+ grouped.filter(lambda x: x.mean() > 10), expected_even)
+ # Test dropna=False.
+ assert_series_equal(
+ grouped.filter(lambda x: x.mean() < 10, dropna=False),
+ expected_odd.reindex(s.index))
+ assert_series_equal(
+ grouped.filter(lambda x: x.mean() > 10, dropna=False),
+ expected_even.reindex(s.index))
+
+ def test_filter_single_column_df(self):
+ import pandas as pd
+ df = pd.DataFrame([1, 3, 20, 5, 22, 24, 7])
+ expected_odd = pd.DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
+ expected_even = pd.DataFrame([20, 22, 24], index=[2, 4, 5])
+ grouper = df[0].apply(lambda x: x % 2)
+ grouped = df.groupby(grouper)
+ assert_frame_equal(
+ grouped.filter(lambda x: x.mean() < 10), expected_odd)
+ assert_frame_equal(
+ grouped.filter(lambda x: x.mean() > 10), expected_even)
+ # Test dropna=False.
+ assert_frame_equal(
+ grouped.filter(lambda x: x.mean() < 10, dropna=False),
+ expected_odd.reindex(df.index))
+ assert_frame_equal(
+ grouped.filter(lambda x: x.mean() > 10, dropna=False),
+ expected_even.reindex(df.index))
+
+ def test_filter_multi_column_df(self):
+ import pandas as pd
+ df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
+ grouper = df['A'].apply(lambda x: x % 2)
+ grouped = df.groupby(grouper)
+ expected = pd.DataFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
+ assert_frame_equal(
+ grouped.filter(lambda x: x['A'].sum() - x['B'].sum() > 10), expected)
+
+ def test_filter_mixed_df(self):
+ import pandas as pd
+ df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
+ grouper = df['A'].apply(lambda x: x % 2)
+ grouped = df.groupby(grouper)
+ expected = pd.DataFrame({'A': [12, 12], 'B': ['b', 'c']},
+ index=[1, 2])
+ assert_frame_equal(
+ grouped.filter(lambda x: x['A'].sum() > 10), expected)
+
+ def test_filter_out_all_groups(self):
+ import pandas as pd
+ s = pd.Series([1, 3, 20, 5, 22, 24, 7])
+ grouper = s.apply(lambda x: x % 2)
+ grouped = s.groupby(grouper)
+ assert_series_equal(
+ grouped.filter(lambda x: x.mean() > 1000), s[[]])
+ df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
+ grouper = df['A'].apply(lambda x: x % 2)
+ grouped = df.groupby(grouper)
+ assert_frame_equal(
+ grouped.filter(lambda x: x['A'].sum() > 1000), df.ix[[]])
+
+ def test_filter_out_no_groups(self):
+ import pandas as pd
+ s = pd.Series([1, 3, 20, 5, 22, 24, 7])
+ grouper = s.apply(lambda x: x % 2)
+ grouped = s.groupby(grouper)
+ filtered = grouped.filter(lambda x: x.mean() > 0)
+ filtered.sort() # was sorted by group
+ s.sort() # was sorted arbitrarily
+ assert_series_equal(filtered, s)
+ df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
+ grouper = df['A'].apply(lambda x: x % 2)
+ grouped = df.groupby(grouper)
+ filtered = grouped.filter(lambda x: x['A'].mean() > 0)
+ assert_frame_equal(filtered.sort(), df)
+
+ def test_filter_condition_raises(self):
+ import pandas as pd
+ def raise_if_sum_is_zero(x):
+ if x.sum() == 0:
+ raise ValueError
+ else:
+ return x.sum() > 0
+ s = pd.Series([-1,0,1,2])
+ grouper = s.apply(lambda x: x % 2)
+ grouped = s.groupby(grouper)
+ self.assertRaises(ValueError,
+ lambda: grouped.filter(raise_if_sum_is_zero))
+
+ def test_filter_against_workaround(self):
+ np.random.seed(0)
+ # Series of ints
+ s = Series(np.random.randint(0,100,1000))
+ grouper = s.apply(lambda x: np.round(x, -1))
+ grouped = s.groupby(grouper)
+ f = lambda x: x.mean() > 10
+ old_way = s[grouped.transform(f).astype('bool')]
+ new_way = grouped.filter(f)
+ assert_series_equal(new_way.order(), old_way.order())
+
+ # Series of floats
+ s = 100*Series(np.random.random(1000))
+ grouper = s.apply(lambda x: np.round(x, -1))
+ grouped = s.groupby(grouper)
+ f = lambda x: x.mean() > 10
+ old_way = s[grouped.transform(f).astype('bool')]
+ new_way = grouped.filter(f)
+ assert_series_equal(new_way.order(), old_way.order())
+
+ # Set up DataFrame of ints, floats, strings.
+ from string import ascii_lowercase
+ letters = np.array(list(ascii_lowercase))
+ N = 1000
+ random_letters = letters.take(np.random.randint(0, 26, N))
+ df = DataFrame({'ints': Series(np.random.randint(0, 100, N)),
+ 'floats': N/10*Series(np.random.random(N)),
+ 'letters': Series(random_letters)})
+
+ # Group by ints; filter on floats.
+ grouped = df.groupby('ints')
+ old_way = df[grouped.floats.\
+ transform(lambda x: x.mean() > N/20).astype('bool')]
+ new_way = grouped.filter(lambda x: x['floats'].mean() > N/20)
+ assert_frame_equal(new_way.sort(), old_way.sort())
+
+ # Group by floats (rounded); filter on strings.
+ grouper = df.floats.apply(lambda x: np.round(x, -1))
+ grouped = df.groupby(grouper)
+ old_way = df[grouped.letters.\
+ transform(lambda x: len(x) < N/10).astype('bool')]
+ new_way = grouped.filter(
+ lambda x: len(x.letters) < N/10)
+ assert_frame_equal(new_way.sort(), old_way.sort())
+
+ # Group by strings; filter on ints.
+ grouped = df.groupby('letters')
+ old_way = df[grouped.ints.\
+ transform(lambda x: x.mean() > N/20).astype('bool')]
+ new_way = grouped.filter(lambda x: x['ints'].mean() > N/20)
+ assert_frame_equal(new_way.sort_index(), old_way.sort_index())
def assert_fp_equal(a, b):
assert((np.abs(a - b) < 1e-12).all())
| closes #919
I have been using Wes' workaround (see #919) for filtering groups. Finally, for brevity's sake, I wrote a real filter method. In the one simple case I checked, it performs faster than the workaround.
On a small (~10) Series:
```
In [7]: %timeit grouped.filter(lambda x: x.mean() > 10) # my method
1000 loops, best of 3: 346 us per loop
In [8]: %timeit grouped.obj[grouped.transform(lambda x: x.mean() > 10)] # workaround
1000 loops, best of 3: 462 us per loop
```
On a large (1000000) Series:
```
In [18]: %timeit grouped.filter(lambda x: x.mean() > 0) # my method
1 loops, best of 3: 213 ms per loop
In [19]: %timeit grouped.obj[grouped.transform(lambda x: x.mean() > 0)]
1 loops, best of 3: 696 ms per loop
```
This PR only handles Series, and I included one simple test. If I am on the right track, I'll write one for DataFrames also and write additional tests. If this is a job for Cython, I'm out of my depth, but I think numpy is sufficient. Does this look OK?
| https://api.github.com/repos/pandas-dev/pandas/pulls/3680 | 2013-05-22T02:43:16Z | 2013-06-06T21:06:26Z | 2013-06-06T21:06:26Z | 2014-06-15T15:23:56Z |
ENH: enhance set_option syntax | diff --git a/RELEASE.rst b/RELEASE.rst
index 9283bada2d720..35741f7eb008f 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -46,6 +46,9 @@ pandas 0.11.1
Note: The default value will change in 0.12 to make the default *to* write and
read multi-index columns in the new format. (GH3571_, GH1651_, GH3141_)
- Add iterator to ``Series.str`` (GH3638_)
+ - ``pd.set_option()`` now allows N option, value pairs (GH3667_).
+
+
**Improvements to existing features**
@@ -269,6 +272,7 @@ pandas 0.11.1
.. _GH3702: https://github.com/pydata/pandas/issues/3702
.. _GH3691: https://github.com/pydata/pandas/issues/3691
.. _GH3696: https://github.com/pydata/pandas/issues/3696
+.. _GH3667: https://github.com/pydata/pandas/issues/3667
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 26069681552f0..5acd2aa365ea3 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -238,6 +238,30 @@ Enhancements
GH3572_). This happens before any drawing takes place which elimnates any
spurious plots from showing up.
+ - ``pd.set_option()`` now allows N option, value pairs (GH3667_).
+
+ Let's say that we had an option ``'a.b'`` and another option ``'b.c'``.
+ We can set them at the same time:
+
+ .. ipython:: python
+ :suppress:
+
+ pd.core.config.register_option('a.b', 2, 'ay dot bee')
+ pd.core.config.register_option('b.c', 3, 'bee dot cee')
+
+ .. ipython:: python
+
+ pd.get_option('a.b')
+ pd.get_option('b.c')
+ pd.set_option('a.b', 1, 'b.c', 4)
+ pd.get_option('a.b')
+ pd.get_option('b.c')
+
+ You can of course still do it sequentially if you want. You can use up to
+ N arguments here, the only stipulation is that the number of arguments
+ must be even (since if they weren't then that would mean you provided an
+ argument name with no value).
+
Bug Fixes
~~~~~~~~~
@@ -305,3 +329,4 @@ on GitHub for a complete list.
.. _GH3702: https://github.com/pydata/pandas/issues/3702
.. _GH3691: https://github.com/pydata/pandas/issues/3691
.. _GH3696: https://github.com/pydata/pandas/issues/3696
+.. _GH3667: https://github.com/pydata/pandas/issues/3667
diff --git a/pandas/core/config.py b/pandas/core/config.py
index 2d62b807cf203..e8403164ac1b9 100644
--- a/pandas/core/config.py
+++ b/pandas/core/config.py
@@ -94,7 +94,7 @@ def _get_option(pat, silent=False):
return root[k]
-def _set_option(pat, value, silent=False):
+def _set_single_option(pat, value, silent):
key = _get_single_key(pat, silent)
o = _get_registered_option(key)
@@ -109,6 +109,40 @@ def _set_option(pat, value, silent=False):
o.cb(key)
+def _set_multiple_options(args, silent):
+ for k, v in zip(args[::2], args[1::2]):
+ _set_single_option(k, v, silent)
+
+
+def _set_option(*args, **kwargs):
+ # must at least 1 arg deal with constraints later
+ nargs = len(args)
+ if not nargs or nargs % 2 != 0:
+ raise AssertionError("Must provide an even number of non-keyword "
+ "arguments")
+
+ # must be 0 or 1 kwargs
+ nkwargs = len(kwargs)
+ if nkwargs not in (0, 1):
+ raise AssertionError("The can only be 0 or 1 keyword arguments")
+
+ # if 1 kwarg then it must be silent=True or silent=False
+ if nkwargs:
+ k, = kwargs.keys()
+ v, = kwargs.values()
+
+ if k != 'silent':
+ raise ValueError("the only allowed keyword argument is 'silent', "
+ "you passed '{0}'".format(k))
+ if not isinstance(v, bool):
+ raise TypeError("the type of the keyword argument passed must be "
+ "bool, you passed a {0}".format(v.__class__))
+
+ # default to false
+ silent = kwargs.get('silent', False)
+ _set_multiple_options(args, silent)
+
+
def _describe_option(pat='', _print_desc=True):
keys = _select_options(pat)
@@ -186,7 +220,7 @@ def __dir__(self):
# of options, and option descriptions.
-class CallableDyanmicDoc(object):
+class CallableDynamicDoc(object):
def __init__(self, func, doc_tmpl):
self.__doc_tmpl__ = doc_tmpl
@@ -301,10 +335,10 @@ def __doc__(self):
# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in pd.api
-get_option = CallableDyanmicDoc(_get_option, _get_option_tmpl)
-set_option = CallableDyanmicDoc(_set_option, _set_option_tmpl)
-reset_option = CallableDyanmicDoc(_reset_option, _reset_option_tmpl)
-describe_option = CallableDyanmicDoc(_describe_option, _describe_option_tmpl)
+get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
+set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
+reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
+describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
options = DictWrapper(_global_config)
######################################################
@@ -505,13 +539,7 @@ def _get_registered_option(key):
-------
RegisteredOption (namedtuple) if key is deprecated, None otherwise
"""
-
- try:
- d = _registered_options[key]
- except KeyError:
- return None
- else:
- return d
+ return _registered_options.get(key)
def _translate_key(key):
diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py
index c1231df026853..a2b1ea43717cf 100644
--- a/pandas/tests/test_config.py
+++ b/pandas/tests/test_config.py
@@ -169,6 +169,44 @@ def test_set_option(self):
self.assertRaises(KeyError, self.cf.set_option, 'no.such.key', None)
+
+ def test_set_option_empty_args(self):
+ self.assertRaises(AssertionError, self.cf.set_option)
+
+ def test_set_option_uneven_args(self):
+ self.assertRaises(AssertionError, self.cf.set_option, 'a.b', 2, 'b.c')
+
+
+ def test_set_option_2_kwargs(self):
+ self.assertRaises(AssertionError, self.cf.set_option, 'a.b', 2,
+ silenadf=2, asdf=2)
+
+ def test_set_option_invalid_kwargs_key(self):
+ self.assertRaises(ValueError, self.cf.set_option, 'a.b', 2,
+ silenadf=2)
+
+ def test_set_option_invalid_kwargs_value_type(self):
+ self.assertRaises(TypeError, self.cf.set_option, 'a.b', 2,
+ silent=2)
+
+ def test_set_option_invalid_single_argument_type(self):
+ self.assertRaises(AssertionError, self.cf.set_option, 2)
+
+ def test_set_option_multiple(self):
+ self.cf.register_option('a', 1, 'doc')
+ self.cf.register_option('b.c', 'hullo', 'doc2')
+ self.cf.register_option('b.b', None, 'doc2')
+
+ self.assertEqual(self.cf.get_option('a'), 1)
+ self.assertEqual(self.cf.get_option('b.c'), 'hullo')
+ self.assertTrue(self.cf.get_option('b.b') is None)
+
+ self.cf.set_option('a', '2', 'b.c', None, 'b.b', 10.0)
+
+ self.assertEqual(self.cf.get_option('a'), '2')
+ self.assertTrue(self.cf.get_option('b.c') is None)
+ self.assertEqual(self.cf.get_option('b.b'), 10.0)
+
def test_validation(self):
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2',
| closes #3667.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3677 | 2013-05-21T22:33:21Z | 2013-05-30T18:12:03Z | 2013-05-30T18:12:03Z | 2014-06-14T19:08:56Z |
API: deprecate DataFrame.interpolate | diff --git a/RELEASE.rst b/RELEASE.rst
index efc0f912060b7..1377bac856a96 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -96,6 +96,9 @@ pandas 0.11.1
- The ``raise_on_error`` option to plotting methods is obviated by GH3572_,
so it is removed. Plots now always raise when data cannot be plotted or the
object being plotted has a dtype of ``object``.
+ - ``DataFrame.interpolate()`` is now deprecated. Please use
+ ``DataFrame.fillna()`` and ``DataFrame.replace()`` instead (GH3582_,
+ GH3675_, GH3676_).
**Bug Fixes**
@@ -233,9 +236,11 @@ pandas 0.11.1
.. _GH3606: https://github.com/pydata/pandas/issues/3606
.. _GH3659: https://github.com/pydata/pandas/issues/3659
.. _GH3649: https://github.com/pydata/pandas/issues/3649
-.. _Gh3616: https://github.com/pydata/pandas/issues/3616
.. _GH1818: https://github.com/pydata/pandas/issues/1818
.. _GH3572: https://github.com/pydata/pandas/issues/3572
+.. _GH3582: https://github.com/pydata/pandas/issues/3582
+.. _GH3676: https://github.com/pydata/pandas/issues/3676
+.. _GH3675: https://github.com/pydata/pandas/issues/3675
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 9209c3938023e..ffa2cc6dc7cab 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -67,6 +67,9 @@ API changes
and thus you should cast to an appropriate numeric dtype if you need to
plot something.
+ - ``DataFrame.interpolate()`` is now deprecated. Please use
+ ``DataFrame.fillna()`` and ``DataFrame.replace()`` instead. (GH3582_,
+ GH3675_, GH3676_)
Enhancements
@@ -241,3 +244,6 @@ on GitHub for a complete list.
.. _GH3656: https://github.com/pydata/pandas/issues/3656
.. _GH1818: https://github.com/pydata/pandas/issues/1818
.. _GH3572: https://github.com/pydata/pandas/issues/3572
+.. _GH3582: https://github.com/pydata/pandas/issues/3582
+.. _GH3676: https://github.com/pydata/pandas/issues/3676
+.. _GH3675: https://github.com/pydata/pandas/issues/3675
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ed56a658d817d..962f994194108 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3506,7 +3506,7 @@ def replace(self, to_replace=None, value=None, method='pad', axis=0,
See also
--------
- reindex, asfreq, fillna, interpolate
+ reindex, asfreq, fillna
Returns
-------
@@ -3678,6 +3678,10 @@ def interpolate(self, to_replace, method='pad', axis=0, inplace=False,
--------
reindex, replace, fillna
"""
+ from warnings import warn
+ warn('DataFrame.interpolate will be removed in v0.12, please use '
+ 'either DataFrame.fillna or DataFrame.replace instead',
+ FutureWarning)
if self._is_mixed_type and axis == 1:
return self.T.replace(to_replace, method=method, limit=limit).T
| closes #3582.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3675 | 2013-05-21T20:33:17Z | 2013-05-22T13:45:59Z | 2013-05-22T13:45:59Z | 2014-06-14T02:16:46Z |
scatter_matrix bug | diff --git a/RELEASE.rst b/RELEASE.rst
index 436f9d8b833a3..fc54f1b87453e 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -40,11 +40,12 @@ pandas 0.11.1
list of the rows from which to read the index. Added the option,
``tupleize_cols`` to provide compatiblity for the pre 0.11.1 behavior of
writing and reading multi-index columns via a list of tuples. The default in
- 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
- multi-index column.
+ 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
+ multi-index column.
Note: The default value will change in 0.12 to make the default *to* write and
read multi-index columns in the new format. (GH3571_, GH1651_, GH3141_)
- Add iterator to ``Series.str`` (GH3638_)
+ - Added keyword parameters for different types of scatter_matrix subplots
**Improvements to existing features**
@@ -63,7 +64,7 @@ pandas 0.11.1
- Add modulo operator to Series, DataFrame
- Add ``date`` method to DatetimeIndex
- Simplified the API and added a describe method to Categorical
- - ``melt`` now accepts the optional parameters ``var_name`` and ``value_name``
+ - ``melt`` now accepts the optional parameters ``var_name`` and ``value_name``
to specify custom column names of the returned DataFrame (GH3649_),
thanks @hoechenberger
@@ -82,8 +83,8 @@ pandas 0.11.1
``timedelta64[ns]`` to ``object/int`` (GH3425_)
- Do not allow datetimelike/timedeltalike creation except with valid types
(e.g. cannot pass ``datetime64[ms]``) (GH3423_)
- - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
- DataFrame -> Series if groups are unique. Regression from 0.10.1,
+ - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
+ DataFrame -> Series if groups are unique. Regression from 0.10.1,
partial revert on (GH2893_) with (GH3596_)
- Raise on ``iloc`` when boolean indexing with a label based indexer mask
e.g. a boolean Series, even with integer labels, will raise. Since ``iloc``
@@ -137,7 +138,7 @@ pandas 0.11.1
is a ``list`` or ``tuple``.
- Fixed bug where a time-series was being selected in preference to an actual column name
in a frame (GH3594_)
- - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
+ - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
``np.nan`` or ``np.inf`` as appropriate (GH3590_)
- Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_)
- Fix ``read_csv`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 751f5fcdb82b2..f0c2e272348ea 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -159,7 +159,8 @@ def use(self, key, value):
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
- diagonal='hist', marker='.', **kwds):
+ diagonal='hist', marker='.', density_kwds={}, hist_kwds={},
+ **kwds):
"""
Draw a matrix of scatter plots.
@@ -174,6 +175,10 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : Matplotlib marker type, default '.'
+ hist_kwds : other plotting keyword arguments
+ To be passed to hist function
+ density_kwds : other plotting keyword arguments
+ To be passed to kernel density estimate plot
kwds : other plotting keyword arguments
To be passed to scatter function
@@ -205,13 +210,13 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
- ax.hist(values)
+ ax.hist(values, hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
- ax.plot(ind, gkde.evaluate(ind), **kwds)
+ ax.plot(ind, gkde.evaluate(ind), **density_kwds)
else:
common = (mask[a] & mask[b]).values
@@ -368,16 +373,16 @@ def andrews_curves(data, class_column, ax=None, samples=200):
"""
Parameters:
-----------
- data : DataFrame
+ data : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
-
+
Returns:
--------
ax: Matplotlib axis object
-
+
"""
from math import sqrt, pi, sin, cos
import matplotlib.pyplot as plt
@@ -1805,7 +1810,7 @@ def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False, **kwarg
grid : Setting this to True will show the grid
kwargs : other plotting keyword arguments
To be passed to scatter function
-
+
Returns
-------
fig : matplotlib.Figure
@@ -2198,9 +2203,9 @@ def _subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
data : DataFrame, optional
If secondary_y is a sequence, data is used to select columns.
-
- fig_kw : Other keyword arguments to be passed to the figure() call.
- Note that all keywords not recognized above will be
+
+ fig_kw : Other keyword arguments to be passed to the figure() call.
+ Note that all keywords not recognized above will be
automatically included here.
| Currently scatter_matrix passes all unmatched keyword arguments to both scatter and line subplots (off-diagonal plots are scatter, diagonal plots are discrete or continuous histograms). I needed to color the points on the scatter plots but could not pass a "c" argument or pandas would try to pass the "c" argument to the line plots and barf. This change scratches my itch and doesn't break backward compatibility.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3673 | 2013-05-21T18:26:36Z | 2013-06-03T01:18:28Z | 2013-06-03T01:18:28Z | 2014-07-16T08:10:09Z |
BUG: convert_objects with convert_dates=coerce was parsing `a` into a date | diff --git a/RELEASE.rst b/RELEASE.rst
index 436f9d8b833a3..f2c150341d2c6 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -155,6 +155,8 @@ pandas 0.11.1
- Fix running of bs4 tests when it is not installed (GH3605_)
- Fix parsing of html table (GH3606_)
- ``read_html()`` now only allows a single backend: ``html5lib`` (GH3616_)
+ - ``convert_objects`` with ``convert_dates='coerce'`` was parsing some single-letter strings
+ into today's date
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 5cff7f85593a6..75b8d1dc69452 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -3509,6 +3509,15 @@ def test_convert_objects(self):
#result = r.convert_objects(convert_dates=True,convert_numeric=False)
#self.assert_(result.dtype == 'M8[ns]')
+ # dateutil parses some single letters into today's value as a date
+ for x in 'abcdefghijklmnopqrstuvwxyz':
+ s = Series([x])
+ result = s.convert_objects(convert_dates='coerce')
+ assert_series_equal(result,s)
+ s = Series([x.upper()])
+ result = s.convert_objects(convert_dates='coerce')
+ assert_series_equal(result,s)
+
def test_apply_args(self):
s = Series(['foo,bar'])
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index a633b9482da06..abec45b52a363 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -319,6 +319,7 @@ class Timestamp(_Timestamp):
_nat_strings = set(['NaT','nat','NAT','nan','NaN','NAN'])
+_not_datelike_strings = set(['a','A','m','M','p','P','t','T'])
class NaTType(_NaT):
"""(N)ot-(A)-(T)ime, the time equivalent of NaN"""
@@ -876,6 +877,14 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False,
&dts)
_check_dts_bounds(iresult[i], &dts)
except ValueError:
+
+ # for some reason, dateutil parses some single letter len-1 strings into today's date
+ if len(val) == 1 and val in _not_datelike_strings:
+ if coerce:
+ iresult[i] = iNaT
+ continue
+ elif raise_:
+ raise
try:
result[i] = parse(val, dayfirst=dayfirst)
except Exception:
| this is a 'bug' in dateutil:
`a,t,m,p` are the offenders
```
In [1]: import dateutil
In [2]: dateutil.parser.parse('a')
Out[2]: datetime.datetime(2013, 5, 21, 0, 0)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3671 | 2013-05-21T17:41:33Z | 2013-05-21T19:04:01Z | 2013-05-21T19:04:01Z | 2014-07-09T17:58:31Z |
BUG: Fix alignment issue when setitem in a mixed-DataFrame with a Series (GH3668) | diff --git a/RELEASE.rst b/RELEASE.rst
index 3940cd6d10b51..436f9d8b833a3 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -146,7 +146,8 @@ pandas 0.11.1
- Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] dtype in ``read_csv``
when ``parse_dates`` is specified (GH3062_)
- Fix not consolidating before to_csv (GH3624_)
- - Fix alignment issue when setitem in a DataFrame with a piece of a DataFrame (GH3626_)
+ - Fix alignment issue when setitem in a DataFrame with a piece of a DataFrame (GH3626_) or
+ a mixed DataFrame and a Series (GH3668_)
- Fix plotting of unordered DatetimeIndex (GH3601_)
- ``sql.write_frame`` failing when writing a single column to sqlite (GH3628_),
thanks to @stonebig
@@ -217,6 +218,7 @@ pandas 0.11.1
.. _GH3141: https://github.com/pydata/pandas/issues/3141
.. _GH3628: https://github.com/pydata/pandas/issues/3628
.. _GH3638: https://github.com/pydata/pandas/issues/3638
+.. _GH3668: https://github.com/pydata/pandas/issues/3668
.. _GH3605: https://github.com/pydata/pandas/issues/3605
.. _GH3606: https://github.com/pydata/pandas/issues/3606
.. _GH3659: https://github.com/pydata/pandas/issues/3659
diff --git a/pandas/core/common.py b/pandas/core/common.py
index a52c932b30ba4..29516f9d2d4a3 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -851,6 +851,7 @@ def _maybe_upcast_indexer(result, indexer, other, dtype=None):
return the result and a changed flag
"""
+ original_dtype = result.dtype
def changeit():
# our type is wrong here, need to upcast
r, fill_value = _maybe_upcast(result, fill_value=other, dtype=dtype, copy=True)
@@ -861,9 +862,11 @@ def changeit():
# if we hit this then we still have an incompatible type
r[indexer] = fill_value
+ # if we have changed to floats, might want to cast back if we can
+ r = _possibly_downcast_to_dtype(r,original_dtype)
return r, True
- new_dtype, fill_value = _maybe_promote(result.dtype,other)
+ new_dtype, fill_value = _maybe_promote(original_dtype,other)
if new_dtype != result.dtype:
return changeit()
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 41f20cbcc15ac..f7187b7ae5d61 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -143,7 +143,7 @@ def setter(item, v):
else:
setter(item, np.nan)
- # we have an equal len ndarray
+ # we have an equal len ndarray to our labels
elif isinstance(value, np.ndarray) and value.ndim == 2:
if len(labels) != value.shape[1]:
raise ValueError('Must have equal len keys and value when'
@@ -153,7 +153,8 @@ def setter(item, v):
setter(item, value[:,i])
# we have an equal len list/ndarray
- elif len(labels) == 1 and len(self.obj[labels[0]]) == len(value):
+ elif len(labels) == 1 and (
+ len(self.obj[labels[0]]) == len(value) or len(plane_indexer[0]) == len(value)):
setter(labels[0], value)
# per label values
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 50bddb6ecd85c..fddbbf93552b3 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -938,12 +938,6 @@ def test_getitem_setitem_non_ix_labels(self):
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
- def test_ix_assign_column_mixed(self):
- # GH #1142
- orig = self.mixed_frame.ix[:, 'B'].copy()
- self.mixed_frame.ix[:, 'B'] = self.mixed_frame.ix[:, 'B'] + 1
- assert_series_equal(self.mixed_frame.B, orig + 1)
-
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.ix[df.index == 0, :]
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 5891e8ac08040..ad3d150c7e0ad 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -881,7 +881,7 @@ def test_multi_assign(self):
expected = DataFrame({'FC':['a',np.nan,'a','b','a','b'],
'PF':[0,0,0,0,1,1],
- 'col1':Series([0,1,4,6,8,10],dtype='float64'),
+ 'col1':Series([0,1,4,6,8,10]),
'col2':[12,7,16,np.nan,20,22]})
@@ -898,6 +898,27 @@ def test_multi_assign(self):
df2.ix[mask, cols]= dft.ix[mask, cols].values
assert_frame_equal(df2,expected)
+ def test_ix_assign_column_mixed(self):
+ # GH #1142
+ df = DataFrame(tm.getSeriesData())
+ df['foo'] = 'bar'
+
+ orig = df.ix[:, 'B'].copy()
+ df.ix[:, 'B'] = df.ix[:, 'B'] + 1
+ assert_series_equal(df.B, orig + 1)
+
+ # GH 3668, mixed frame with series value
+ df = DataFrame({'x':range(10), 'y':range(10,20),'z' : 'bar'})
+ expected = df.copy()
+ expected.ix[0, 'y'] = 1000
+ expected.ix[2, 'y'] = 1200
+ expected.ix[4, 'y'] = 1400
+ expected.ix[6, 'y'] = 1600
+ expected.ix[8, 'y'] = 1800
+
+ df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
+ assert_frame_equal(df,expected)
+
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
| closes #3668
| https://api.github.com/repos/pandas-dev/pandas/pulls/3670 | 2013-05-21T16:14:14Z | 2013-05-21T17:37:31Z | 2013-05-21T17:37:31Z | 2014-06-23T10:57:56Z |
ENH: Allow fetching stock dividend and split info from yahoo | diff --git a/RELEASE.rst b/RELEASE.rst
index 4d0417143a789..5634b9b30716d 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -87,6 +87,7 @@ pandas 0.11.1
- Add ``unit`` keyword to ``Timestamp`` and ``to_datetime`` to enable passing of
integers or floats that are in an epoch unit of ``s, ms, us, ns``
(e.g. unix timestamps or epoch ``s``, with fracional seconds allowed) (GH3540_)
+ - ``DataReader`` now fetches stock dividend and split info (GH3666_)
**API Changes**
@@ -327,7 +328,7 @@ pandas 0.11.1
.. _GH3834: https://github.com/pydata/pandas/issues/3834
.. _GH3873: https://github.com/pydata/pandas/issues/3873
.. _GH3877: https://github.com/pydata/pandas/issues/3877
-
+.. _GH3666: https://github.com/pydata/pandas/pull/3666
pandas 0.11.0
=============
diff --git a/ci/README.txt b/ci/README.txt
index f69fc832fde85..d7b97890c6476 100644
--- a/ci/README.txt
+++ b/ci/README.txt
@@ -1,5 +1,5 @@
Travis is a ci service that's well-integrated with github.
-The following ypes of breakage should be detected
+The following types of breakage should be detected
by travis builds:
1) Failing tests on any supported version of python.
diff --git a/pandas/io/data.py b/pandas/io/data.py
index 8bc3df561cadb..ae3722f5e49bc 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -19,7 +19,7 @@
def DataReader(name, data_source=None, start=None, end=None,
- retry_count=3, pause=0):
+ retry_count=3, pause=0, dividends=False, splits=False):
"""
Imports data from a number of online sources.
@@ -56,8 +56,9 @@ def DataReader(name, data_source=None, start=None, end=None,
if(data_source == "yahoo"):
return get_data_yahoo(symbols=name, start=start, end=end,
- adjust_price=False, chunk=25,
- retry_count=retry_count, pause=pause)
+ adjust_price=False, chunk=25,
+ retry_count=retry_count, pause=pause,
+ dividends=dividends, splits=splits)
elif(data_source == "google"):
return get_data_google(symbols=name, start=start, end=end,
adjust_price=False, chunk=25,
@@ -140,7 +141,7 @@ def get_quote_google(symbols):
raise NotImplementedError("Google Finance doesn't have this functionality")
def _get_hist_yahoo(sym=None, start=None, end=None, retry_count=3,
- pause=0, **kwargs):
+ pause=0, dividends=False, splits=False, **kwargs):
"""
Get historical data for the given name from yahoo.
Date format is datetime
@@ -153,7 +154,13 @@ def _get_hist_yahoo(sym=None, start=None, end=None, retry_count=3,
start, end = _sanitize_dates(start, end)
- yahoo_URL = 'http://ichart.yahoo.com/table.csv?'
+ # Yahoo! Finance doesn't show splits with 'table.csv' setting
+ if splits:
+ url_type = 'x'
+ else:
+ url_type = 'table.csv'
+
+ yahoo_URL = 'http://ichart.yahoo.com/%s?' % url_type
url = yahoo_URL + 's=%s' % sym + \
'&a=%s' % (start.month - 1) + \
@@ -162,7 +169,7 @@ def _get_hist_yahoo(sym=None, start=None, end=None, retry_count=3,
'&d=%s' % (end.month - 1) + \
'&e=%s' % end.day + \
'&f=%s' % end.year + \
- '&g=d' + \
+ '&g=%s' % ('v' if dividends or splits else 'd') + \
'&ignore=.csv'
for _ in range(retry_count):
@@ -177,6 +184,83 @@ def _get_hist_yahoo(sym=None, start=None, end=None, retry_count=3,
if len(rs) > 2 and rs.index[-1] == rs.index[-2]: # pragma: no cover
rs = rs[:-1]
+ rs.rename(columns={'Dividends': 'Values'}, inplace=True)
+ rs_splits, rs_dividends = DataFrame(), DataFrame()
+
+ # check to see if there is split data
+ try:
+ has_splits = rs.xs('SPLIT')['Values'].any()
+ except AttributeError:
+ has_splits = rs.xs('SPLIT')['Values']
+ except KeyError:
+ # There is no split data
+ has_splits = False
+
+ split_format = splits or has_splits
+
+ if (splits and has_splits and hasattr(rs.xs('SPLIT'), 'pivot')):
+ # Yahoo! Finance returns additional info like 'STARTDATE' and
+ # 'ENDDATE'. This selects only the data we want
+ rs_splits = rs.xs('SPLIT').reset_index()
+
+ # If Yahoo! Finance returns one value, the result of '.xs' will
+ # be a Series instead of a DataFrame
+ elif (splits and has_splits):
+ d = {'index': ['SPLIT'],
+ 'Date': [rs.xs('SPLIT')['Date']],
+ 'Values': [rs.xs('SPLIT')['Values']]}
+
+ rs_splits = DataFrame(d)
+
+ if dividends and split_format:
+ # check to see if there is dividend data
+ try:
+ has_dividends = rs.xs('DIVIDEND')['Values'].any()
+ except AttributeError:
+ has_dividends = rs.xs('DIVIDEND')['Values']
+ except KeyError:
+ # There is no dividend data
+ has_dividends = False
+
+ if (has_dividends and hasattr(rs.xs('DIVIDEND'), 'pivot')):
+ rs_dividends = rs.xs('DIVIDEND').reset_index()
+ elif has_dividends:
+ d = {'index': ['DIVIDEND'],
+ 'Date': [rs.xs('DIVIDEND')['Date']],
+ 'Values': [rs.xs('DIVIDEND')['Values']]}
+
+ rs_dividends = DataFrame(d)
+
+ elif dividends:
+ # if there are no splits there won't be a 'DIVIDEND' section
+ has_dividends = len(rs) > 0
+
+ if has_dividends:
+ rs_dividends = rs
+
+ # print(rs)
+ rs = concat([rs_splits, rs_dividends])
+ # print(rs_splits)
+ # print(rs_dividends)
+ # print(rs)
+
+ if (split_format and not rs.empty):
+ # Dates in split format are yyyymmdd so convert to yyyy-mm-dd
+ rs.Date = rs.Date.apply(lambda x: str(x))
+ rs.Date = rs.Date.apply(
+ lambda x: '%s-%s-%s' % (x[:4], x[4:6], x[6:]))
+
+ # pivot DataFrame to match format of a normal query
+ rs = rs.pivot(index='Date', columns='index', values='Values')
+
+ if (splits and has_splits):
+ rs.rename(columns={'SPLIT': 'Splits'}, inplace=True)
+
+ if (dividends and has_dividends and split_format):
+ rs.rename(columns={'DIVIDEND': 'Dividends'}, inplace=True)
+ elif (dividends and has_dividends):
+ rs.rename(columns={'Values': 'Dividends'}, inplace=True)
+
return rs
time.sleep(pause)
@@ -312,7 +396,7 @@ def get_components_yahoo(idx_sym):
def get_data_yahoo(symbols=None, start=None, end=None, retry_count=3, pause=0,
adjust_price=False, ret_index=False, chunksize=25,
- **kwargs):
+ dividends=False, splits=False, **kwargs):
"""
Returns DataFrame/Panel of historical stock prices from symbols, over date
range, start to end. To avoid being penalized by Yahoo! Finance servers,
@@ -341,6 +425,10 @@ def get_data_yahoo(symbols=None, start=None, end=None, retry_count=3, pause=0,
If True, includes a simple return index 'Ret_Index' in hist_data.
chunksize : int, default 25
Number of symbols to download consecutively before intiating pause.
+ dividends : boolean, default False
+ Fetch dividends instead of prices
+ splits : boolean, default False
+ Fetch splits instead of prices
Returns
-------
@@ -352,8 +440,9 @@ def dl_mult_symbols(symbols):
for sym_group in _in_chunks(symbols, chunksize):
for sym in sym_group:
try:
- stocks[sym] = _get_hist_yahoo(sym, start=start,
- end=end, **kwargs)
+ stocks[sym] = _get_hist_yahoo(sym, start=start, end=end,
+ dividends=dividends,
+ splits=splits, **kwargs)
except:
warnings.warn('Error with sym: ' + sym + '... skipping.')
@@ -369,7 +458,9 @@ def dl_mult_symbols(symbols):
#If a single symbol, (e.g., 'GOOG')
if isinstance(symbols, (str, int)):
sym = symbols
- hist_data = _get_hist_yahoo(sym, start=start, end=end)
+ hist_data = _get_hist_yahoo(sym, start=start, end=end,
+ dividends=dividends, splits=splits,
+ **kwargs)
#Or multiple symbols, (e.g., ['GOOG', 'AAPL', 'MSFT'])
elif isinstance(symbols, DataFrame):
try:
| Usage:
``` python
from pandas.io.data import DataReader
```
Requesting dividends and splits but only dividends available
``` python
DataReader('CAT', 'yahoo', '12/31/04', '1/18/05', dividends=True, splits=True)
Dividends
Date
2005-01-18.0 0.205
```
Requesting dividends and splits but only splits available
``` python
DataReader('CAT', 'yahoo', '7/14/05', '7/15/05', dividends=True, splits=True)
Splits
Date
2005-07-14 2:1
```
Requesting dividends and splits and both are available
``` python
DataReader('CAT', 'yahoo', '7/1/05', '8/1/05', dividends=True, splits=True)
Dividends Splits
Date
2005-07-14 NaN 2:1
2005-07-20 0.250000 NaN
```
Requesting multiple dividends and splits
``` python
DataReader('CAT', 'yahoo', '7/14/97', '7/15/05', dividends=True, splits=True)
Dividends Splits
Date
1997-07-14 NaN 2:1
1997-07-17 0.125000 NaN
1997-10-16 0.125000 NaN
1998-01-15 0.125000 NaN
1998-04-16 0.125000 NaN
1998-07-16 0.150000 NaN
1998-10-22 0.150000 NaN
1999-01-15 0.150000 NaN
1999-04-22 0.150000 NaN
1999-07-16 0.162500 NaN
1999-10-21 0.162500 NaN
2000-01-18 0.162500 NaN
2000-04-19 0.162500 NaN
2000-07-18 0.170000 NaN
2000-10-18 0.170000 NaN
2001-01-18 0.170000 NaN
2001-04-19 0.170000 NaN
2001-07-18 0.175000 NaN
2001-10-18 0.175000 NaN
2002-01-17 0.175000 NaN
2002-04-18 0.175000 NaN
2002-07-18 0.175000 NaN
2002-10-17 0.175000 NaN
2003-01-16 0.175000 NaN
2003-04-16 0.175000 NaN
2003-07-17 0.175000 NaN
2003-10-16 0.185000 NaN
2004-01-15 0.185000 NaN
2004-04-22 0.185000 NaN
2004-07-16 0.205000 NaN
2004-10-21 0.205000 NaN
2005-01-18 0.205000 NaN
2005-04-21 0.205000 NaN
2005-07-14 NaN 2:1
```
Requesting dividends or splits and neither are available
``` python
DataReader('CAT', 'yahoo', '7/21/05', '7/22/05', dividends=True)
DataReader('CAT', 'yahoo', '7/21/05', '7/22/05', splits=True)
Empty DataFrame
```
Requesting dividends and only dividends are available
``` python
DataReader('CAT', 'yahoo', '12/31/04', '1/18/05', dividends=True)
Dividends
2005-01-18 0.205
```
Requesting dividends and both dividends and splits are available
``` python
DataReader('CAT', 'yahoo', '7/1/05', '8/01/05', dividends=True)
Dividends
2005-07-20 0.25
```
Requesting dividends and only splits are available
``` python
DataReader('CAT', 'yahoo', '7/14/05', '7/15/05', dividends=True)
Empty DataFrame
```
Requesting multiple dividends
``` python
DataReader('CAT', 'yahoo', '7/20/05', '9/21/06', dividends=True)
Dividends
2005-07-20 0.25
2005-10-20 0.25
2006-01-18 0.25
2006-04-20 0.25
2006-07-18 0.30
```
Requesting splits and only splits are available
``` python
DataReader('CAT', 'yahoo', '7/14/05', '7/15/05', splits=True)
Splits
Date
2005-07-14 2:1
```
Requesting splits and both dividends and splits are available
``` python
DataReader('CAT', 'yahoo', '7/1/05', '8/01/05', splits=True)
Splits
Date
2005-07-14 2:1
```
Requesting splits and only dividends are available
``` python
DataReader('CAT', 'yahoo', '12/31/04', '1/18/05', splits=True)
Empty DataFrame
```
Requesting multiple splits
``` python
DataReader('CAT', 'yahoo', '7/14/70', '7/15/05', splits=True)
Splits
Date
1994-09-06 2:1
1997-07-14 2:1
2005-07-14 2:1
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3666 | 2013-05-21T11:28:43Z | 2014-01-18T03:42:51Z | null | 2014-07-01T07:47:18Z |
CLN: pandas stata reader should clean up after itself | diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index 79cec2870d687..dccfdc2bb3c63 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -141,6 +141,7 @@ def test_write_dta5(self):
written_and_read_again = self.read_dta(self.dta5)
tm.assert_frame_equal(written_and_read_again, original)
+ os.remove(self.dta5)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
@@ -150,6 +151,7 @@ def test_write_dta6(self):
written_and_read_again = self.read_dta(self.dta6)
tm.assert_frame_equal(written_and_read_again, original)
+ os.remove(self.dta6)
@nose.tools.nottest
def test_read_dta7(self):
| https://api.github.com/repos/pandas-dev/pandas/pulls/3665 | 2013-05-21T02:33:41Z | 2013-05-21T12:08:54Z | null | 2013-12-04T00:46:37Z | |
ENH: sort the keys of a passed dict using numpy's sort function | diff --git a/pandas/core/series.py b/pandas/core/series.py
index a04e931cf07e3..5d2fc4629e20b 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -456,7 +456,7 @@ def __new__(cls, data=None, index=None, dtype=None, name=None,
if isinstance(data, OrderedDict):
index = Index(data)
else:
- index = Index(sorted(data))
+ index = Index(np.sort(data.keys()))
try:
if isinstance(index, DatetimeIndex):
# coerce back to datetime objects for lookup
| closes #3658
| https://api.github.com/repos/pandas-dev/pandas/pulls/3664 | 2013-05-21T01:24:17Z | 2013-05-21T02:51:26Z | null | 2014-07-04T15:32:41Z |
Rework display logic again. | diff --git a/RELEASE.rst b/RELEASE.rst
index e02ad66252bdc..18468ebcd3f4c 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -40,8 +40,8 @@ pandas 0.11.1
list of the rows from which to read the index. Added the option,
``tupleize_cols`` to provide compatiblity for the pre 0.11.1 behavior of
writing and reading multi-index columns via a list of tuples. The default in
- 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
- multi-index column.
+ 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
+ multi-index column.
Note: The default value will change in 0.12 to make the default *to* write and
read multi-index columns in the new format. (GH3571_, GH1651_, GH3141_)
- Add iterator to ``Series.str`` (GH3638_)
@@ -63,6 +63,7 @@ pandas 0.11.1
- Add modulo operator to Series, DataFrame
- Add ``date`` method to DatetimeIndex
- Simplified the API and added a describe method to Categorical
+ - Added Faq section on repr display options, to help users customize their setup.
**API Changes**
@@ -79,12 +80,14 @@ pandas 0.11.1
``timedelta64[ns]`` to ``object/int`` (GH3425_)
- Do not allow datetimelike/timedeltalike creation except with valid types
(e.g. cannot pass ``datetime64[ms]``) (GH3423_)
- - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
- DataFrame -> Series if groups are unique. Regression from 0.10.1,
+ - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
+ DataFrame -> Series if groups are unique. Regression from 0.10.1,
partial revert on (GH2893_) with (GH3596_)
- Raise on ``iloc`` when boolean indexing with a label based indexer mask
e.g. a boolean Series, even with integer labels, will raise. Since ``iloc``
is purely positional based, the labels on the Series are not alignable (GH3631_)
+ - Deprecated display.height, display.width is now only a formatting option
+ does not control triggering of summary, simuliar to < 0.11.0.
**Bug Fixes**
@@ -134,11 +137,13 @@ pandas 0.11.1
is a ``list`` or ``tuple``.
- Fixed bug where a time-series was being selected in preference to an actual column name
in a frame (GH3594_)
- - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
+ - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
``np.nan`` or ``np.inf`` as appropriate (GH3590_)
- Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_)
- Fix ``read_csv`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
was failing (GH3611_)
+ - Disable HTML output in qtconsole again. (GH3657_)
+ - Reworked the new repr display logic, which users found confusing. (GH3663_)
- Fix indexing issue in ndim >= 3 with ``iloc`` (GH3617_)
- Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] dtype in ``read_csv``
when ``parse_dates`` is specified (GH3062_)
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index 8009c7014c347..a5b6db2964cd2 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -35,8 +35,8 @@ horizontal scrolling, auto-detection of width/height.
To appropriately address all these environments, the display behavior is controlled
by several options, which you're encouraged to tweak to suit your setup.
-As of 0.11.0, the relavent options are all under the `display` namespace,
-(e.g. display.width, display.height, etc'):
+As of 0.11.1, these are the relavent options, all under the `display` namespace,
+(e.g. display.width, etc'):
- notebook_repr_html: if True, IPython frontends with HTML support will display
dataframes as HTML tables when possible.
- expand_repr (default True): when the frame width cannot fit within the screen,
@@ -45,10 +45,10 @@ As of 0.11.0, the relavent options are all under the `display` namespace,
- max_columns: max dataframe columns to display. a wider frame will trigger
a summary view, unless `expand_repr` is True and HTML output is disabled.
- max_rows: max dataframe rows display. a longer frame will trigger a summary view.
-- width: width of display screen in characters. When using a terminal, setting this to None
- will trigger auto-detection of terminal width.
-- height: height of display screen. When using a terminal, setting this to None
- will trigger auto-detection of terminal height.
+- width: width of display screen in characters, used to determine the width of lines
+ when expand_repr is active, Setting this to None will trigger auto-detection of terminal
+ width, this only works for proper terminals, not IPython frontends such as ipnb.
+ width is ignored in IPython notebook, since the browser provides horizontal scrolling.
IPython users can use the IPython startup file to import pandas and set these
options automatically when starting up.
diff --git a/pandas/core/common.py b/pandas/core/common.py
index a52c932b30ba4..cbc85e6b91c33 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1904,8 +1904,23 @@ def in_qtconsole():
return True
except:
return False
+ return False
+
+def in_ipnb():
+ """
+ check if we're inside an IPython Notebook
+ """
+ try:
+ ip = get_ipython()
+ front_end = (ip.config.get('KernelApp',{}).get('parent_appname',"") or
+ ip.config.get('IPKernelApp',{}).get('parent_appname',""))
+ if 'notebook' in front_end.lower():
+ return True
+ except:
+ return False
+ return False
-def in_ipnb_frontend():
+def in_ipython_frontend():
"""
check if we're inside an an IPython zmq frontend
"""
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 71b4539265069..57bbe747c9c2c 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -120,13 +120,17 @@
pc_line_width_doc = """
: int
- When printing wide DataFrames, this is the width of each line.
+ Deprecated.
"""
pc_line_width_deprecation_warning = """\
line_width has been deprecated, use display.width instead (currently both are identical)
"""
+pc_height_deprecation_warning = """\
+height has been deprecated.
+"""
+
pc_width_doc = """
: int
Width of the display in characters. In case python/IPython is running in
@@ -138,10 +142,7 @@
pc_height_doc = """
: int
- Height of the display in lines. In case python/IPython is running in a
- terminal this can be set to None and pandas will auto-detect the width.
- Note that the IPython notebook, IPython qtconsole, or IDLE do not run
- in a terminal, and hence it is not possible to correctly detect the height.
+ Deprecated.
"""
pc_chop_threshold_doc = """
@@ -244,10 +245,15 @@ def mpl_style_cb(key):
validator=is_instance_factory([type(None), int]))
# redirected to width, make defval identical
cf.register_option('line_width', get_default_val('display.width'), pc_line_width_doc)
+
cf.deprecate_option('display.line_width',
msg=pc_line_width_deprecation_warning,
rkey='display.width')
+cf.deprecate_option('display.height',
+ msg=pc_height_deprecation_warning,
+ rkey='display.height')
+
tc_sim_interactive_doc = """
: boolean
Whether to simulate interactive mode for purposes of testing
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 3d38caa84492f..7327f3b1b2175 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -1702,7 +1702,7 @@ def detect_console_encoding():
def get_console_size():
"""Return console size as tuple = (width, height).
- May return (None,None) in some cases.
+ Returns (None,None) in non-interactive session.
"""
display_width = get_option('display.width')
display_height = get_option('display.height')
@@ -1718,7 +1718,7 @@ def get_console_size():
# Simple. yeah.
if com.in_interactive_session():
- if com.in_ipnb_frontend():
+ if com.in_ipython_frontend():
# sane defaults for interactive non-shell terminal
# match default for width,height in config_init
from pandas.core.config import get_default_val
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ed56a658d817d..0580be25a3f04 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -605,57 +605,62 @@ def __nonzero__(self):
def _repr_fits_vertical_(self):
"""
- Check if full repr fits in vertical boundaries imposed by the display
- options height and max_rows. In case of non-interactive session,
- no boundaries apply.
+ Check length against max_rows.
"""
- width, height = fmt.get_console_size()
max_rows = get_option("display.max_rows")
+ return len(self) <= max_rows
- if height is None and max_rows is None:
- return True
-
- else:
- # min of two, where one may be None
- height = height or max_rows +1
- max_rows = max_rows or height +1
- return len(self) <= min(max_rows, height)
-
- def _repr_fits_horizontal_(self):
+ def _repr_fits_horizontal_(self,ignore_width=False):
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns. In case off non-interactive session, no
boundaries apply.
+
+ ignore_width is here so ipnb+HTML output can behave the way
+ users expect. display.max_columns remains in effect.
+ GH3541, GH3573
"""
+
width, height = fmt.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if ((max_columns and nb_columns > max_columns) or
- (width and nb_columns > (width // 2))):
+ ((not ignore_width) and width and nb_columns > (width // 2))):
return False
- if width is None:
- # no sense finding width of repr if no width set
+ if (ignore_width # used by repr_html under IPython notebook
+ or not com.in_interactive_session()): # scripts ignore terminal dims
return True
+ if (get_option('display.width') is not None or
+ com.in_ipython_frontend()):
+ # check at least the column row for excessive width
+ max_rows = 1
+ else:
+ max_rows = get_option("display.max_rows")
+
+ # when auto-detecting, so width=None and not in ipython front end
+ # check whether repr fits horizontal by actualy checking
+ # the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
- max_rows = get_option("display.max_rows")
- if not (height is None and max_rows is None):
+
+ if not (max_rows is None): # unlimited rows
# min of two, where one may be None
- height = height or max_rows +1
- max_rows = max_rows or height +1
- d=d.iloc[:min(max_rows, height,len(d))]
+ d=d.iloc[:min(max_rows,len(d))]
+ else:
+ return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max([len(l) for l in value.split('\n')])
- return repr_width <= width
+
+ return repr_width < width
def __str__(self):
"""
@@ -697,14 +702,11 @@ def __unicode__(self):
if fits_vertical and fits_horizontal:
self.to_string(buf=buf)
else:
- width, height = fmt.get_console_size()
- max_rows = get_option("display.max_rows") or height
- # expand_repr basically takes the extrac columns that don't
- # fit the width, and creates a new page, which increases
- # the effective row count. check number of cols agaibst
- # max rows to catch wrapping. that would exceed max_rows.
- if (get_option("display.expand_frame_repr") and fits_vertical and
- len(self.columns) < max_rows):
+ width, _ = fmt.get_console_size()
+ max_rows = get_option("display.max_rows")
+ if (get_option("display.expand_frame_repr")
+ and fits_vertical):
+ # and len(self.columns) < max_rows)
self.to_string(buf=buf, line_width=width)
else:
max_info_rows = get_option('display.max_info_rows')
@@ -731,12 +733,22 @@ def _repr_html_(self):
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
+ # ipnb in html repr mode allows scrolling
+ # users strongly prefer to h-scroll a wide HTML table in the browser
+ # then to get a summary view. GH3541, GH3573
+ ipnbh = com.in_ipnb() and get_option('display.notebook_repr_html')
+
+ # qtconsole doesn't report it's line width, and also
+ # behaves badly when outputting an HTML table
+ # that doesn't fit the window, so disable it.
+ if com.in_qtconsole():
+ raise ValueError('Disable HTML output in QtConsole')
if get_option("display.notebook_repr_html"):
fits_vertical = self._repr_fits_vertical_()
fits_horizontal = False
if fits_vertical:
- fits_horizontal = self._repr_fits_horizontal_()
+ fits_horizontal = self._repr_fits_horizontal_(ignore_width=ipnbh)
if fits_horizontal and fits_vertical:
return ('<div style="max-height:1000px;'
@@ -870,7 +882,7 @@ def __contains__(self, key):
# Python 2 division methods
if not py3compat.PY3:
- __div__ = _arith_method(operator.div, '__div__', '/',
+ __div__ = _arith_method(operator.div, '__div__', '/',
default_axis=None, fill_zeros=np.inf)
__rdiv__ = _arith_method(lambda x, y: y / x, '__rdiv__',
default_axis=None, fill_zeros=np.inf)
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 6b281edf17da9..7feb2f17d79a5 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -170,8 +170,9 @@ def test_expand_frame_repr(self):
df_tall = DataFrame('hello', range(30), range(5))
with option_context('mode.sim_interactive', True):
- with option_context('display.width', 50,
- 'display.height', 20):
+ with option_context('display.max_columns', 5,
+ 'display.width',20,
+ 'display.max_rows', 20):
with option_context('display.expand_frame_repr', True):
self.assertFalse(has_info_repr(df_small))
self.assertFalse(has_expanded_repr(df_small))
@@ -226,19 +227,21 @@ def mkframe(n):
# since not exceeding width
self.assertFalse(has_expanded_repr(df6))
self.assertFalse(has_info_repr(df6))
-
+
with option_context('display.max_rows', 9,
'display.max_columns', 10):
# out vertical bounds can not result in exanded repr
self.assertFalse(has_expanded_repr(df10))
self.assertTrue(has_info_repr(df10))
- with option_context('display.max_columns', 0,
+ # width=None in terminal, auto detection
+ with option_context('display.max_columns', 100,
'display.max_rows', term_width * 20,
- 'display.width', 0):
+ 'display.width', None):
df = mkframe((term_width // 7) - 2)
self.assertFalse(has_expanded_repr(df))
df = mkframe((term_width // 7) + 2)
+ print( df._repr_fits_horizontal_())
self.assertTrue(has_expanded_repr(df))
def test_to_string_repr_unicode(self):
@@ -787,7 +790,8 @@ def test_pprint_thing(self):
def test_wide_repr(self):
with option_context('mode.sim_interactive', True):
col = lambda l, k: [tm.rands(k) for _ in xrange(l)]
- df = DataFrame([col(20, 25) for _ in range(10)])
+ max_cols = get_option('display.max_columns')
+ df = DataFrame([col(max_cols+1, 25) for _ in range(10)])
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
@@ -810,7 +814,8 @@ def test_wide_repr_wide_columns(self):
def test_wide_repr_named(self):
with option_context('mode.sim_interactive', True):
col = lambda l, k: [tm.rands(k) for _ in xrange(l)]
- df = DataFrame([col(20, 25) for _ in range(10)])
+ max_cols = get_option('display.max_columns')
+ df = DataFrame([col(max_cols+1, 25) for _ in range(10)])
df.index.name = 'DataFrame Index'
set_option('display.expand_frame_repr', False)
@@ -833,7 +838,8 @@ def test_wide_repr_multiindex(self):
col = lambda l, k: [tm.rands(k) for _ in xrange(l)]
midx = pandas.MultiIndex.from_arrays([np.array(col(10, 5)),
np.array(col(10, 5))])
- df = DataFrame([col(20, 25) for _ in range(10)],
+ max_cols = get_option('display.max_columns')
+ df = DataFrame([col(max_cols+1, 25) for _ in range(10)],
index=midx)
df.index.names = ['Level 0', 'Level 1']
set_option('display.expand_frame_repr', False)
@@ -853,12 +859,13 @@ def test_wide_repr_multiindex(self):
def test_wide_repr_multiindex_cols(self):
with option_context('mode.sim_interactive', True):
+ max_cols = get_option('display.max_columns')
col = lambda l, k: [tm.rands(k) for _ in xrange(l)]
midx = pandas.MultiIndex.from_arrays([np.array(col(10, 5)),
np.array(col(10, 5))])
- mcols = pandas.MultiIndex.from_arrays([np.array(col(20, 3)),
- np.array(col(20, 3))])
- df = DataFrame([col(20, 25) for _ in range(10)],
+ mcols = pandas.MultiIndex.from_arrays([np.array(col(max_cols+1, 3)),
+ np.array(col(max_cols+1, 3))])
+ df = DataFrame([col(max_cols+1, 25) for _ in range(10)],
index=midx, columns=mcols)
df.index.names = ['Level 0', 'Level 1']
set_option('display.expand_frame_repr', False)
@@ -876,7 +883,8 @@ def test_wide_repr_multiindex_cols(self):
def test_wide_repr_unicode(self):
with option_context('mode.sim_interactive', True):
col = lambda l, k: [tm.randu(k) for _ in xrange(l)]
- df = DataFrame([col(20, 25) for _ in range(10)])
+ max_cols = get_option('display.max_columns')
+ df = DataFrame([col(max_cols+1, 25) for _ in range(10)])
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
| v0.11.0 introduced last-minute changes to the way the dataframe display logic works,
and they haven't proven themselves. The churn is unforunate, but if it's broken,
fix it.
`height` is deprecated.
max_rows solely determines the amount of rows displayed.
rebased on top of #3657.
Merging the previous PR and subsequent last minute fixes so close to the release
contributed to the problems only being discovered after the release.
So please test and report if this works for you, in terminal, qtconsole or notebook,
since the 0.11.1 release is so close.
cc @hayd , @lodagro ,@jreback
| https://api.github.com/repos/pandas-dev/pandas/pulls/3663 | 2013-05-21T00:07:41Z | 2013-05-25T23:08:36Z | 2013-05-25T23:08:36Z | 2014-06-22T03:00:30Z |
BUG: Non-unique indexing via loc and friends fixed when slicing (GH3659_) | diff --git a/RELEASE.rst b/RELEASE.rst
index 9b3cc3683c3de..e02ad66252bdc 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -115,6 +115,7 @@ pandas 0.11.1
and handle missing elements like unique indices (GH3561_)
- Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_)
- Concat to produce a non-unique columns when duplicates are across dtypes is fixed (GH3602_)
+ - Non-unique indexing with a slice via ``loc`` and friends fixed (GH3659_)
- Fixed bug in groupby with empty series referencing a variable before assignment. (GH3510_)
- Fixed bug in mixed-frame assignment with aligned series (GH3492_)
- Fixed bug in selecting month/quarter/year from a series would not select the time element
@@ -215,6 +216,7 @@ pandas 0.11.1
.. _GH3638: https://github.com/pydata/pandas/issues/3638
.. _GH3605: https://github.com/pydata/pandas/issues/3605
.. _GH3606: https://github.com/pydata/pandas/issues/3606
+.. _GH3659: https://github.com/pydata/pandas/issues/3659
.. _Gh3616: https://github.com/pydata/pandas/issues/3616
pandas 0.11.0
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 3e5a4f5676437..3a6913a924c1d 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1219,13 +1219,25 @@ def slice_locs(self, start=None, end=None):
-----
This function assumes that the data is sorted, so use at your own peril
"""
+
+ is_unique = self.is_unique
if start is None:
start_slice = 0
else:
try:
start_slice = self.get_loc(start)
+
+ if not is_unique:
+
+ # get_loc will return a boolean array for non_uniques
+ # if we are not monotonic
+ if isinstance(start_slice,np.ndarray):
+ raise KeyError("cannot peform a slice operation "
+ "on a non-unique non-monotonic index")
+
if isinstance(start_slice, slice):
start_slice = start_slice.start
+
except KeyError:
if self.is_monotonic:
start_slice = self.searchsorted(start, side='left')
@@ -1237,10 +1249,19 @@ def slice_locs(self, start=None, end=None):
else:
try:
end_slice = self.get_loc(end)
+
+ if not is_unique:
+
+ # get_loc will return a boolean array for non_uniques
+ if isinstance(end_slice,np.ndarray):
+ raise KeyError("cannot perform a slice operation "
+ "on a non-unique non-monotonic index")
+
if isinstance(end_slice, slice):
end_slice = end_slice.stop
else:
end_slice += 1
+
except KeyError:
if self.is_monotonic:
end_slice = self.searchsorted(end, side='right')
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index ea684ef11446c..41f20cbcc15ac 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -759,6 +759,7 @@ def _getitem_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
+ self._has_valid_type(key,axis)
return self._get_slice_axis(key, axis=axis)
elif com._is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index e9afa1ae6ec1d..5891e8ac08040 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -953,6 +953,30 @@ def test_iloc_mask(self):
(key,ans,r))
warnings.filterwarnings(action='always', category=UserWarning)
+ def test_non_unique_loc(self):
+ ## GH3659
+ ## non-unique indexer with loc slice
+ ## https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
+
+ # these are going to raise becuase the we are non monotonic
+ df = DataFrame({'A' : [1,2,3,4,5,6], 'B' : [3,4,5,6,7,8]}, index = [0,1,0,1,2,3])
+ self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1,None)]))
+ self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(0,None)]))
+ self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1,2)]))
+
+ # monotonic are ok
+ df = DataFrame({'A' : [1,2,3,4,5,6], 'B' : [3,4,5,6,7,8]}, index = [0,1,0,1,2,3]).sort(axis=0)
+ result = df.loc[1:]
+ expected = DataFrame({'A' : [2,4,5,6], 'B' : [4, 6,7,8]}, index = [1,1,2,3])
+ assert_frame_equal(result,expected)
+
+ result = df.loc[0:]
+ assert_frame_equal(result,df)
+
+ result = df.loc[1:2]
+ expected = DataFrame({'A' : [2,4,5], 'B' : [4,6,7]}, index = [1,1,2])
+ assert_frame_equal(result,expected)
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
| closes #3659
This is if you try a non_monotonic selection on a non_unique index (a mouthful)!
The reason is we cannot determinate a proper start/end point on what to include
```
In [11]: df = DataFrame({'A' : [1,2,3,4,5,6], 'B' : [3,4,5,6,7,8]},
index = [0,1,0,1,2,3])
In [18]: df
Out[18]:
A B
0 1 3
1 2 4
0 3 5
1 4 6
2 5 7
3 6 8
In [12]: df.loc[1:]
KeyError: 'cannot perform a slice operation on a non-unique non-monotonic index'
```
On a non_unique, but monotonic index, however, slicing works normally
(notice, since we are using loc, that both endpoints ARE included)
```
In [13]: df = DataFrame({'A' : [1,2,3,4,5,6], 'B' : [3,4,5,6,7,8]},
index = [0,1,0,1,2,3]).sort(axis=0)
In [14]: df
Out[14]:
A B
0 1 3
0 3 5
1 2 4
1 4 6
2 5 7
3 6 8
In [15]: df.loc[1:]
Out[15]:
A B
1 2 4
1 4 6
2 5 7
3 6 8
In [16]: df.loc[1:2]
Out[16]:
A B
1 2 4
1 4 6
2 5 7
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3661 | 2013-05-20T19:44:44Z | 2013-05-20T23:34:40Z | 2013-05-20T23:34:40Z | 2014-06-20T10:08:11Z |
ENH: special case HTML repr behaviour on ipnb GH3573 | diff --git a/RELEASE.rst b/RELEASE.rst
index e02ad66252bdc..c2d4154bf2587 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -40,8 +40,8 @@ pandas 0.11.1
list of the rows from which to read the index. Added the option,
``tupleize_cols`` to provide compatiblity for the pre 0.11.1 behavior of
writing and reading multi-index columns via a list of tuples. The default in
- 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
- multi-index column.
+ 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
+ multi-index column.
Note: The default value will change in 0.12 to make the default *to* write and
read multi-index columns in the new format. (GH3571_, GH1651_, GH3141_)
- Add iterator to ``Series.str`` (GH3638_)
@@ -79,8 +79,8 @@ pandas 0.11.1
``timedelta64[ns]`` to ``object/int`` (GH3425_)
- Do not allow datetimelike/timedeltalike creation except with valid types
(e.g. cannot pass ``datetime64[ms]``) (GH3423_)
- - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
- DataFrame -> Series if groups are unique. Regression from 0.10.1,
+ - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
+ DataFrame -> Series if groups are unique. Regression from 0.10.1,
partial revert on (GH2893_) with (GH3596_)
- Raise on ``iloc`` when boolean indexing with a label based indexer mask
e.g. a boolean Series, even with integer labels, will raise. Since ``iloc``
@@ -134,11 +134,12 @@ pandas 0.11.1
is a ``list`` or ``tuple``.
- Fixed bug where a time-series was being selected in preference to an actual column name
in a frame (GH3594_)
- - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
+ - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
``np.nan`` or ``np.inf`` as appropriate (GH3590_)
- Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_)
- Fix ``read_csv`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
was failing (GH3611_)
+ - Disable HTML output in qtconsole again. (GH3657_)
- Fix indexing issue in ndim >= 3 with ``iloc`` (GH3617_)
- Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] dtype in ``read_csv``
when ``parse_dates`` is specified (GH3062_)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index a52c932b30ba4..cbc85e6b91c33 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1904,8 +1904,23 @@ def in_qtconsole():
return True
except:
return False
+ return False
+
+def in_ipnb():
+ """
+ check if we're inside an IPython Notebook
+ """
+ try:
+ ip = get_ipython()
+ front_end = (ip.config.get('KernelApp',{}).get('parent_appname',"") or
+ ip.config.get('IPKernelApp',{}).get('parent_appname',""))
+ if 'notebook' in front_end.lower():
+ return True
+ except:
+ return False
+ return False
-def in_ipnb_frontend():
+def in_ipython_frontend():
"""
check if we're inside an an IPython zmq frontend
"""
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 3d38caa84492f..608165f4ed340 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -1718,7 +1718,7 @@ def get_console_size():
# Simple. yeah.
if com.in_interactive_session():
- if com.in_ipnb_frontend():
+ if com.in_ipython_frontend():
# sane defaults for interactive non-shell terminal
# match default for width,height in config_init
from pandas.core.config import get_default_val
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ed56a658d817d..d2476735a256d 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -621,19 +621,26 @@ def _repr_fits_vertical_(self):
max_rows = max_rows or height +1
return len(self) <= min(max_rows, height)
- def _repr_fits_horizontal_(self):
+ def _repr_fits_horizontal_(self,ignore_width=False):
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns. In case off non-interactive session, no
boundaries apply.
+
+ ignore_width is here so ipnb+HTML output can behave the way
+ users expect. display.max_columns remains in effect.
+ GH3541, GH3573
"""
+
+ # everytime you add an if-clause here, god slaughters a kitten.
+ # please. think of the kittens.
width, height = fmt.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if ((max_columns and nb_columns > max_columns) or
- (width and nb_columns > (width // 2))):
+ ((not ignore_width) and width and nb_columns > (width // 2))):
return False
if width is None:
@@ -655,7 +662,12 @@ def _repr_fits_horizontal_(self):
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max([len(l) for l in value.split('\n')])
- return repr_width <= width
+
+ # special case ipnb+HTML repr
+ if not ignore_width:
+ return repr_width <= width
+ else:
+ return True
def __str__(self):
"""
@@ -731,12 +743,22 @@ def _repr_html_(self):
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
+ # ipnb in html repr mode allows scrolling
+ # users strongly prefer to h-scroll a wide HTML table in the browser
+ # then to get a summary view. GH3541, GH3573
+ ipnbh = com.in_ipnb() and get_option('display.notebook_repr_html')
+
+ # qtconsole doesn't report it's line width, and also
+ # behaves badly when outputting an HTML table
+ # that doesn't fit the window, so disable it.
+ if com.in_qtconsole():
+ raise ValueError('Disable HTML output in QtConsole')
if get_option("display.notebook_repr_html"):
fits_vertical = self._repr_fits_vertical_()
fits_horizontal = False
if fits_vertical:
- fits_horizontal = self._repr_fits_horizontal_()
+ fits_horizontal = self._repr_fits_horizontal_(ignore_width=ipnbh)
if fits_horizontal and fits_vertical:
return ('<div style="max-height:1000px;'
| #3573, and SO question mentioned in #3541
| https://api.github.com/repos/pandas-dev/pandas/pulls/3657 | 2013-05-20T16:31:24Z | 2013-05-25T23:08:36Z | 2013-05-25T23:08:36Z | 2014-06-12T18:10:10Z |
DOC: add doc for reading from DataFrame.to_html | diff --git a/RELEASE.rst b/RELEASE.rst
index 3940cd6d10b51..1ab2cab84a70a 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -66,6 +66,7 @@ pandas 0.11.1
- ``melt`` now accepts the optional parameters ``var_name`` and ``value_name``
to specify custom column names of the returned DataFrame (GH3649_),
thanks @hoechenberger
+ - ``read_html`` no longer performs hard date conversion
**API Changes**
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index f4f0546427ef9..6ff3afeb69581 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -68,6 +68,21 @@ Enhancements
- ``pd.read_html()`` can now parse HTML strings, files or urls and return
DataFrames, courtesy of @cpcloud. (GH3477_, GH3605_, GH3606_, GH3616_).
It works with a *single* parser backend: BeautifulSoup4 + html5lib
+ - You can use ``pd.read_html()`` to read the output from ``DataFrame.to_html()`` like so
+
+ .. ipython :: python
+
+ df = DataFrame({'a': range(3), 'b': list('abc')})
+ print df
+ html = df.to_html()
+ alist = pd.read_html(html, infer_types=True, index_col=0)
+ print df == alist[0]
+
+ Note that ``alist`` here is a Python ``list`` so ``pd.read_html()`` and
+ ``DataFrame.to_html()`` are not inverses.
+
+ - ``pd.read_html()`` no longer performs hard conversion of date strings
+ (GH3656_).
- ``HDFStore``
@@ -211,3 +226,4 @@ on GitHub for a complete list.
.. _GH3616: https://github.com/pydata/pandas/issues/3616
.. _GH3605: https://github.com/pydata/pandas/issues/3605
.. _GH3606: https://github.com/pydata/pandas/issues/3606
+.. _GH3656: https://github.com/pydata/pandas/issues/3656
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 732bd57bec418..915c30ecc3c40 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -636,7 +636,6 @@ def _data_to_frame(data, header, index_col, infer_types, skiprows):
# must be sequential since dates trump numbers if both args are given
if infer_types:
df = df.convert_objects(convert_numeric=True)
- df = df.convert_objects(convert_dates='coerce')
if index_col is not None:
cols = df.columns[index_col]
@@ -722,7 +721,7 @@ def _parse(parser, io, match, flavor, header, index_col, skiprows, infer_types,
def read_html(io, match='.+', flavor='html5lib', header=None, index_col=None,
- skiprows=None, infer_types=False, attrs=None):
+ skiprows=None, infer_types=True, attrs=None):
r"""Read an HTML table into a DataFrame.
Parameters
diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py
index 6e2f6ec00d8ac..7ece8f8e07d81 100644
--- a/pandas/io/tests/test_html.py
+++ b/pandas/io/tests/test_html.py
@@ -2,7 +2,6 @@
import re
from cStringIO import StringIO
from unittest import TestCase
-import collections
import numbers
from urllib2 import urlopen
from contextlib import closing
@@ -408,7 +407,7 @@ def try_remove_ws(x):
return x
df = self.run_read_html(self.banklist_data, 'Metcalf',
- attrs={'id': 'table'}, infer_types=True)[0]
+ attrs={'id': 'table'})[0]
ground_truth = read_csv(os.path.join(DATA_PATH, 'banklist.csv'),
converters={'Updated Date': Timestamp,
'Closing Date': Timestamp})
@@ -431,7 +430,9 @@ def try_remove_ws(x):
'Hamilton Bank, NA', 'The Citizens Savings Bank']
dfnew = df.applymap(try_remove_ws).replace(old, new)
gtnew = ground_truth.applymap(try_remove_ws)
- assert_frame_equal(dfnew, gtnew)
+ converted = dfnew.convert_objects(convert_numeric=True)
+ assert_frame_equal(converted.convert_objects(convert_dates='coerce'),
+ gtnew)
@slow
def test_gold_canyon(self):
@@ -487,6 +488,3 @@ def test_lxml_finds_tbody():
url = ('http://ndb.nal.usda.gov/ndb/foods/show/1732?fg=&man=&'
'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
assert get_lxml_elements(url, 'tbody')
-
-
-
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index f38fe61d453c2..823d2c81bb72c 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -126,13 +126,13 @@ def assert_almost_equal(a, b, check_less_precise = False):
return assert_dict_equal(a, b)
if isinstance(a, basestring):
- assert a == b, "{0} != {1}".format(a, b)
+ assert a == b, "%s != %s" % (a, b)
return True
if isiterable(a):
np.testing.assert_(isiterable(b))
na, nb = len(a), len(b)
- assert na == nb, "{0} != {1}".format(na, nb)
+ assert na == nb, "%s != %s" % (na, nb)
if np.array_equal(a, b):
return True
@@ -154,8 +154,6 @@ def assert_almost_equal(a, b, check_less_precise = False):
if check_less_precise:
dtype_a = np.dtype(type(a))
dtype_b = np.dtype(type(b))
- if dtype_a.kind == 'i' and dtype_b == 'i':
- pass
if dtype_a.kind == 'f' and dtype_b == 'f':
if dtype_a.itemsize <= 4 and dtype_b.itemsize <= 4:
decimal = 3
| https://api.github.com/repos/pandas-dev/pandas/pulls/3656 | 2013-05-20T14:03:15Z | 2013-05-21T18:04:45Z | 2013-05-21T18:04:45Z | 2014-06-21T15:38:39Z | |
TST: add html5lib to travis | diff --git a/README.rst b/README.rst
index 2d49c168eac60..a8ecf01aac953 100644
--- a/README.rst
+++ b/README.rst
@@ -95,8 +95,14 @@ Optional dependencies
- Both `html5lib <https://github.com/html5lib/html5lib-python>`__ **and**
`Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for
reading HTML tables
- - These can both easily be installed by ``pip install html5lib`` and ``pip
- install beautifulsoup4``.
+
+ .. warning::
+
+ If you are on a 32-bit machine you need to install an older version of
+ Beautiful Soup. Version 4.0.2 of BeautifulSoup has been tested on Ubuntu
+ 12.04.02 32-bit.
+
+ - Any recent version of ``html5lib`` is okay.
- `boto <https://pypi.python.org/pypi/boto>`__: necessary for Amazon S3 access.
diff --git a/ci/install.sh b/ci/install.sh
index cd897cf7313c2..a091834a9570f 100755
--- a/ci/install.sh
+++ b/ci/install.sh
@@ -30,7 +30,13 @@ fi;
#scipy is not included in the cached venv
if [ x"$FULL_DEPS" == x"true" ] ; then
# for pytables gets the lib as well
- sudo apt-get $APT_ARGS install libhdf5-serial-dev;
+ sudo apt-get $APT_ARGS install libhdf5-serial-dev
+
+ if [ ${TRAVIS_PYTHON_VERSION:0:1} == "3" ]; then
+ sudo apt-get $APT_ARGS install python3-bs4
+ elif [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then
+ sudo apt-get $APT_ARGS install python-bs4
+ fi
if [ ${TRAVIS_PYTHON_VERSION} == "3.2" ]; then
sudo apt-get $APT_ARGS install python3-scipy
@@ -76,8 +82,13 @@ if ( ! $VENV_FILE_AVAILABLE ); then
pip install $PIP_ARGS 'http://downloads.sourceforge.net/project/pytseries/scikits.timeseries/0.91.3/scikits.timeseries-0.91.3.tar.gz?r='
pip install $PIP_ARGS patsy
pip install $PIP_ARGS lxml
- pip install $PIP_ARGS beautifulsoup4
+ pip install $PIP_ARGS html5lib
+ if [ ${TRAVIS_PYTHON_VERSION:0:1} == "3" ]; then
+ sudo apt-get $APT_ARGS remove python3-lxml
+ elif [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then
+ sudo apt-get $APT_ARGS remove python-lxml
+ fi
# fool statsmodels into thinking pandas was already installed
# so it won't refuse to install itself. We want it in the zipped venv
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 658d9d78d5b29..407746e3cb000 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -102,8 +102,14 @@ Optional Dependencies
* Both `html5lib <https://github.com/html5lib/html5lib-python>`__ **and**
`Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for
reading HTML tables
- * These can both easily be installed by ``pip install html5lib`` and ``pip
- install beautifulsoup4``.
+
+ .. warning::
+
+ If you are on a 32-bit machine you need to install an older version of
+ Beautiful Soup. Version 4.0.2 of BeautifulSoup has been tested on Ubuntu
+ 12.04.02 32-bit.
+
+ * Any recent version of ``html5lib`` is okay.
.. note::
| closes #3654
| https://api.github.com/repos/pandas-dev/pandas/pulls/3655 | 2013-05-20T13:29:48Z | 2013-05-21T13:43:38Z | 2013-05-21T13:43:38Z | 2014-07-01T18:52:09Z |
TST: fix unicdoe errors test_strings | diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index d49338af698d1..d057dc5304277 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -350,8 +350,8 @@ def test_replace(self):
tm.assert_series_equal(result, exp)
#flags + unicode
- values = Series(["abcd,\xc3\xa0".decode("utf-8")])
- exp = Series(["abcd, \xc3\xa0".decode("utf-8")])
+ values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
+ exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace("(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
| PTF
| https://api.github.com/repos/pandas-dev/pandas/pulls/3652 | 2013-05-20T01:14:18Z | 2013-05-20T01:20:28Z | 2013-05-20T01:20:28Z | 2014-07-16T08:09:51Z |
ENH: Allow for custom variable/value column names when melt()'ing | diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst
index 9a7a9c2a87e52..5f7526235a4c3 100644
--- a/doc/source/reshaping.rst
+++ b/doc/source/reshaping.rst
@@ -200,7 +200,9 @@ Reshaping by Melt
The ``melt`` function found in ``pandas.core.reshape`` is useful to massage a
DataFrame into a format where one or more columns are identifier variables,
while all other columns, considered measured variables, are "pivoted" to the
-row axis, leaving just two non-identifier columns, "variable" and "value".
+row axis, leaving just two non-identifier columns, "variable" and "value". The
+names of those columns can be customized by supplying the ``var_name`` and
+``value_name`` parameters.
For instance,
@@ -212,6 +214,7 @@ For instance,
'weight' : [130, 150]})
cheese
melt(cheese, id_vars=['first', 'last'])
+ melt(cheese, id_vars=['first', 'last'], var_name='quantity')
Combining with stats and GroupBy
--------------------------------
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index a42765591c818..13d08a9fc9c76 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -138,6 +138,9 @@ Enhancements
import os
os.remove('mi.csv')
+ - ``pd.melt()`` now accepts the optional parameters ``var_name`` and ``value_name``
+ to specify custom column names of the returned DataFrame.
+
Bug Fixes
~~~~~~~~~
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index b2e5bb01f53af..4e0f35f5d9555 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -600,7 +600,8 @@ def _stack_multi_columns(frame, level=-1, dropna=True):
return result
-def melt(frame, id_vars=None, value_vars=None):
+def melt(frame, id_vars=None, value_vars=None,
+ var_name='variable', value_name='value'):
"""
"Unpivots" a DataFrame from wide format to long format, optionally leaving
id variables set
@@ -608,8 +609,10 @@ def melt(frame, id_vars=None, value_vars=None):
Parameters
----------
frame : DataFrame
- id_vars :
- value_vars :
+ id_vars : tuple, list, or ndarray
+ value_vars : tuple, list, or ndarray
+ var_name : scalar
+ value_name : scalar
Examples
--------
@@ -621,9 +624,16 @@ def melt(frame, id_vars=None, value_vars=None):
>>> melt(df, id_vars=['A'], value_vars=['B'])
A variable value
- a B 1
- b B 3
- c B 5
+ a B 1
+ b B 3
+ c B 5
+
+ >>> melt(df, id_vars=['A'], value_vars=['B'],
+ ... var_name='myVarname', value_name='myValname')
+ A myVarname myValname
+ a B 1
+ b B 3
+ c B 5
"""
# TODO: what about the existing index?
if id_vars is not None:
@@ -648,11 +658,11 @@ def melt(frame, id_vars=None, value_vars=None):
for col in id_vars:
mdata[col] = np.tile(frame.pop(col).values, K)
- mcolumns = id_vars + ['variable', 'value']
+ mcolumns = id_vars + [var_name, value_name]
- mdata['value'] = frame.values.ravel('F')
-
- mdata['variable'] = np.asarray(frame.columns).repeat(N)
+ mdata[value_name] = frame.values.ravel('F')
+ mdata[var_name] = np.asarray(frame.columns).repeat(N)
+
return DataFrame(mdata, columns=mcolumns)
diff --git a/pandas/tests/test_reshape.py b/pandas/tests/test_reshape.py
index 278e745c7d312..5ddb30b0e1377 100644
--- a/pandas/tests/test_reshape.py
+++ b/pandas/tests/test_reshape.py
@@ -25,14 +25,73 @@ def test_melt():
df['id1'] = (df['A'] > 0).astype(int)
df['id2'] = (df['B'] > 0).astype(int)
- molten1 = melt(df)
- molten2 = melt(df, id_vars=['id1'])
- molten3 = melt(df, id_vars=['id1', 'id2'])
- molten4 = melt(df, id_vars=['id1', 'id2'],
+ var_name = 'var'
+ value_name = 'val'
+
+ # Default column names
+ result = melt(df)
+ result1 = melt(df, id_vars=['id1'])
+ result2 = melt(df, id_vars=['id1', 'id2'])
+ result3 = melt(df, id_vars=['id1', 'id2'],
value_vars='A')
- molten5 = melt(df, id_vars=['id1', 'id2'],
+ result4 = melt(df, id_vars=['id1', 'id2'],
value_vars=['A', 'B'])
-
+
+ expected4 = DataFrame({'id1': df['id1'].tolist() * 2,
+ 'id2': df['id2'].tolist() * 2,
+ 'variable': ['A']*10 + ['B']*10,
+ 'value': df['A'].tolist() + df['B'].tolist()},
+ columns=['id1', 'id2', 'variable', 'value'])
+ tm.assert_frame_equal(result4, expected4)
+
+ # Supply custom name for the 'variable' column
+ result5 = melt(df, var_name=var_name)
+ result6 = melt(df, id_vars=['id1'], var_name=var_name)
+ result7 = melt(df, id_vars=['id1', 'id2'], var_name=var_name)
+ result8 = melt(df, id_vars=['id1', 'id2'],
+ value_vars='A', var_name=var_name)
+ result9 = melt(df, id_vars=['id1', 'id2'],
+ value_vars=['A', 'B'], var_name=var_name)
+
+ expected9 = DataFrame({'id1': df['id1'].tolist() * 2,
+ 'id2': df['id2'].tolist() * 2,
+ var_name: ['A']*10 + ['B']*10,
+ 'value': df['A'].tolist() + df['B'].tolist()},
+ columns=['id1', 'id2', var_name, 'value'])
+ tm.assert_frame_equal(result9, expected9)
+
+ # Supply custom name for the 'value' column
+ result10 = melt(df, value_name=value_name)
+ result11 = melt(df, id_vars=['id1'], value_name=value_name)
+ result12 = melt(df, id_vars=['id1', 'id2'], value_name=value_name)
+ result13 = melt(df, id_vars=['id1', 'id2'],
+ value_vars='A', value_name=value_name)
+ result14 = melt(df, id_vars=['id1', 'id2'],
+ value_vars=['A', 'B'], value_name=value_name)
+
+ expected14 = DataFrame({'id1': df['id1'].tolist() * 2,
+ 'id2': df['id2'].tolist() * 2,
+ 'variable': ['A']*10 + ['B']*10,
+ value_name: df['A'].tolist() + df['B'].tolist()},
+ columns=['id1', 'id2', 'variable', value_name])
+ tm.assert_frame_equal(result14, expected14)
+
+ # Supply custom names for the 'variable' and 'value' columns
+ result15 = melt(df, var_name=var_name, value_name=value_name)
+ result16 = melt(df, id_vars=['id1'], var_name=var_name, value_name=value_name)
+ result17 = melt(df, id_vars=['id1', 'id2'],
+ var_name=var_name, value_name=value_name)
+ result18 = melt(df, id_vars=['id1', 'id2'],
+ value_vars='A', var_name=var_name, value_name=value_name)
+ result19 = melt(df, id_vars=['id1', 'id2'],
+ value_vars=['A', 'B'], var_name=var_name, value_name=value_name)
+
+ expected19 = DataFrame({'id1': df['id1'].tolist() * 2,
+ 'id2': df['id2'].tolist() * 2,
+ var_name: ['A']*10 + ['B']*10,
+ value_name: df['A'].tolist() + df['B'].tolist()},
+ columns=['id1', 'id2', var_name, value_name])
+ tm.assert_frame_equal(result19, expected19)
def test_convert_dummies():
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
| ENH, CLN: When melt()'ing, allow for specification of custom variable and value names of the resulting DataFrame. Also fix indentation in melt() example to reflect actual output.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3649 | 2013-05-19T18:59:50Z | 2013-05-21T14:25:37Z | 2013-05-21T14:25:37Z | 2014-06-12T14:57:21Z |
TST: fixup 32-bit failing tests | diff --git a/pandas/core/format.py b/pandas/core/format.py
index cd4364edc6662..3d38caa84492f 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -778,8 +778,6 @@ def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None,
tupleize_cols=True):
self.engine = engine # remove for 0.12
-
- obj._consolidate_inplace()
self.obj = obj
self.path_or_buf = path_or_buf
@@ -835,7 +833,7 @@ def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None,
self.blocks = self.obj._data.blocks
ncols = sum(len(b.items) for b in self.blocks)
self.data =[None] * ncols
- self.column_map = self.obj._data.get_items_map()
+ self.column_map = self.obj._data.get_items_map(use_cached=False)
if chunksize is None:
chunksize = (100000/ (len(self.cols) or 1)) or 1
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 849776940512e..ca04bd3fe26e0 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1130,17 +1130,20 @@ def maybe_create_block(block):
# when we recreate the block manager if needed
return getattr(self,'_ref_locs',None)
- def get_items_map(self):
+ def get_items_map(self, use_cached=True):
"""
return an inverted ref_loc map for an item index
block -> item (in that block) location -> column location
+
+ use_cached : boolean, use the cached items map, or recreate
"""
# cache check
- im = getattr(self,'_items_map',None)
- if im is not None:
- return im
-
+ if use_cached:
+ im = getattr(self,'_items_map',None)
+ if im is not None:
+ return im
+
im = dict()
rl = self._set_ref_locs()
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 413c39a330ad2..50bddb6ecd85c 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4942,7 +4942,7 @@ def test_to_csv_no_index(self):
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df,result)
- df['c3'] = [7,8,9]
+ df['c3'] = Series([7,8,9],dtype='int64')
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df,result)
@@ -5000,7 +5000,8 @@ def _make_frame(names=None):
columns=MultiIndex.from_tuples([('bah', 'foo'),
('bah', 'bar'),
('ban', 'baz')],
- names=names))
+ names=names),
+ dtype='int64')
# column & index are multi-index
df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
| PTF
| https://api.github.com/repos/pandas-dev/pandas/pulls/3648 | 2013-05-19T17:20:14Z | 2013-05-19T17:28:31Z | 2013-05-19T17:28:31Z | 2014-07-16T08:09:47Z |
BUG: (GH3602) Concat to produce a non-unique columns when duplicates are across dtypes | diff --git a/RELEASE.rst b/RELEASE.rst
index 74bafd419af54..4599c2a7553da 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -111,6 +111,7 @@ pandas 0.11.1
- Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_)
and handle missing elements like unique indices (GH3561_)
- Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_)
+ - Concat to produce a non-unique columns when duplicates are across dtypes is fixed (GH3602_)
- Fixed bug in groupby with empty series referencing a variable before assignment. (GH3510_)
- Fixed bug in mixed-frame assignment with aligned series (GH3492_)
- Fixed bug in selecting month/quarter/year from a series would not select the time element
@@ -196,6 +197,7 @@ pandas 0.11.1
.. _GH3626: https://github.com/pydata/pandas/issues/3626
.. _GH3601: https://github.com/pydata/pandas/issues/3601
.. _GH3631: https://github.com/pydata/pandas/issues/3631
+.. _GH3602: https://github.com/pydata/pandas/issues/3602
.. _GH1512: https://github.com/pydata/pandas/issues/1512
=======
.. _GH3571: https://github.com/pydata/pandas/issues/3571
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index a724ce96a7381..a9911ed6db008 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -64,6 +64,7 @@ API changes
Enhancements
~~~~~~~~~~~~
+
- ``pd.read_html()`` can now parse HTML string, files or urls and return dataframes
courtesy of @cpcloud. (GH3477_)
- ``HDFStore``
@@ -114,10 +115,37 @@ Enhancements
import os
os.remove('mi.csv')
+Bug Fixes
+~~~~~~~~~
+
+ - Non-unique index support clarified (GH3468_).
+
+ - Fix assigning a new index to a duplicate index in a DataFrame would fail (GH3468_)
+ - Fix construction of a DataFrame with a duplicate index
+ - ref_locs support to allow duplicative indices across dtypes,
+ allows iget support to always find the index (even across dtypes) (GH2194_)
+ - applymap on a DataFrame with a non-unique index now works
+ (removed warning) (GH2786_), and fix (GH3230_)
+ - Fix to_csv to handle non-unique columns (GH3495_)
+ - Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_)
+ and handle missing elements like unique indices (GH3561_)
+ - Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_)
+ - Concat to produce a non-unique columns when duplicates are across dtypes is fixed (GH3602_)
+
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
on GitHub for a complete list.
+.. _GH3468: https://github.com/pydata/pandas/issues/3468
+.. _GH2194: https://github.com/pydata/pandas/issues/2194
+.. _GH2786: https://github.com/pydata/pandas/issues/2786
+.. _GH3230: https://github.com/pydata/pandas/issues/3230
+.. _GH3495: https://github.com/pydata/pandas/issues/3495
+.. _GH3455: https://github.com/pydata/pandas/issues/3455
+.. _GH3457: https://github.com/pydata/pandas/issues/3457
+.. _GH3561: https://github.com/pydata/pandas/issues/3561
+.. _GH3562: https://github.com/pydata/pandas/issues/3562
+.. _GH3602: https://github.com/pydata/pandas/issues/3602
.. _GH2437: https://github.com/pydata/pandas/issues/2437
.. _GH2852: https://github.com/pydata/pandas/issues/2852
.. _GH3477: https://github.com/pydata/pandas/issues/3477
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 7f05a045e36af..c77c043d26acd 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -1043,6 +1043,7 @@ def _concat_blocks(self, blocks):
'DataFrames')
return make_block(concat_values, blocks[0].items, self.new_axes[0])
else:
+
offsets = np.r_[0, np.cumsum([len(x._data.axes[0]) for
x in self.objs])]
indexer = np.concatenate([offsets[i] + b.ref_locs
@@ -1052,12 +1053,21 @@ def _concat_blocks(self, blocks):
concat_items = indexer
else:
concat_items = self.new_axes[0].take(indexer)
-
+
if self.ignore_index:
ref_items = self._get_fresh_axis()
return make_block(concat_values, concat_items, ref_items)
- return make_block(concat_values, concat_items, self.new_axes[0])
+ block = make_block(concat_values, concat_items, self.new_axes[0])
+
+ # we need to set the ref_locs in this block so we have the mapping
+ # as we now have a non-unique index across dtypes, and we need to
+ # map the column location to the block location
+ # GH3602
+ if not self.new_axes[0].is_unique:
+ block._ref_locs = indexer
+
+ return block
def _concat_single_item(self, objs, item):
all_values = []
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index 2fb527b2eee6b..e230a5b2d25b3 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -1682,6 +1682,20 @@ def test_concat_bug_2972(self):
expected.columns=['same name', 'same name']
assert_frame_equal(result, expected)
+ def test_concat_bug_3602(self):
+
+ # GH 3602, duplicate columns
+ df1 = DataFrame({'firmNo' : [0,0,0,0], 'stringvar' : ['rrr', 'rrr', 'rrr', 'rrr'], 'prc' : [6,6,6,6] })
+ df2 = DataFrame({'misc' : [1,2,3,4], 'prc' : [6,6,6,6], 'C' : [9,10,11,12]})
+ expected = DataFrame([[0,6,'rrr',9,1,6],
+ [0,6,'rrr',10,2,6],
+ [0,6,'rrr',11,3,6],
+ [0,6,'rrr',12,4,6]])
+ expected.columns = ['firmNo','prc','stringvar','C','misc','prc']
+
+ result = concat([df1,df2],axis=1)
+ assert_frame_equal(result,expected)
+
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range('01-Jan-2013', '01-Jan-2014', freq='MS')[0:-1]
s1 = Series(randn(len(dates)), index=dates, name='value')
| closes #3602
DOC: added non-unique index issues to v0.11.1
| https://api.github.com/repos/pandas-dev/pandas/pulls/3647 | 2013-05-19T14:06:28Z | 2013-05-19T15:12:02Z | 2013-05-19T15:12:02Z | 2014-06-17T16:52:38Z |
ENH: Allow flags in str.replace keywords | diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 3521c9ff94b11..87a9ff7e9d95d 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -692,8 +692,9 @@ def contains(self, pat, case=True, flags=0, na=np.nan):
return self._wrap_result(result)
@copy(str_replace)
- def replace(self, pat, repl, n=-1, case=True):
- result = str_replace(self.series, pat, repl, n=n, case=case)
+ def replace(self, pat, repl, n=-1, case=True, flags=0):
+ result = str_replace(self.series, pat, repl, n=n, case=case,
+ flags=flags)
return self._wrap_result(result)
@copy(str_repeat)
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 2134eea186649..7763ed7bc75db 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -271,6 +271,13 @@ def test_replace(self):
exp = Series([u'foobarBAD', NA])
tm.assert_series_equal(result, exp)
+ #flags + unicode
+ values = Series(["abcd,\xc3\xa0".decode("utf-8")])
+ exp = Series(["abcd, \xc3\xa0".decode("utf-8")])
+ result = values.str.replace("(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
+ tm.assert_series_equal(result, exp)
+
+
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
| Fix what appears to be an oversight.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3646 | 2013-05-19T04:18:48Z | 2013-05-19T21:13:54Z | 2013-05-19T21:13:53Z | 2014-07-16T08:09:44Z |
ENH: add Series.str iterator | diff --git a/RELEASE.rst b/RELEASE.rst
index 4e92ecb24574a..f97708de13442 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -43,6 +43,7 @@ pandas 0.11.1
multi-index column.
Note: The default value will change in 0.12 to make the default *to* write and
read multi-index columns in the new format. (GH3571_, GH1651_, GH3141_)
+ - Add iterator to ``Series.str`` (GH3638_)
**Improvements to existing features**
@@ -199,7 +200,7 @@ pandas 0.11.1
.. _GH3571: https://github.com/pydata/pandas/issues/3571
.. _GH1651: https://github.com/pydata/pandas/issues/1651
.. _GH3141: https://github.com/pydata/pandas/issues/3141
-
+.. _GH3638: https://github.com/pydata/pandas/issues/3638
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index a724ce96a7381..e9861301231d8 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -80,6 +80,27 @@ Enhancements
- ``DataFrame.replace()`` now allows regular expressions on contained
``Series`` with object dtype. See the examples section in the regular docs
:ref:`Replacing via String Expression <missing_data.replace_expression>`
+ - ``Series.str`` now supports iteration (GH3638_). You can iterate over the
+ individual elements of each string in the ``Series``. Each iteration yields
+ yields a ``Series`` with either a single character at each index of the
+ original ``Series`` or ``NaN``. For example,
+
+ .. ipython:: python
+
+ strs = 'go', 'bow', 'joe', 'slow'
+ ds = Series(strs)
+
+ for s in ds.str:
+ print s
+
+ s
+ s.dropna().values.item() == 'w'
+
+ The last element yielded by the iterator will be a ``Series`` containing
+ the last element of the longest string in the ``Series`` with all other
+ elements being ``NaN``. Here since ``'wikitravel'`` is the longest string
+ and there are no other strings with the same length ``'l'`` is the only
+ non-null string in the yielded ``Series``.
- Multi-index column support for reading and writing csvs
@@ -133,3 +154,4 @@ on GitHub for a complete list.
.. _GH3571: https://github.com/pydata/pandas/issues/3571
.. _GH1651: https://github.com/pydata/pandas/issues/1651
.. _GH3141: https://github.com/pydata/pandas/issues/3141
+.. _GH3638: https://github.com/pydata/pandas/issues/3638
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 3521c9ff94b11..13e2b3b0a4cab 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -661,6 +661,14 @@ def __getitem__(self, key):
else:
return self.get(key)
+ def __iter__(self):
+ i = 0
+ g = self.get(i)
+ while g.notnull().any():
+ yield g
+ i += 1
+ g = self.get(i)
+
def _wrap_result(self, result):
return Series(result, index=self.series.index,
name=self.series.name)
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 2134eea186649..0eac88419f5e3 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -10,6 +10,8 @@
from numpy import nan as NA
import numpy as np
+from numpy.testing import assert_array_equal
+from numpy.random import randint
from pandas import (Index, Series, TimeSeries, DataFrame, isnull, notnull,
bdate_range, date_range)
@@ -25,6 +27,82 @@ class TestStringMethods(unittest.TestCase):
_multiprocess_can_split_ = True
+ def test_iter(self):
+ # GH3638
+ strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
+ ds = Series(strs)
+
+ for s in ds.str:
+ # iter must yield a Series
+ self.assert_(isinstance(s, Series))
+
+ # indices of each yielded Series should be equal to the index of
+ # the original Series
+ assert_array_equal(s.index, ds.index)
+
+ for el in s:
+ # each element of the series is either a basestring or nan
+ self.assert_(isinstance(el, basestring) or isnull(el))
+
+ # desired behavior is to iterate until everything would be nan on the
+ # next iter so make sure the last element of the iterator was 'l' in
+ # this case since 'wikitravel' is the longest string
+ self.assertEqual(s.dropna().values.item(), 'l')
+
+ def test_iter_empty(self):
+ ds = Series([], dtype=object)
+
+ i, s = 100, 1
+
+ for i, s in enumerate(ds.str):
+ pass
+
+ # nothing to iterate over so nothing defined values should remain
+ # unchanged
+ self.assertEqual(i, 100)
+ self.assertEqual(s, 1)
+
+ def test_iter_single_element(self):
+ ds = Series(['a'])
+
+ for i, s in enumerate(ds.str):
+ pass
+
+ self.assertFalse(i)
+ assert_series_equal(ds, s)
+
+ def test_iter_numeric_try_string(self):
+ # behavior identical to empty series
+ dsi = Series(range(4))
+
+ i, s = 100, 'h'
+
+ for i, s in enumerate(dsi.str):
+ pass
+
+ self.assertEqual(i, 100)
+ self.assertEqual(s, 'h')
+
+ dsf = Series(np.arange(4.))
+
+ for i, s in enumerate(dsf.str):
+ pass
+
+ self.assertEqual(i, 100)
+ self.assertEqual(s, 'h')
+
+ def test_iter_object_try_string(self):
+ ds = Series([slice(None, randint(10), randint(10, 20))
+ for _ in xrange(4)])
+
+ i, s = 100, 'h'
+
+ for i, s in enumerate(ds.str):
+ pass
+
+ self.assertEqual(i, 100)
+ self.assertEqual(s, 'h')
+
def test_cat(self):
one = ['a', 'a', 'b', 'b', 'c', NA]
two = ['a', NA, 'b', 'd', 'foo', NA]
| closes #3638.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3645 | 2013-05-19T01:19:43Z | 2013-05-19T16:15:48Z | 2013-05-19T16:15:48Z | 2014-06-26T17:45:54Z |
BUG : Io sql one column (issue #3628) | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 3002f2f620f5e..b54a30d95bb54 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -228,7 +228,11 @@ def _write_sqlite(frame, table, names, cur):
wildcards = ','.join(['?'] * len(names))
insert_query = 'INSERT INTO %s (%s) VALUES (%s)' % (
table, col_names, wildcards)
- data = [tuple(x) for x in frame.values]
+ # pandas types are badly handled if there is only 1 column ( Issue #3628 )
+ if not len(frame.columns )==1 :
+ data = [tuple(x) for x in frame.values]
+ else :
+ data = [tuple(x) for x in frame.values.tolist()]
cur.executemany(insert_query, data)
def _write_mysql(frame, table, names, cur):
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index b443c55f97b8d..1daa50c70a900 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -219,6 +219,18 @@ def test_keyword_as_column_names(self):
df = DataFrame({'From':np.ones(5)})
sql.write_frame(df, con = self.db, name = 'testkeywords')
+ def test_onecolumn_of_integer(self):
+ '''
+ a column_of_integers dataframe should transfer well to sql
+ '''
+ mono_df=DataFrame([1 , 2], columns=['c0'])
+ sql.write_frame(mono_df, con = self.db, name = 'mono_df')
+ # computing the sum via sql
+ con_x=self.db
+ the_sum=sum([my_c0[0] for my_c0 in con_x.execute("select * from mono_df")])
+ # it should not fail, and gives 3 ( Issue #3628 )
+ self.assertEqual(the_sum , 3)
+
class TestMySQL(unittest.TestCase):
| closes #3628
This should be the proper pull request to
- TST test the problem,
- BUG the problem of panda fail to write a single column of integer to sqlite
Test successfully on Travis CI
| https://api.github.com/repos/pandas-dev/pandas/pulls/3644 | 2013-05-18T22:14:11Z | 2013-05-19T16:49:55Z | 2013-05-19T16:49:55Z | 2014-06-12T07:39:51Z |
WIP: Support metadata at de/serialization time (moved) | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 0d7913819f115..1bd9b8c55a762 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -450,6 +450,7 @@ def __init__(self, data=None, index=None, columns=None, dtype=None,
raise PandasError('DataFrame constructor not properly called!')
NDFrame.__init__(self, mgr)
+ self.meta=dict()
@classmethod
def _from_axes(cls, data, axes):
@@ -1696,10 +1697,20 @@ def swapaxes(self, i, j):
# Picklability
def __getstate__(self):
- return self._data
+ return self._data,dict(meta=self.meta)
def __setstate__(self, state):
# old DataFrame pickle
+ attrs = {}
+ if ( isinstance(state, tuple)
+ and isinstance(state[0],BlockManager)
+ and isinstance(state[1],dict)):
+ attrs=state[1]
+
+ # put things back to the prev version and
+ # reuse the old path
+ state = state[0]
+
if isinstance(state, BlockManager):
self._data = state
elif isinstance(state[0], dict): # pragma: no cover
@@ -1711,6 +1722,9 @@ def __setstate__(self, state):
# ordinarily created in NDFrame
self._item_cache = {}
+ for k,v in attrs.items():
+ setattr(self,k,v)
+
# legacy pickle formats
def _unpickle_frame_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index d1f87e4e7c932..b55dd6d0dc59b 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -238,6 +238,7 @@ def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
self._init_data(
data=data, items=items, major_axis=major_axis, minor_axis=minor_axis,
copy=copy, dtype=dtype)
+ self.meta = {}
def _init_data(self, data, copy, dtype, **kwargs):
""" generate ND initialization; axes are passed as required objects to __init__ """
@@ -706,10 +707,21 @@ def pop(self, item):
def __getstate__(self):
"Returned pickled representation of the panel"
- return self._data
+ return self._data,dict(meta=self.meta)
def __setstate__(self, state):
# old Panel pickle
+ attrs = {}
+ print( state)
+ if ( isinstance(state, tuple)
+ and isinstance(state[0],BlockManager)
+ and isinstance(state[1],dict)):
+ attrs = state[1]
+
+ # put things back to the prev version and
+ # reuse the old path
+ state = state[0]
+
if isinstance(state, BlockManager):
self._data = state
elif len(state) == 4: # pragma: no cover
@@ -718,6 +730,9 @@ def __setstate__(self, state):
raise ValueError('unrecognized pickle')
self._item_cache = {}
+ for k,v in attrs.items():
+ setattr(self,k,v)
+
def _unpickle_panel_compat(self, state): # pragma: no cover
"Unpickle the panel"
_unpickle = com._unpickle_array
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 8427274488cef..52e9193089445 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -509,6 +509,7 @@ def __init__(self, data=None, index=None, dtype=None, name=None,
input data
copy : boolean, default False
"""
+ self.meta = {}
pass
@property
@@ -539,7 +540,7 @@ def __contains__(self, key):
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(ndarray.__reduce__(self))
- subclass_state = (self.index, self.name)
+ subclass_state = (self.index, dict(name=self.name,meta=self.meta))
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
@@ -548,6 +549,16 @@ def __setstate__(self, state):
nd_state, own_state = state
ndarray.__setstate__(self, nd_state)
+ attrs = {}
+ if len(own_state) > 1 and isinstance(own_state[1],dict):
+ attrs = own_state[1]
+
+ # and put things back they the previous pickle
+ # schema worked
+ own_state = (own_state[0],attrs.get('name'))
+
+ index, dict_or_name = own_state[0], None
+
# backwards compat
index, name = own_state[0], None
if len(own_state) > 1:
@@ -556,6 +567,9 @@ def __setstate__(self, state):
self.index = _handle_legacy_indexes([index])[0]
self.name = name
+ for k,v in attrs.items():
+ setattr(self,k,v)
+
# indexers
@property
def axes(self):
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 213547c4132b9..80f4f0aa23c37 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -27,7 +27,8 @@
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
- ensure_clean)
+ ensure_clean,
+ makeCustomDataframe as mkdf )
from pandas.util import py3compat
from pandas.util.compat import OrderedDict
@@ -4616,7 +4617,6 @@ def test_to_csv_from_csv(self):
@slow
def test_to_csv_moar(self):
- from pandas.util.testing import makeCustomDataframe as mkdf
path = '__tmp_to_csv_moar__'
chunksize=1000
@@ -6021,7 +6021,6 @@ def test_replace_mixed(self):
assert_frame_equal(result,expected)
# test case from
- from pandas.util.testing import makeCustomDataframe as mkdf
df = DataFrame({'A' : Series([3,0],dtype='int64'), 'B' : Series([0,3],dtype='int64') })
result = df.replace(3, df.mean().to_dict())
expected = df.copy().astype('float64')
@@ -9428,6 +9427,20 @@ def test_any_all(self):
# df.any(1, bool_only=True)
# df.all(1, bool_only=True)
+ def test_meta_serialization(self):
+ import pandas as pd
+ df=mkdf(10,5)
+ df.meta == {}
+ # create some kv pairs for serialization
+ df.meta['Im']="persistent"
+ # roundtrip
+ with ensure_clean() as path:
+ df.save(path)
+ dfrt =pd.load(path)
+
+ # still here
+ self.assertEqual(dfrt.meta['Im'],'persistent')
+
def test_consolidate_datetime64(self):
# numpy vstack bug
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 921097e3408fd..55632e9e8424f 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -40,6 +40,21 @@ def test_pickle(self):
unpickled = cPickle.loads(pickled)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
+ def test_meta_serialization(self):
+ import pandas as pd
+
+ p = self.panel
+ p.meta = {}
+ # create some kv pairs for serialization
+ p.meta['Im']="persistent"
+ # roundtrip
+ with ensure_clean() as path:
+ p.save(path)
+ prt =pd.load(path)
+
+ # still here
+ self.assertEqual(prt.meta['Im'],'persistent')
+
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 4f17135385748..82261027a7878 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -540,6 +540,20 @@ def test_fromDict(self):
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
+ def test_meta_serialization(self):
+ import pandas as pd
+ s=Series([np.random.randn(100)])
+ s.meta == {}
+ # create some kv pairs for serialization
+ s.meta['Im']="persistent"
+ # roundtrip
+ with ensure_clean() as path:
+ s.save(path)
+ srt =pd.load(path)
+
+ # still here
+ self.assertEqual(srt.meta['Im'],'persistent')
+
def test_from_json_to_json(self):
raise nose.SkipTest
| keeping https://github.com/pydata/pandas/pull/3297 alive, which had the target PR branch wrong.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3643 | 2013-05-18T15:21:13Z | 2013-07-22T20:16:36Z | null | 2014-06-24T19:48:54Z |
single example for release notes | diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index ce19241030704..9df00afcfcf46 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -406,7 +406,7 @@ or you can pass the nested dictionary like so
.. ipython:: python
- df.replace(regex={'b': {'b': r'\s*\.\s*'}})
+ df.replace(regex={'b': {r'\s*\.\s*': nan}})
You can also use the group of a regular expression match when replacing (dict
of regex -> dict of regex), this works for lists as well
@@ -420,7 +420,7 @@ will be replaced with a scalar (list of regex -> regex)
.. ipython:: python
- df.replace([r'\s*\.\*', r'a|b'], nan, regex=True)
+ df.replace([r'\s*\.\s*', r'a|b'], nan, regex=True)
All of the regular expression examples can also be passed with the
``to_replace`` argument as the ``regex`` argument. In this case the ``value``
@@ -429,7 +429,7 @@ dictionary. The previous example, in this case, would then be
.. ipython:: python
- df.replace(regex=[r'\s*\.\*', r'a|b'], value=nan)
+ df.replace(regex=[r'\s*\.\s*', r'a|b'], value=nan)
This can be convenient if you do not want to pass ``regex=True`` every time you
want to use a regular expression.
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index d5256bcf26d25..b3f1e0c76420f 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -59,6 +59,24 @@ Enhancements
``Series`` with object dtype. See the examples section in the regular docs
:ref:`Replacing via String Expression <missing_data.replace_expression>`
+ For example you can do
+
+ .. ipython :: python
+
+ df = DataFrame({'a': list('ab..'), 'b': [1, 2, 3, 4]})
+ df.replace(regex=r'\s*\.\s*', value=nan)
+
+ to replace all occurrences of the string ``'.'`` with zero or more
+ instances of surrounding whitespace with ``NaN``.
+
+ Regular string replacement still works as expected. For example, you can do
+
+ .. ipython :: python
+
+ df.replace('.', nan)
+
+ to replace all occurrences of the string ``'.'`` with ``NaN``.
+
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
on GitHub for a complete list.
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 8e48ef094c419..b3054db56b718 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -6561,12 +6561,16 @@ def test_regex_replace_dict_nested(self):
dfmix = DataFrame(mix)
res = dfmix.replace({'b': {r'\s*\.\s*': nan}}, regex=True)
res2 = dfmix.copy()
+ res4 = dfmix.copy()
res2.replace({'b': {r'\s*\.\s*': nan}}, inplace=True, regex=True)
- print res2
+ res3 = dfmix.replace(regex={'b': {r'\s*\.\s*': nan}})
+ res4.replace(regex={'b': {r'\s*\.\s*': nan}}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
+ assert_frame_equal(res3, expec)
+ assert_frame_equal(res4, expec)
def test_regex_replace_list_to_scalar(self):
mix = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
| https://api.github.com/repos/pandas-dev/pandas/pulls/3639 | 2013-05-17T21:27:54Z | 2013-05-19T16:53:11Z | 2013-05-19T16:53:11Z | 2014-06-17T23:36:42Z | |
systematic failure when writing a dataframe of 1 column of integer. this should solve issue Issue #3628 | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 3002f2f620f5e..b54a30d95bb54 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -228,7 +228,11 @@ def _write_sqlite(frame, table, names, cur):
wildcards = ','.join(['?'] * len(names))
insert_query = 'INSERT INTO %s (%s) VALUES (%s)' % (
table, col_names, wildcards)
- data = [tuple(x) for x in frame.values]
+ # pandas types are badly handled if there is only 1 column ( Issue #3628 )
+ if not len(frame.columns )==1 :
+ data = [tuple(x) for x in frame.values]
+ else :
+ data = [tuple(x) for x in frame.values.tolist()]
cur.executemany(insert_query, data)
def _write_mysql(frame, table, names, cur):
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index b443c55f97b8d..1daa50c70a900 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -219,6 +219,18 @@ def test_keyword_as_column_names(self):
df = DataFrame({'From':np.ones(5)})
sql.write_frame(df, con = self.db, name = 'testkeywords')
+ def test_onecolumn_of_integer(self):
+ '''
+ a column_of_integers dataframe should transfer well to sql
+ '''
+ mono_df=DataFrame([1 , 2], columns=['c0'])
+ sql.write_frame(mono_df, con = self.db, name = 'mono_df')
+ # computing the sum via sql
+ con_x=self.db
+ the_sum=sum([my_c0[0] for my_c0 in con_x.execute("select * from mono_df")])
+ # it should not fail, and gives 3 ( Issue #3628 )
+ self.assertEqual(the_sum , 3)
+
class TestMySQL(unittest.TestCase):
| ...lite ( Issue #3628 )
A push of a dataframe of one column of integers fail ungraciously, because sqlite doesn't recognize well the data type.
This should solve the problem by converting back to normal types
| https://api.github.com/repos/pandas-dev/pandas/pulls/3636 | 2013-05-17T18:00:56Z | 2013-05-19T15:33:15Z | null | 2014-06-25T05:42:34Z |
API: Raise on iloc indexing with a non-integer based boolean mask (GH3631) | diff --git a/RELEASE.rst b/RELEASE.rst
index 9147968997fc7..acb4f429e81b0 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -70,6 +70,9 @@ pandas 0.11.1
- Add ``squeeze`` keyword to ``groupby`` to allow reduction from
DataFrame -> Series if groups are unique. Regression from 0.10.1,
partial revert on (GH2893_) with (GH3596_)
+ - Raise on ``iloc`` when boolean indexing with a label based indexer mask
+ e.g. a boolean Series, even with integer labels, will raise. Since ``iloc``
+ is purely positional based, the labels on the Series are not alignable (GH3631_)
**Bug Fixes**
@@ -182,6 +185,7 @@ pandas 0.11.1
.. _GH3624: https://github.com/pydata/pandas/issues/3624
.. _GH3626: https://github.com/pydata/pandas/issues/3626
.. _GH3601: https://github.com/pydata/pandas/issues/3601
+.. _GH3631: https://github.com/pydata/pandas/issues/3631
.. _GH1512: https://github.com/pydata/pandas/issues/1512
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 55b7e653c3630..43b512a934558 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -68,7 +68,6 @@ three types of multi-axis indexing.
- An integer e.g. ``5``
- A list or array of integers ``[4, 3, 0]``
- A slice object with ints ``1:7``
- - A boolean array
See more at :ref:`Selection by Position <indexing.integer>`
@@ -291,7 +290,6 @@ The ``.iloc`` attribute is the primary access method. The following are valid in
- An integer e.g. ``5``
- A list or array of integers ``[4, 3, 0]``
- A slice object with ints ``1:7``
-- A boolean array
.. ipython:: python
@@ -329,12 +327,6 @@ Select via integer list
df1.iloc[[1,3,5],[1,3]]
-Select via boolean array
-
-.. ipython:: python
-
- df1.iloc[:,df1.iloc[0]>0]
-
For slicing rows explicitly (equiv to deprecated ``df.irow(slice(1,3))``).
.. ipython:: python
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index d5256bcf26d25..aed95188db26e 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -40,6 +40,27 @@ API changes
# no squeezing (the default, and behavior in 0.10.1)
df2.groupby("val1").apply(func)
+ - Raise on ``iloc`` when boolean indexing with a label based indexer mask
+ e.g. a boolean Series, even with integer labels, will raise. Since ``iloc``
+ is purely positional based, the labels on the Series are not alignable (GH3631_)
+
+ This case is rarely used, and there are plently of alternatives. This preserves the
+ ``iloc`` API to be *purely* positional based.
+
+ .. ipython:: python
+
+ df = DataFrame(range(5), list('ABCDE'), columns=['a'])
+ mask = (df.a%2 == 0)
+ mask
+
+ # this is what you should use
+ df.loc[mask]
+
+ # this will work as well
+ df.iloc[mask.values]
+
+ ``df.iloc[mask]`` will raise a ``ValueError``
+
Enhancements
~~~~~~~~~~~~
@@ -74,3 +95,4 @@ on GitHub for a complete list.
.. _GH3435: https://github.com/pydata/pandas/issues/3435
.. _GH1512: https://github.com/pydata/pandas/issues/1512
.. _GH2285: https://github.com/pydata/pandas/issues/2285
+.. _GH3631: https://github.com/pydata/pandas/issues/3631
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 1cbc5abdc3ea3..02f1cf4539ac4 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -775,7 +775,14 @@ class _iLocIndexer(_LocationIndexer):
_exception = IndexError
def _has_valid_type(self, key, axis):
- return isinstance(key, slice) or com.is_integer(key) or com._is_bool_indexer(key) or _is_list_like(key)
+ if com._is_bool_indexer(key):
+ if hasattr(key,'index') and isinstance(key.index,Index):
+ if key.index.inferred_type == 'integer':
+ raise NotImplementedError("iLocation based boolean indexing on an integer type is not available")
+ raise ValueError("iLocation based boolean indexing cannot use an indexable as a mask")
+ return True
+
+ return isinstance(key, slice) or com.is_integer(key) or _is_list_like(key)
def _getitem_tuple(self, tup):
@@ -811,9 +818,11 @@ def _get_slice_axis(self, slice_obj, axis=0):
def _getitem_axis(self, key, axis=0):
if isinstance(key, slice):
+ self._has_valid_type(key,axis)
return self._get_slice_axis(key, axis=axis)
elif com._is_bool_indexer(key):
+ self._has_valid_type(key,axis)
return self._getbool_axis(key, axis=axis)
# a single integer or a list of integers
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index f6d106f422911..d90aa369aa46e 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -888,6 +888,60 @@ def test_multi_assign(self):
df2.ix[mask, cols]= dft.ix[mask, cols].values
assert_frame_equal(df2,expected)
+ def test_iloc_mask(self):
+
+ # GH 3631, iloc with a mask (of a series) should raise
+ df = DataFrame(range(5), list('ABCDE'), columns=['a'])
+ mask = (df.a%2 == 0)
+ self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
+ mask.index = range(len(mask))
+ self.assertRaises(NotImplementedError, df.iloc.__getitem__, tuple([mask]))
+
+ # ndarray ok
+ result = df.iloc[np.array([True] * len(mask),dtype=bool)]
+ assert_frame_equal(result,df)
+
+ # the possibilities
+ locs = np.arange(4)
+ nums = 2**locs
+ reps = map(bin, nums)
+ df = DataFrame({'locs':locs, 'nums':nums}, reps)
+
+ expected = {
+ (None,'') : '0b1100',
+ (None,'.loc') : '0b1100',
+ (None,'.iloc') : '0b1100',
+ ('index','') : '0b11',
+ ('index','.loc') : '0b11',
+ ('index','.iloc') : 'iLocation based boolean indexing cannot use an indexable as a mask',
+ ('locs','') : 'Unalignable boolean Series key provided',
+ ('locs','.loc') : 'Unalignable boolean Series key provided',
+ ('locs','.iloc') : 'iLocation based boolean indexing on an integer type is not available',
+ }
+
+ import warnings
+ warnings.filterwarnings(action='ignore', category=UserWarning)
+ result = dict()
+ for idx in [None, 'index', 'locs']:
+ mask = (df.nums>2).values
+ if idx:
+ mask = Series(mask, list(reversed(getattr(df, idx))))
+ for method in ['', '.loc', '.iloc']:
+ try:
+ if method:
+ accessor = getattr(df, method[1:])
+ else:
+ accessor = df
+ ans = str(bin(accessor[mask]['nums'].sum()))
+ except Exception, e:
+ ans = str(e)
+
+ key = tuple([idx,method])
+ r = expected.get(key)
+ if r != ans:
+ raise AssertionError("[%s] does not match [%s], received [%s]" %
+ (key,ans,r))
+ warnings.filterwarnings(action='always', category=UserWarning)
if __name__ == '__main__':
import nose
| closed #3631
| https://api.github.com/repos/pandas-dev/pandas/pulls/3635 | 2013-05-17T14:59:58Z | 2013-05-19T14:18:45Z | 2013-05-19T14:18:45Z | 2014-06-30T11:47:32Z |
BUG: (GH3626) issue with alignment of a DataFrame setitem with a piece of another DataFrame | diff --git a/RELEASE.rst b/RELEASE.rst
index 219eec42ec20f..19073c97d92eb 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -125,6 +125,7 @@ pandas 0.11.1
- Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] dtype in ``read_csv``
when ``parse_dates`` is specified (GH3062_)
- Fix not consolidating before to_csv (GH3624_)
+ - Fix alignment issue when setitem in a DataFrame with a piece of a DataFrame (GH3626_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -177,6 +178,7 @@ pandas 0.11.1
.. _GH3611: https://github.com/pydata/pandas/issues/3611
.. _GH3062: https://github.com/pydata/pandas/issues/3062
.. _GH3624: https://github.com/pydata/pandas/issues/3624
+.. _GH3626: https://github.com/pydata/pandas/issues/3626
.. _GH1512: https://github.com/pydata/pandas/issues/1512
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index dd27fa5c3473c..1cbc5abdc3ea3 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -138,14 +138,14 @@ def setter(item, v):
# align to
if item in value:
v = value[item]
- v = v.reindex(self.obj[item].reindex(v.index).dropna().index)
+ v = v.reindex(self.obj[item].index & v.index)
setter(item, v.values)
else:
setter(item, np.nan)
# we have an equal len ndarray
- elif isinstance(value, np.ndarray) and value.ndim > 1:
- if len(labels) != len(value):
+ elif isinstance(value, np.ndarray) and value.ndim == 2:
+ if len(labels) != value.shape[1]:
raise ValueError('Must have equal len keys and value when'
' setting with an ndarray')
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 65e7516d4b082..f6d106f422911 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -853,6 +853,41 @@ def test_iloc_panel_issue(self):
self.assert_(p.iloc[1, :3, 1].shape == (3,))
self.assert_(p.iloc[:3, 1, 1].shape == (3,))
+ def test_multi_assign(self):
+
+ # GH 3626, an assignement of a sub-df to a df
+ df = DataFrame({'FC':['a','b','a','b','a','b'],
+ 'PF':[0,0,0,0,1,1],
+ 'col1':range(6),
+ 'col2':range(6,12)})
+ df.ix[1,0]=np.nan
+ df2 = df.copy()
+
+ mask=~df2.FC.isnull()
+ cols=['col1', 'col2']
+
+ dft = df2 * 2
+ dft.ix[3,3] = np.nan
+
+ expected = DataFrame({'FC':['a',np.nan,'a','b','a','b'],
+ 'PF':[0,0,0,0,1,1],
+ 'col1':Series([0,1,4,6,8,10],dtype='float64'),
+ 'col2':[12,7,16,np.nan,20,22]})
+
+
+ # frame on rhs
+ df2.ix[mask, cols]= dft.ix[mask, cols]
+ assert_frame_equal(df2,expected)
+ df2.ix[mask, cols]= dft.ix[mask, cols]
+ assert_frame_equal(df2,expected)
+
+ # with an ndarray on rhs
+ df2 = df.copy()
+ df2.ix[mask, cols]= dft.ix[mask, cols].values
+ assert_frame_equal(df2,expected)
+ df2.ix[mask, cols]= dft.ix[mask, cols].values
+ assert_frame_equal(df2,expected)
+
if __name__ == '__main__':
import nose
| closes #3626
| https://api.github.com/repos/pandas-dev/pandas/pulls/3632 | 2013-05-17T13:05:51Z | 2013-05-17T16:28:17Z | 2013-05-17T16:28:17Z | 2014-06-20T17:50:44Z |
ENH: Add Series.sort ascending keyword | diff --git a/pandas/core/series.py b/pandas/core/series.py
index e807cf3f1dfd4..a04e931cf07e3 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2012,7 +2012,7 @@ def dot(self, other):
Parameters
----------
- other : Series or DataFrame
+ other : Series or DataFrame
Returns
-------
@@ -2194,7 +2194,7 @@ def update(self, other):
#----------------------------------------------------------------------
# Reindexing, sorting
- def sort(self, axis=0, kind='quicksort', order=None):
+ def sort(self, axis=0, kind='quicksort', order=None, ascending=True):
"""
Sort values and index labels by value, in place. For compatibility with
ndarray API. No return value
@@ -2206,8 +2206,15 @@ def sort(self, axis=0, kind='quicksort', order=None):
Choice of sorting algorithm. See np.sort for more
information. 'mergesort' is the only stable algorithm
order : ignored
+ ascending : boolean, default True
+ Sort ascending. Passing False sorts descending
+
+ See Also
+ --------
+ pandas.Series.order
"""
- sortedSeries = self.order(na_last=True, kind=kind)
+ sortedSeries = self.order(na_last=True, kind=kind,
+ ascending=ascending)
true_base = self
while true_base.base is not None:
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index b988f2985877a..288d26eefce87 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -475,7 +475,7 @@ def test_constructor_dtype_datetime64(self):
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
-
+
s = Series(dates)
self.assert_(s.dtype == 'M8[ns]')
@@ -1162,7 +1162,7 @@ def test_where(self):
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5,3.5,4.5,5.5,6.5]
- s[mask] = values
+ s[mask] = values
expected = Series(values + range(5,10), dtype='float64')
assert_series_equal(s, expected)
self.assertEquals(s.dtype, expected.dtype)
@@ -2591,7 +2591,7 @@ def test_dot(self):
expected = Series(np.dot(a.values, b.values),
index=['1', '2', '3'])
assert_series_equal(result, expected)
-
+
#Check index alignment
b2 = b.reindex(index=reversed(b.index))
result = a.dot(b)
@@ -2723,6 +2723,11 @@ def test_sort(self):
self.assert_(np.array_equal(ts, self.ts.order()))
self.assert_(np.array_equal(ts.index, self.ts.order().index))
+ ts.sort(ascending=False)
+ self.assert_(np.array_equal(ts, self.ts.order(ascending=False)))
+ self.assert_(np.array_equal(ts.index,
+ self.ts.order(ascending=False).index))
+
def test_sort_index(self):
import random
| A convenience.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3630 | 2013-05-17T04:47:14Z | 2013-05-19T21:14:06Z | 2013-05-19T21:14:06Z | 2014-07-16T08:09:34Z |
BUG: (GH3588) fix pivoting with nan in the index | diff --git a/RELEASE.rst b/RELEASE.rst
index 0ade3e92c164a..2f98922eb403e 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -144,6 +144,7 @@ pandas 0.11.1
- Fix plotting of unordered DatetimeIndex (GH3601_)
- ``sql.write_frame`` failing when writing a single column to sqlite (GH3628_),
thanks to @stonebig
+ - Fix pivoting with ``nan`` in the index (GH3558_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -194,6 +195,7 @@ pandas 0.11.1
.. _GH3617: https://github.com/pydata/pandas/issues/3617
.. _GH3435: https://github.com/pydata/pandas/issues/3435
.. _GH3611: https://github.com/pydata/pandas/issues/3611
+.. _GH3558: https://github.com/pydata/pandas/issues/3558
.. _GH3062: https://github.com/pydata/pandas/issues/3062
.. _GH3624: https://github.com/pydata/pandas/issues/3624
.. _GH3626: https://github.com/pydata/pandas/issues/3626
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 02f1cf4539ac4..ea684ef11446c 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -469,11 +469,14 @@ def _reindex(keys, level=None):
missing = com._ensure_platform_int(missing)
missing_labels = keyarr.take(missing)
- missing_labels_indexer = com._ensure_int64(l[~check])
+ missing_indexer = com._ensure_int64(l[~check])
cur_labels = result._get_axis(axis).values
- cur_labels_indexer = com._ensure_int64(l[check])
- new_labels = lib.combine_from_indexers(cur_labels, cur_labels_indexer,
- missing_labels, missing_labels_indexer)
+ cur_indexer = com._ensure_int64(l[check])
+
+ new_labels = np.empty(tuple([len(indexer)]),dtype=object)
+ new_labels[cur_indexer] = cur_labels
+ new_labels[missing_indexer] = missing_labels
+
result = result.reindex_axis(new_labels,axis=axis)
return result
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index 8595e2a91906d..b2e5bb01f53af 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -10,12 +10,12 @@
from pandas.core.categorical import Categorical
from pandas.core.common import (notnull, _ensure_platform_int, _maybe_promote,
- _maybe_upcast)
+ _maybe_upcast, isnull)
from pandas.core.groupby import (get_group_index, _compress_group_index,
decons_group_index)
import pandas.core.common as com
import pandas.algos as algos
-
+from pandas import lib
from pandas.core.index import MultiIndex, Index
@@ -67,7 +67,14 @@ def __init__(self, values, index, level=-1, value_columns=None):
self.index = index
self.level = self.index._get_level_number(level)
- self.new_index_levels = list(index.levels)
+ levels = index.levels
+ labels = index.labels
+ def _make_index(lev,lab):
+ i = lev.__class__(_make_index_array_level(lev.values,lab))
+ i.name = lev.name
+ return i
+
+ self.new_index_levels = list([ _make_index(lev,lab) for lev,lab in zip(levels,labels) ])
self.new_index_names = list(index.names)
self.removed_name = self.new_index_names.pop(self.level)
@@ -140,6 +147,19 @@ def get_result(self):
values = com.take_nd(values, inds, axis=1)
columns = columns[inds]
+ # we might have a missing index
+ if len(index) != values.shape[0]:
+ mask = isnull(index)
+ if mask.any():
+ l = np.arange(len(index))
+ values, orig_values = np.empty((len(index),values.shape[1])), values
+ values.fill(np.nan)
+ values_indexer = com._ensure_int64(l[~mask])
+ for i, j in enumerate(values_indexer):
+ values[j] = orig_values[i]
+ else:
+ index = index.take(self.unique_groups)
+
return DataFrame(values, index=index, columns=columns)
def get_new_values(self):
@@ -201,11 +221,13 @@ def get_new_columns(self):
def get_new_index(self):
result_labels = []
for cur in self.sorted_labels[:-1]:
- result_labels.append(cur.take(self.compressor))
+ labels = cur.take(self.compressor)
+ labels = _make_index_array_level(labels,cur)
+ result_labels.append(labels)
# construct the new index
if len(self.new_index_levels) == 1:
- new_index = self.new_index_levels[0].take(self.unique_groups)
+ new_index = self.new_index_levels[0]
new_index.name = self.new_index_names[0]
else:
new_index = MultiIndex(levels=self.new_index_levels,
@@ -215,6 +237,26 @@ def get_new_index(self):
return new_index
+def _make_index_array_level(lev,lab):
+ """ create the combined index array, preserving nans, return an array """
+ mask = lab == -1
+ if not mask.any():
+ return lev
+
+ l = np.arange(len(lab))
+ mask_labels = np.empty(len(mask[mask]),dtype=object)
+ mask_labels.fill(np.nan)
+ mask_indexer = com._ensure_int64(l[mask])
+
+ labels = lev
+ labels_indexer = com._ensure_int64(l[~mask])
+
+ new_labels = np.empty(tuple([len(lab)]),dtype=object)
+ new_labels[labels_indexer] = labels
+ new_labels[mask_indexer] = mask_labels
+
+ return new_labels
+
def _unstack_multiple(data, clocs):
if len(clocs) == 0:
return data
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index 30c65d9fcdd9f..15791a984ecc5 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -416,26 +416,6 @@ def dicts_to_array(list dicts, list columns):
return result
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def combine_from_indexers(ndarray a, ndarray[int64_t] a_indexer,
- ndarray b, ndarray[int64_t] b_indexer):
- cdef:
- Py_ssize_t i, n_a, n_b
- ndarray result
-
- n_a = len(a)
- n_b = len(b)
- result = np.empty(n_a+n_b,dtype=object)
-
- for i in range(n_a):
- result[a_indexer[i]] = a[i]
- for i in range(n_b):
- result[b_indexer[i]] = b[i]
-
- return result
-
-
def fast_zip(list ndarrays):
'''
For zipping multiple ndarrays into an ndarray of tuples
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index d90aa369aa46e..e9afa1ae6ec1d 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -840,6 +840,16 @@ def test_set_index_nan(self):
result = df.set_index(['year','PRuid','QC']).reset_index().reindex(columns=df.columns)
assert_frame_equal(result,df)
+ def test_multi_nan_indexing(self):
+
+ # GH 3588
+ df = DataFrame({"a":['R1', 'R2', np.nan, 'R4'], 'b':["C1", "C2", "C3" , "C4"], "c":[10, 15, np.nan , 20]})
+ result = df.set_index(['a','b'], drop=False)
+ expected = DataFrame({"a":['R1', 'R2', np.nan, 'R4'], 'b':["C1", "C2", "C3" , "C4"], "c":[10, 15, np.nan , 20]},
+ index = [Index(['R1','R2',np.nan,'R4'],name='a'),Index(['C1','C2','C3','C4'],name='b')])
+ assert_frame_equal(result,expected)
+
+
def test_iloc_panel_issue(self):
# GH 3617
diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py
index c0e0de1a23dad..e333691b1e6d2 100644
--- a/pandas/tools/tests/test_pivot.py
+++ b/pandas/tools/tests/test_pivot.py
@@ -2,7 +2,7 @@
import numpy as np
-from pandas import DataFrame, Series
+from pandas import DataFrame, Series, Index
from pandas.tools.merge import concat
from pandas.tools.pivot import pivot_table, crosstab
import pandas.util.testing as tm
@@ -129,6 +129,17 @@ def test_pivot_multi_functions(self):
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
+ def test_pivot_index_with_nan(self):
+ # GH 3588
+ nan = np.nan
+ df = DataFrame({"a":['R1', 'R2', nan, 'R4'], 'b':["C1", "C2", "C3" , "C4"], "c":[10, 15, nan , 20]})
+ result = df.pivot('a','b','c')
+ expected = DataFrame([[nan,nan,nan,nan],[nan,10,nan,nan],
+ [nan,nan,nan,nan],[nan,nan,15,20]],
+ index = Index(['R1','R2',nan,'R4'],name='a'),
+ columns = Index(['C1','C2','C3','C4'],name='b'))
+ tm.assert_frame_equal(result, expected)
+
def test_margins(self):
def _check_output(res, col, rows=['A', 'B'], cols=['C']):
cmarg = res['All'][:-1]
| closed #3588
| https://api.github.com/repos/pandas-dev/pandas/pulls/3627 | 2013-05-16T21:04:18Z | 2013-05-19T16:55:29Z | 2013-05-19T16:55:29Z | 2014-06-14T22:46:49Z |
BUG: Fix not consolidating before to_csv (GH3624_) | diff --git a/RELEASE.rst b/RELEASE.rst
index aeaebd88c5ee7..219eec42ec20f 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -124,6 +124,7 @@ pandas 0.11.1
- Fix indexing issue in ndim >= 3 with ``iloc`` (GH3617_)
- Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] dtype in ``read_csv``
when ``parse_dates`` is specified (GH3062_)
+ - Fix not consolidating before to_csv (GH3624_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -175,6 +176,7 @@ pandas 0.11.1
.. _GH3435: https://github.com/pydata/pandas/issues/3435
.. _GH3611: https://github.com/pydata/pandas/issues/3611
.. _GH3062: https://github.com/pydata/pandas/issues/3062
+.. _GH3624: https://github.com/pydata/pandas/issues/3624
.. _GH1512: https://github.com/pydata/pandas/issues/1512
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 2924f1579fb97..bea4b59bfaaa4 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -777,6 +777,8 @@ def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None,
line_terminator='\n', chunksize=None, engine=None):
self.engine = engine # remove for 0.12
+
+ obj._consolidate_inplace()
self.obj = obj
self.path_or_buf = path_or_buf
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index b39fab1bd4828..de49eca7dab1c 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4939,6 +4939,19 @@ def test_to_csv_from_csv_w_all_infs(self):
assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name
assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)
+ def test_to_csv_no_index(self):
+ # GH 3624, after appending columns, to_csv fails
+ pname = '__tmp_to_csv_no_index__'
+ with ensure_clean(pname) as path:
+ df = DataFrame({'c1':[1,2,3], 'c2':[4,5,6]})
+ df.to_csv(path, index=False)
+ result = read_csv(path)
+ assert_frame_equal(df,result)
+ df['c3'] = [7,8,9]
+ df.to_csv(path, index=False)
+ result = read_csv(path)
+ assert_frame_equal(df,result)
+
def test_to_csv_multiindex(self):
pname = '__tmp_to_csv_multiindex__'
| closes #3624
| https://api.github.com/repos/pandas-dev/pandas/pulls/3625 | 2013-05-16T17:13:03Z | 2013-05-16T17:38:59Z | 2013-05-16T17:38:59Z | 2014-07-16T08:09:28Z |
DOC: more informative PerformanceWarning in HDFStore | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index d3b7533840a86..0ae835c81d870 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -50,7 +50,7 @@ class AttributeConflictWarning(Warning): pass
class PerformanceWarning(Warning): pass
performance_doc = """
your performance may suffer as PyTables will pickle object types that it cannot map
-directly to c-types [inferred_type->%s,key->%s]
+directly to c-types [inferred_type->%s,key->%s] [items->%s]
"""
# map object types
@@ -1861,7 +1861,7 @@ def write_array_empty(self, key, value):
getattr(self.group, key)._v_attrs.value_type = str(value.dtype)
getattr(self.group, key)._v_attrs.shape = value.shape
- def write_array(self, key, value):
+ def write_array(self, key, value, items=None):
if key in self.group:
self._handle.removeNode(self.group, key)
@@ -1904,7 +1904,11 @@ def write_array(self, key, value):
elif inferred_type == 'string':
pass
else:
- ws = performance_doc % (inferred_type,key)
+ try:
+ items = list(items)
+ except:
+ pass
+ ws = performance_doc % (inferred_type,key,items)
warnings.warn(ws, PerformanceWarning)
vlarr = self._handle.createVLArray(self.group, key,
@@ -2115,7 +2119,7 @@ def write(self, obj, **kwargs):
for i in range(nblocks):
blk = data.blocks[i]
# I have no idea why, but writing values before items fixed #2299
- self.write_array('block%d_values' % i, blk.values)
+ self.write_array('block%d_values' % i, blk.values, items=blk.items)
self.write_index('block%d_items' % i, blk.items)
class FrameStorer(BlockManagerStorer):
| https://api.github.com/repos/pandas-dev/pandas/pulls/3623 | 2013-05-16T14:29:22Z | 2013-05-16T14:51:07Z | 2013-05-16T14:51:07Z | 2014-07-16T08:09:26Z | |
BUG: (GH3062) Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] in read_csv | diff --git a/RELEASE.rst b/RELEASE.rst
index 8c0d56666f4e1..aeaebd88c5ee7 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -122,6 +122,8 @@ pandas 0.11.1
- Fix ``read_csv`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
was failing (GH3611_)
- Fix indexing issue in ndim >= 3 with ``iloc`` (GH3617_)
+ - Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] dtype in ``read_csv``
+ when ``parse_dates`` is specified (GH3062_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -172,6 +174,7 @@ pandas 0.11.1
.. _GH3617: https://github.com/pydata/pandas/issues/3617
.. _GH3435: https://github.com/pydata/pandas/issues/3435
.. _GH3611: https://github.com/pydata/pandas/issues/3611
+.. _GH3062: https://github.com/pydata/pandas/issues/3062
.. _GH1512: https://github.com/pydata/pandas/issues/1512
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 4a9004b7068ba..38a31c042d120 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -531,6 +531,28 @@ def test_custom_na_values(self):
skiprows=[1])
assert_almost_equal(df3.values, expected)
+ def test_nat_parse(self):
+
+ # GH 3062
+ df = DataFrame(dict({
+ 'A' : np.asarray(range(10),dtype='float64'),
+ 'B' : pd.Timestamp('20010101') }))
+ df.iloc[3:6,:] = np.nan
+
+ with ensure_clean('__nat_parse_.csv') as path:
+ df.to_csv(path)
+ result = read_csv(path,index_col=0,parse_dates=['B'])
+ tm.assert_frame_equal(result,df)
+
+ expected = Series(dict( A = 'float64',B = 'datetime64[ns]'))
+ tm.assert_series_equal(expected,result.dtypes)
+
+ # test with NaT for the nan_rep
+ # we don't have a method to specif the Datetime na_rep (it defaults to '')
+ df.to_csv(path)
+ result = read_csv(path,index_col=0,parse_dates=['B'])
+ tm.assert_frame_equal(result,df)
+
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index f9c1b2329c16d..a633b9482da06 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -318,8 +318,10 @@ class Timestamp(_Timestamp):
ts.dts.us, ts.tzinfo)
+_nat_strings = set(['NaT','nat','NAT','nan','NaN','NAN'])
class NaTType(_NaT):
"""(N)ot-(A)-(T)ime, the time equivalent of NaN"""
+
def __new__(cls):
cdef _NaT base
@@ -647,8 +649,11 @@ cdef convert_to_tsobject(object ts, object tz):
obj.value = ts
pandas_datetime_to_datetimestruct(ts, PANDAS_FR_ns, &obj.dts)
elif util.is_string_object(ts):
- _string_to_dts(ts, &obj.dts)
- obj.value = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &obj.dts)
+ if ts in _nat_strings:
+ obj.value = NPY_NAT
+ else:
+ _string_to_dts(ts, &obj.dts)
+ obj.value = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &obj.dts)
elif PyDateTime_Check(ts):
if tz is not None:
# sort of a temporary hack
@@ -862,6 +867,10 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False,
iresult[i] = iNaT
continue
+ elif val in _nat_strings:
+ iresult[i] = iNaT
+ continue
+
_string_to_dts(val, &dts)
iresult[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns,
&dts)
| closes #3062
| https://api.github.com/repos/pandas-dev/pandas/pulls/3621 | 2013-05-16T13:00:16Z | 2013-05-16T13:38:33Z | 2013-05-16T13:38:33Z | 2014-07-16T08:09:24Z |
BUG: fixed platform int issues on 32-bit | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 6bb4b36862956..a52c932b30ba4 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1612,13 +1612,13 @@ def _astype_nansafe(arr, dtype, copy = True):
if is_datetime64_dtype(arr):
if dtype == object:
return tslib.ints_to_pydatetime(arr.view(np.int64))
- elif issubclass(dtype.type, np.int):
+ elif dtype == np.int64:
return arr.view(dtype)
elif dtype != _NS_DTYPE:
raise TypeError("cannot astype a datetimelike from [%s] to [%s]" % (arr.dtype,dtype))
return arr.astype(_NS_DTYPE)
elif is_timedelta64_dtype(arr):
- if issubclass(dtype.type, np.int):
+ if dtype == np.int64:
return arr.view(dtype)
elif dtype == object:
return arr.astype(object)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 8b684b621a540..dd27fa5c3473c 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -322,6 +322,7 @@ def _convert_for_reindex(self, key, axis=0):
keyarr = _asarray_tuplesafe(key)
if _is_integer_dtype(keyarr) and not _is_integer_index(labels):
+ keyarr = com._ensure_platform_int(keyarr)
return labels.take(keyarr)
return keyarr
@@ -466,10 +467,11 @@ def _reindex(keys, level=None):
if len(missing):
l = np.arange(len(indexer))
+ missing = com._ensure_platform_int(missing)
missing_labels = keyarr.take(missing)
- missing_labels_indexer = l[~check]
+ missing_labels_indexer = com._ensure_int64(l[~check])
cur_labels = result._get_axis(axis).values
- cur_labels_indexer = l[check]
+ cur_labels_indexer = com._ensure_int64(l[check])
new_labels = lib.combine_from_indexers(cur_labels, cur_labels_indexer,
missing_labels, missing_labels_indexer)
result = result.reindex_axis(new_labels,axis=axis)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 11ede8d759b38..b988f2985877a 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1878,10 +1878,11 @@ def test_constructor_dtype_timedelta64(self):
self.assertRaises(TypeError, td.astype, 'm8[%s]' % t)
# valid astype
- td.astype('int')
+ td.astype('int64')
# this is an invalid casting
self.assertRaises(Exception, Series, [ timedelta(days=i) for i in range(3) ] + [ 'foo' ], dtype='m8[ns]' )
+ self.assertRaises(TypeError, td.astype, 'int32')
# leave as object here
td = Series([ timedelta(days=i) for i in range(3) ] + [ 'foo' ])
| https://api.github.com/repos/pandas-dev/pandas/pulls/3620 | 2013-05-16T12:02:48Z | 2013-05-16T12:02:59Z | 2013-05-16T12:02:59Z | 2014-07-16T08:09:22Z | |
BUG: Reindex data if plotting time/period index (GH3601) | diff --git a/RELEASE.rst b/RELEASE.rst
index 56e3096d23cb2..9147968997fc7 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -127,6 +127,7 @@ pandas 0.11.1
when ``parse_dates`` is specified (GH3062_)
- Fix not consolidating before to_csv (GH3624_)
- Fix alignment issue when setitem in a DataFrame with a piece of a DataFrame (GH3626_)
+ - Fix plotting of unordered DatetimeIndex (GH3601_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -180,6 +181,7 @@ pandas 0.11.1
.. _GH3062: https://github.com/pydata/pandas/issues/3062
.. _GH3624: https://github.com/pydata/pandas/issues/3624
.. _GH3626: https://github.com/pydata/pandas/issues/3626
+.. _GH3601: https://github.com/pydata/pandas/issues/3601
.. _GH1512: https://github.com/pydata/pandas/issues/1512
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 5033dc2d3a549..197b26014a760 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -677,7 +677,7 @@ def test_default_color_cycle(self):
@slow
def test_unordered_ts(self):
- df = DataFrame(np.random.randn(3, 1),
+ df = DataFrame(np.array([3.0, 2.0, 1.0]),
index=[date(2012, 10, 1),
date(2012, 9, 1),
date(2012, 8, 1)],
@@ -685,6 +685,8 @@ def test_unordered_ts(self):
ax = df.plot()
xticks = ax.lines[0].get_xdata()
self.assert_(xticks[0] < xticks[1])
+ ydata = ax.lines[0].get_ydata()
+ self.assert_(np.all(ydata == np.array([1.0, 2.0, 3.0])))
class TestDataFrameGroupByPlots(unittest.TestCase):
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 223e127223195..751f5fcdb82b2 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -947,8 +947,8 @@ def _get_xticks(self, convert_period=False):
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
- index = index.to_timestamp().order()
- x = index._mpl_repr()
+ self.data = self.data.reindex(index=index.order())
+ x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
@@ -958,7 +958,8 @@ def _get_xticks(self, convert_period=False):
"""
x = index._mpl_repr()
elif is_datetype:
- x = index.order()._mpl_repr()
+ self.data = self.data.reindex(index=index.order())
+ x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = range(len(index))
| Just wanted to push this fix to see if it looked OK and let the Travis build start.
If it looks ok, i will add something in the release notes and try to write a proper test.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3619 | 2013-05-16T02:38:54Z | 2013-05-17T23:58:15Z | 2013-05-17T23:58:15Z | 2014-07-16T08:09:21Z |
BUG: (GH3617) Fix indexing issue with ndim >= 3 with iloc | diff --git a/RELEASE.rst b/RELEASE.rst
index 503ae0e6bb30e..75d46dd98df3a 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -120,6 +120,7 @@ pandas 0.11.1
- Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_)
- Fix ``read_csv`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
was failing (GH3611_)
+ - Fix indexing issue in ndim >= 3 with ``iloc`` (GH3617_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -167,6 +168,7 @@ pandas 0.11.1
.. _GH3590: https://github.com/pydata/pandas/issues/3590
.. _GH3610: https://github.com/pydata/pandas/issues/3610
.. _GH3596: https://github.com/pydata/pandas/issues/3596
+.. _GH3617: https://github.com/pydata/pandas/issues/3617
.. _GH3435: https://github.com/pydata/pandas/issues/3435
.. _GH3611: https://github.com/pydata/pandas/issues/3611
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 29adce4e02591..8b684b621a540 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -778,8 +778,16 @@ def _has_valid_type(self, key, axis):
def _getitem_tuple(self, tup):
self._has_valid_tuple(tup)
+ try:
+ return self._getitem_lowerdim(tup)
+ except:
+ pass
+
retval = self.obj
for i, key in enumerate(tup):
+ if i >= self.obj.ndim:
+ raise IndexingError('Too many indexers')
+
if _is_null_slice(key):
continue
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 44b62991cf7a3..3f090b273787b 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -326,7 +326,7 @@ def _init_arrays(self, arrays, arr_names, axes):
@property
def shape(self):
- return [len(getattr(self, a)) for a in self._AXIS_ORDERS]
+ return tuple([len(getattr(self, a)) for a in self._AXIS_ORDERS])
@classmethod
def from_dict(cls, data, intersect=False, orient='items', dtype=None):
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 46fd98fc14ffb..65e7516d4b082 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -840,6 +840,19 @@ def test_set_index_nan(self):
result = df.set_index(['year','PRuid','QC']).reset_index().reindex(columns=df.columns)
assert_frame_equal(result,df)
+ def test_iloc_panel_issue(self):
+
+ # GH 3617
+ p = Panel(randn(4, 4, 4))
+
+ self.assert_(p.iloc[:3, :3, :3].shape == (3,3,3))
+ self.assert_(p.iloc[1, :3, :3].shape == (3,3))
+ self.assert_(p.iloc[:3, 1, :3].shape == (3,3))
+ self.assert_(p.iloc[:3, :3, 1].shape == (3,3))
+ self.assert_(p.iloc[1, 1, :3].shape == (3,))
+ self.assert_(p.iloc[1, :3, 1].shape == (3,))
+ self.assert_(p.iloc[:3, 1, 1].shape == (3,))
+
if __name__ == '__main__':
import nose
| closes #3617
| https://api.github.com/repos/pandas-dev/pandas/pulls/3618 | 2013-05-16T00:57:42Z | 2013-05-16T11:35:05Z | 2013-05-16T11:35:05Z | 2014-06-30T17:14:22Z |
ENH: read-html fixes | diff --git a/README.rst b/README.rst
index 3cdb2bf5b31f7..2d49c168eac60 100644
--- a/README.rst
+++ b/README.rst
@@ -92,12 +92,11 @@ Optional dependencies
- openpyxl version 1.6.1 or higher, for writing .xlsx files
- xlrd >= 0.9.0
- Needed for Excel I/O
- - `lxml <http://lxml.de>`__, or `Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for reading HTML tables
- - The differences between lxml and Beautiful Soup 4 are mostly speed (lxml
- is faster), however sometimes Beautiful Soup returns what you might
- intuitively expect. Both backends are implemented, so try them both to
- see which one you like. They should return very similar results.
- - Note that lxml requires Cython to build successfully
+ - Both `html5lib <https://github.com/html5lib/html5lib-python>`__ **and**
+ `Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for
+ reading HTML tables
+ - These can both easily be installed by ``pip install html5lib`` and ``pip
+ install beautifulsoup4``.
- `boto <https://pypi.python.org/pypi/boto>`__: necessary for Amazon S3 access.
diff --git a/RELEASE.rst b/RELEASE.rst
index 85cb4d9f40980..bbb04cecd6eba 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -30,8 +30,9 @@ pandas 0.11.1
**New features**
- - pd.read_html() can now parse HTML string, files or urls and return dataframes
- courtesy of @cpcloud. (GH3477_)
+ - ``pandas.read_html()`` can now parse HTML strings, files or urls and
+ returns a list of ``DataFrame`` s courtesy of @cpcloud. (GH3477_, GH3605_,
+ GH3606_)
- Support for reading Amazon S3 files. (GH3504_)
- Added module for reading and writing Stata files: pandas.io.stata (GH1512_)
- Added support for writing in ``to_csv`` and reading in ``read_csv``,
@@ -48,7 +49,7 @@ pandas 0.11.1
**Improvements to existing features**
- Fixed various issues with internal pprinting code, the repr() for various objects
- including TimeStamp and *Index now produces valid python code strings and
+ including TimeStamp and Index now produces valid python code strings and
can be used to recreate the object, (GH3038_, GH3379_, GH3251_, GH3460_)
- ``convert_objects`` now accepts a ``copy`` parameter (defaults to ``True``)
- ``HDFStore``
@@ -146,6 +147,9 @@ pandas 0.11.1
- ``sql.write_frame`` failing when writing a single column to sqlite (GH3628_),
thanks to @stonebig
- Fix pivoting with ``nan`` in the index (GH3558_)
+ - Fix running of bs4 tests when it is not installed (GH3605_)
+ - Fix parsing of html table (GH3606_)
+ - ``read_html()`` now only allows a single backend: ``html5lib`` (GH3616_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -209,6 +213,9 @@ pandas 0.11.1
.. _GH3141: https://github.com/pydata/pandas/issues/3141
.. _GH3628: https://github.com/pydata/pandas/issues/3628
.. _GH3638: https://github.com/pydata/pandas/issues/3638
+.. _GH3605: https://github.com/pydata/pandas/issues/3605
+.. _GH3606: https://github.com/pydata/pandas/issues/3606
+.. _Gh3616: https://github.com/pydata/pandas/issues/3616
pandas 0.11.0
=============
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 9d14d1b11c6b1..658d9d78d5b29 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -99,12 +99,11 @@ Optional Dependencies
* `openpyxl <http://packages.python.org/openpyxl/>`__, `xlrd/xlwt <http://www.python-excel.org/>`__
* openpyxl version 1.6.1 or higher
* Needed for Excel I/O
- * `lxml <http://lxml.de>`__, or `Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for reading HTML tables
- * The differences between lxml and Beautiful Soup 4 are mostly speed (lxml
- is faster), however sometimes Beautiful Soup returns what you might
- intuitively expect. Both backends are implemented, so try them both to
- see which one you like. They should return very similar results.
- * Note that lxml requires Cython to build successfully
+ * Both `html5lib <https://github.com/html5lib/html5lib-python>`__ **and**
+ `Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for
+ reading HTML tables
+ * These can both easily be installed by ``pip install html5lib`` and ``pip
+ install beautifulsoup4``.
.. note::
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 42ea4a2ca5d53..3dbf297dea5c5 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -918,18 +918,18 @@ which, if set to ``True``, will additionally output the length of the Series.
HTML
----
-Reading HTML format
+Reading HTML Content
~~~~~~~~~~~~~~~~~~~~~~
.. _io.read_html:
.. versionadded:: 0.11.1
-The toplevel :func:`~pandas.io.parsers.read_html` function can accept an HTML string/file/url
-and will parse HTML tables into pandas DataFrames.
+The toplevel :func:`~pandas.io.parsers.read_html` function can accept an HTML
+string/file/url and will parse HTML tables into a list of pandas DataFrames.
-Writing to HTML format
+Writing to HTML files
~~~~~~~~~~~~~~~~~~~~~~
.. _io.html:
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index a42765591c818..40fda1305e505 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -64,9 +64,27 @@ API changes
Enhancements
~~~~~~~~~~~~
-
- - ``pd.read_html()`` can now parse HTML string, files or urls and return dataframes
- courtesy of @cpcloud. (GH3477_)
+ - ``pd.read_html()`` can now parse HTML strings, files or urls and return
+ DataFrames
+ courtesy of @cpcloud. (GH3477_, GH3605_, GH3606_)
+ - ``read_html()`` (GH3616_)
+ - now works with only a *single* parser backend, that is:
+ - BeautifulSoup4 + html5lib
+ - does *not* and will never support using the html parsing library
+ included with Python as a parser backend
+ - is a bit smarter about the parent table elements of matched text: if
+ multiple matches are found then only the *unique* parents of the result
+ are returned (uniqueness is determined using ``set``).
+ - no longer tries to guess about what you want to do with empty table cells
+ - argument ``infer_types`` now defaults to ``False``.
+ - now returns DataFrames whose default column index is the elements of
+ ``<thead>`` elements in the HTML soup, if any exist.
+ - considers all ``<th>`` and ``<td>`` elements inside of ``<thead>``
+ elements.
+ - tests are now correctly skipped if the proper libraries are not
+ installed.
+ - tests now include a ground-truth csv file from the FDIC failed bank list
+ data set.
- ``HDFStore``
- will retain index attributes (freq,tz,name) on recreation (GH3499_)
@@ -203,3 +221,6 @@ on GitHub for a complete list.
.. _GH1651: https://github.com/pydata/pandas/issues/1651
.. _GH3141: https://github.com/pydata/pandas/issues/3141
.. _GH3638: https://github.com/pydata/pandas/issues/3638
+.. _GH3616: https://github.com/pydata/pandas/issues/3616
+.. _GH3605: https://github.com/pydata/pandas/issues/3605
+.. _GH3606: https://github.com/pydata/pandas/issues/3606
diff --git a/pandas/io/html.py b/pandas/io/html.py
index c29d16db8132b..732bd57bec418 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -16,6 +16,8 @@
except ImportError:
import_module = __import__
+import numpy as np
+
from pandas import DataFrame, MultiIndex
from pandas.io.parsers import _is_url
@@ -78,8 +80,34 @@ def _get_skiprows_iter(skiprows):
raise TypeError('{0} is not a valid type for skipping'
' rows'.format(type(skiprows)))
- def _parse_columns(self, row):
- return row.xpath('.//td|.//th')
+
+def _read(io):
+ """Try to read from a url, file or string.
+
+ Parameters
+ ----------
+ io : str, unicode, or file-like
+
+ Returns
+ -------
+ raw_text : str
+ """
+ if _is_url(io):
+ try:
+ with contextlib.closing(urllib2.urlopen(io)) as url:
+ raw_text = url.read()
+ except urllib2.URLError:
+ raise ValueError('Invalid URL: "{0}"'.format(io))
+ elif hasattr(io, 'read'):
+ raw_text = io.read()
+ elif os.path.isfile(io):
+ with open(io) as f:
+ raw_text = f.read()
+ elif isinstance(io, basestring):
+ raw_text = io
+ else:
+ raise ValueError("Cannot read object of type '{0}'".format(type(io)))
+ return raw_text
class _HtmlFrameParser(object):
@@ -114,9 +142,12 @@ class _HtmlFrameParser(object):
To subclass this class effectively you must override the following methods:
* :func:`_build_doc`
* :func:`_text_getter`
- * :func:`_parse_columns`
- * :func:`_parse_table`
- * :func:`_parse_rows`
+ * :func:`_parse_td`
+ * :func:`_parse_tables`
+ * :func:`_parse_tr`
+ * :func:`_parse_thead`
+ * :func:`_parse_tbody`
+ * :func:`_parse_tfoot`
See each method's respective documentation for details on their
functionality.
"""
@@ -125,33 +156,11 @@ def __init__(self, io, match, attrs):
self.match = match
self.attrs = attrs
- def parse_rows(self):
- """Return a list of list of each table's rows.
-
- Returns
- -------
- row_list : list of list of node-like
- A list of each table's rows, which are DOM nodes (usually <th> or
- <tr> elements).
- """
+ def parse_tables(self):
tables = self._parse_tables(self._build_doc(), self.match, self.attrs)
- assert tables, 'No tables found'
- return (self._parse_rows(table) for table in tables)
-
- def parse_raw_data(self):
- """Return a list of the raw data from each table.
-
- Returns
- -------
- data : list of list of lists of str or unicode
- Each table's data is contained in a list of lists of str or
- unicode.
- """
- return [self._parse_raw_data(rows, self._text_getter,
- self._parse_columns)
- for rows in self.parse_rows()]
+ return (self._build_table(table) for table in tables)
- def _parse_raw_data(self, rows, text_getter, column_finder):
+ def _parse_raw_data(self, rows):
"""Parse the raw data into a list of lists.
Parameters
@@ -177,23 +186,8 @@ def _parse_raw_data(self, rows, text_getter, column_finder):
-------
data : list of list of strings
"""
- # callable is back in Python 3.2
- assert callable(text_getter), '"text_getter" must be callable'
- assert callable(column_finder), '"column_finder" must be callable'
-
- data = []
-
- for row in rows:
- if _remove_whitespace(text_getter(row)):
- col = []
-
- for el in column_finder(row):
- t = _remove_whitespace(text_getter(el))
-
- if t:
- col.append(t)
- data.append(col)
-
+ data = [[_remove_whitespace(self._text_getter(col)) for col in
+ self._parse_td(row)] for row in rows]
return data
def _text_getter(self, obj):
@@ -211,8 +205,8 @@ def _text_getter(self, obj):
"""
raise NotImplementedError
- def _parse_columns(self, obj):
- """Return the column elements from a row element.
+ def _parse_td(self, obj):
+ """Return the td elements from a row element.
Parameters
----------
@@ -252,7 +246,7 @@ def _parse_tables(self, doc, match, attrs):
"""
raise NotImplementedError
- def _parse_rows(self, table):
+ def _parse_tr(self, table):
"""Return the list of row elements from the parsed table element.
Parameters
@@ -267,6 +261,51 @@ def _parse_rows(self, table):
"""
raise NotImplementedError
+ def _parse_thead(self, table):
+ """Return the header of a table.
+
+ Parameters
+ ----------
+ table : node-like
+ A table element that contains row elements.
+
+ Returns
+ -------
+ thead : node-like
+ A <thead>...</thead> element.
+ """
+ raise NotImplementedError
+
+ def _parse_tbody(self, table):
+ """Return the body of the table.
+
+ Parameters
+ ----------
+ table : node-like
+ A table element that contains row elements.
+
+ Returns
+ -------
+ tbody : node-like
+ A <tbody>...</tbody> element.
+ """
+ raise NotImplementedError
+
+ def _parse_tfoot(self, table):
+ """Return the footer of the table if any.
+
+ Parameters
+ ----------
+ table : node-like
+ A table element that contains row elements.
+
+ Returns
+ -------
+ tfoot : node-like
+ A <tfoot>...</tfoot> element.
+ """
+ raise NotImplementedError
+
def _build_doc(self):
"""Return a tree-like object that can be used to iterate over the DOM.
@@ -276,8 +315,37 @@ def _build_doc(self):
"""
raise NotImplementedError
+ def _build_table(self, table):
+ header = self._parse_raw_thead(table)
+ body = self._parse_raw_tbody(table)
+ footer = self._parse_raw_tfoot(table)
+ return header, body, footer
+
+ def _parse_raw_thead(self, table):
+ thead = self._parse_thead(table)
+ res = []
+ if thead:
+ res = map(self._text_getter, self._parse_th(thead[0]))
+ return np.array(res).squeeze() if res and len(res) == 1 else res
+
+ def _parse_raw_tfoot(self, table):
+ tfoot = self._parse_tfoot(table)
+ res = []
+ if tfoot:
+ res = map(self._text_getter, self._parse_td(tfoot[0]))
+ return np.array(res).squeeze() if res and len(res) == 1 else res
+
+ def _parse_raw_tbody(self, table):
+ tbody = self._parse_tbody(table)
+
+ try:
+ res = self._parse_tr(tbody[0])
+ except IndexError:
+ res = self._parse_tr(table)
+ return self._parse_raw_data(res)
+
-class _BeautifulSoupFrameParser(_HtmlFrameParser):
+class _BeautifulSoupLxmlFrameParser(_HtmlFrameParser):
"""HTML to DataFrame parser that uses BeautifulSoup under the hood.
See Also
@@ -291,48 +359,68 @@ class _BeautifulSoupFrameParser(_HtmlFrameParser):
:class:`pandas.io.html._HtmlFrameParser`.
"""
def __init__(self, *args, **kwargs):
- super(_BeautifulSoupFrameParser, self).__init__(*args, **kwargs)
+ super(_BeautifulSoupLxmlFrameParser, self).__init__(*args, **kwargs)
+ from bs4 import SoupStrainer
+ self._strainer = SoupStrainer('table')
def _text_getter(self, obj):
return obj.text
- def _parse_columns(self, row):
+ def _parse_td(self, row):
return row.find_all(('td', 'th'))
- def _parse_rows(self, table):
- return table.find_all(('tr', 'thead', 'tfoot'))
+ def _parse_tr(self, element):
+ return element.find_all('tr')
- def _parse_tables(self, doc, match, attrs):
- tables = doc.find_all('table', attrs=attrs)
- assert tables, 'No tables found'
+ def _parse_th(self, element):
+ return element.find_all('th')
+
+ def _parse_thead(self, table):
+ return table.find_all('thead')
+
+ def _parse_tbody(self, table):
+ return table.find_all('tbody')
+
+ def _parse_tfoot(self, table):
+ return table.find_all('tfoot')
- tables = [table for table in tables
- if table.find(text=match) is not None]
- assert tables, "No tables found matching '{0}'".format(match.pattern)
+ def _parse_tables(self, doc, match, attrs):
+ element_name = self._strainer.name
+ tables = doc.find_all(element_name, attrs=attrs)
+ if not tables:
+ raise AssertionError('No tables found')
+
+ mts = [table.find(text=match) for table in tables]
+ matched_tables = [mt for mt in mts if mt is not None]
+ tables = list(set(mt.find_parent(element_name)
+ for mt in matched_tables))
+
+ if not tables:
+ raise AssertionError("No tables found matching "
+ "'{0}'".format(match.pattern))
+ #import ipdb; ipdb.set_trace()
return tables
+ def _setup_build_doc(self):
+ raw_text = _read(self.io)
+ if not raw_text:
+ raise AssertionError('No text parsed from document')
+ return raw_text
+
def _build_doc(self):
- if _is_url(self.io):
- try:
- with contextlib.closing(urllib2.urlopen(self.io)) as url:
- raw_text = url.read()
- except urllib2.URLError:
- raise ValueError('Invalid URL: "{0}"'.format(self.io))
- elif hasattr(self.io, 'read'):
- raw_text = self.io.read()
- elif os.path.isfile(self.io):
- with open(self.io) as f:
- raw_text = f.read()
- elif isinstance(self.io, basestring):
- raw_text = self.io
- else:
- raise ValueError("Cannot read object of"
- " type '{0}'".format(type(self.io)))
- assert raw_text, 'No text parsed from document'
+ from bs4 import BeautifulSoup
+ return BeautifulSoup(self._setup_build_doc(), features='lxml',
+ parse_only=self._strainer)
+
- from bs4 import BeautifulSoup, SoupStrainer
- strainer = SoupStrainer('table')
- return BeautifulSoup(raw_text, parse_only=strainer)
+class _BeautifulSoupHtml5LibFrameParser(_BeautifulSoupLxmlFrameParser):
+ def __init__(self, *args, **kwargs):
+ super(_BeautifulSoupHtml5LibFrameParser, self).__init__(*args,
+ **kwargs)
+
+ def _build_doc(self):
+ from bs4 import BeautifulSoup
+ return BeautifulSoup(self._setup_build_doc(), features='html5lib')
def _build_node_xpath_expr(attrs):
@@ -358,6 +446,7 @@ def _build_node_xpath_expr(attrs):
_re_namespace = {'re': 'http://exslt.org/regular-expressions'}
+_valid_schemes = 'http', 'file', 'ftp'
class _LxmlFrameParser(_HtmlFrameParser):
@@ -370,7 +459,7 @@ class _LxmlFrameParser(_HtmlFrameParser):
See Also
--------
_HtmlFrameParser
- _BeautifulSoupFrameParser
+ _BeautifulSoupLxmlFrameParser
Notes
-----
@@ -383,11 +472,12 @@ def __init__(self, *args, **kwargs):
def _text_getter(self, obj):
return obj.text_content()
- def _parse_columns(self, row):
+ def _parse_td(self, row):
return row.xpath('.//td|.//th')
- def _parse_rows(self, table):
- return table.xpath('(.//tr|.//thead|.//tfoot)[normalize-space()]')
+ def _parse_tr(self, table):
+ expr = './/tr[normalize-space()]'
+ return table.xpath(expr)
def _parse_tables(self, doc, match, kwargs):
pattern = match.pattern
@@ -406,42 +496,68 @@ def _parse_tables(self, doc, match, kwargs):
if kwargs:
xpath_expr += _build_node_xpath_expr(kwargs)
tables = doc.xpath(xpath_expr, namespaces=_re_namespace)
- assert tables, "No tables found matching regex '{0}'".format(pattern)
+ if not tables:
+ raise AssertionError("No tables found matching regex "
+ "'{0}'".format(pattern))
return tables
def _build_doc(self):
"""
Raises
------
- IOError
- * If a valid URL is detected, but for some reason cannot be parsed.
- This is probably due to a faulty or non-existent internet
- connection.
ValueError
* If a URL that lxml cannot parse is passed.
+ Exception
+ * Any other ``Exception`` thrown. For example, trying to parse a
+ URL that is syntactically correct on a machine with no internet
+ connection will fail.
+
See Also
--------
pandas.io.html._HtmlFrameParser._build_doc
"""
from lxml.html import parse, fromstring
+ from lxml.html.clean import clean_html
try:
# try to parse the input in the simplest way
- return parse(self.io)
- except (UnicodeDecodeError, IOError):
- # something went wrong, check for not-a-url because it's probably a
- # huge string blob
+ r = parse(self.io)
+ except (UnicodeDecodeError, IOError) as e:
+ # if the input is a blob of html goop
if not _is_url(self.io):
- return fromstring(self.io)
- elif urlparse.urlparse(self.io).scheme not in ('http', 'ftp',
- 'file'):
- raise ValueError('"{0}" does not have a valid URL'
- ' protocol'.format(self.io))
+ r = fromstring(self.io)
else:
- raise IOError('"{0}" is a valid URL, so you probably are not'
- ' properly connected to the'
- ' internet'.format(self.io))
+ # not a url
+ scheme = urlparse.urlparse(self.io).scheme
+ if scheme not in _valid_schemes:
+ # lxml can't parse it
+ msg = ('{0} is not a valid url scheme, valid schemes are '
+ '{1}').format(scheme, _valid_schemes)
+ raise ValueError(msg)
+ else:
+ # something else happened: maybe a faulty connection
+ raise e
+ return clean_html(r)
+
+ def _parse_tbody(self, table):
+ return table.xpath('.//tbody')
+
+ def _parse_thead(self, table):
+ return table.xpath('.//thead')
+
+ def _parse_tfoot(self, table):
+ return table.xpath('.//tfoot')
+
+ def _parse_raw_thead(self, table):
+ expr = './/thead//th'
+ return [_remove_whitespace(x.text_content()) for x in
+ table.xpath(expr)]
+
+ def _parse_raw_tfoot(self, table):
+ expr = './/tfoot//th'
+ return [_remove_whitespace(x.text_content()) for x in
+ table.xpath(expr)]
def _data_to_frame(data, header, index_col, infer_types, skiprows):
@@ -449,7 +565,7 @@ def _data_to_frame(data, header, index_col, infer_types, skiprows):
Parameters
----------
- data : list of lists of str or unicode
+ data : tuple of lists
The raw data to be placed into a DataFrame. This is a list of lists of
strings or unicode. If it helps, it can be thought of as a matrix of
strings instead.
@@ -491,7 +607,9 @@ def _data_to_frame(data, header, index_col, infer_types, skiprows):
-----
The `data` parameter is guaranteed not to be a list of empty lists.
"""
- df = DataFrame(data)
+ thead, tbody, tfoot = data
+ columns = thead or None
+ df = DataFrame(tbody, columns=columns)
if skiprows is not None:
it = _get_skiprows_iter(skiprows)
@@ -530,16 +648,81 @@ def _data_to_frame(data, header, index_col, infer_types, skiprows):
# drop by default
df.set_index(cols, inplace=True)
+ if df.index.nlevels == 1:
+ if not (df.index.name or df.index.name is None):
+ df.index.name = None
+ else:
+ names = [name or None for name in df.index.names]
+ df.index = MultiIndex.from_tuples(df.index.values, names=names)
return df
-_possible_parsers = {'lxml': _LxmlFrameParser,
- 'bs4': _BeautifulSoupFrameParser}
+_invalid_parsers = {'lxml': _LxmlFrameParser,
+ 'bs4': _BeautifulSoupLxmlFrameParser}
+_valid_parsers = {'html5lib': _BeautifulSoupHtml5LibFrameParser}
+_all_parsers = _valid_parsers.copy()
+_all_parsers.update(_invalid_parsers)
-def read_html(io, match='.+', flavor='bs4', header=None, index_col=None,
- skiprows=None, infer_types=True, attrs=None):
+def _parser_dispatch(flavor):
+ """Choose the parser based on the input flavor.
+
+ Parameters
+ ----------
+ flavor : str
+ The type of parser to use. This must be a valid backend.
+
+ Returns
+ -------
+ cls : _HtmlFrameParser subclass
+ The parser class based on the requested input flavor.
+
+ Raises
+ ------
+ AssertionError
+ * If `flavor` is not a valid backend.
+ """
+ valid_parsers = _valid_parsers.keys()
+ if flavor not in valid_parsers:
+ raise AssertionError('"{0}" is not a valid flavor'.format(flavor))
+
+ if flavor == 'bs4':
+ try:
+ import_module('lxml')
+ parser_t = _BeautifulSoupLxmlFrameParser
+ except ImportError:
+ try:
+ import_module('html5lib')
+ parser_t = _BeautifulSoupHtml5LibFrameParser
+ except ImportError:
+ raise ImportError("read_html does not support the native "
+ "Python 'html.parser' backend for bs4, "
+ "please install either 'lxml' or 'html5lib'")
+ elif flavor == 'html5lib':
+ try:
+ # much better than python's builtin
+ import_module('html5lib')
+ parser_t = _BeautifulSoupHtml5LibFrameParser
+ except ImportError:
+ raise ImportError("html5lib not found please install it")
+ else:
+ parser_t = _LxmlFrameParser
+ return parser_t
+
+
+def _parse(parser, io, match, flavor, header, index_col, skiprows, infer_types,
+ attrs):
+ # bonus: re.compile is idempotent under function iteration so you can pass
+ # a compiled regex to it and it will return itself
+ p = parser(io, re.compile(match), attrs)
+ tables = p.parse_tables()
+ return [_data_to_frame(table, header, index_col, infer_types, skiprows)
+ for table in tables]
+
+
+def read_html(io, match='.+', flavor='html5lib', header=None, index_col=None,
+ skiprows=None, infer_types=False, attrs=None):
r"""Read an HTML table into a DataFrame.
Parameters
@@ -547,7 +730,8 @@ def read_html(io, match='.+', flavor='bs4', header=None, index_col=None,
io : str or file-like
A string or file like object that can be either a url, a file-like
object, or a raw string containing HTML. Note that lxml only accepts
- the http, ftp and file url protocols.
+ the http, ftp and file url protocols. If you have a URI that starts
+ with ``'https'`` you might try removing the ``'s'``.
match : str or regex, optional
The set of tables containing text matching this regex or string will be
@@ -557,10 +741,10 @@ def read_html(io, match='.+', flavor='bs4', header=None, index_col=None,
This value is converted to a regular expression so that there is
consistent behavior between Beautiful Soup and lxml.
- flavor : str, {'lxml', 'bs4'}
- The parsing engine to use under the hood. lxml is faster and bs4
- (Beautiful Soup 4) is better at parsing nested tags, which are not
- uncommon when parsing tables. Defaults to 'bs4'.
+ flavor : str, {'html5lib'}
+ The parsing engine to use under the hood. Right now only ``html5lib``
+ is supported because it returns correct output whereas ``lxml`` does
+ not.
header : int or array-like or None, optional
The row (or rows for a MultiIndex) to use to make the columns headers.
@@ -661,6 +845,7 @@ def read_html(io, match='.+', flavor='bs4', header=None, index_col=None,
Parse some spam infomation from the USDA:
+ >>> from pandas import read_html, DataFrame
>>> url = ('http://ndb.nal.usda.gov/ndb/foods/show/1732?fg=&man=&'
... 'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
>>> dfs = read_html(url, match='Water', header=0)
@@ -670,32 +855,16 @@ def read_html(io, match='.+', flavor='bs4', header=None, index_col=None,
You can pass nothing to the `match` argument:
+ >>> from pandas import read_html, DataFrame
>>> url = 'http://www.fdic.gov/bank/individual/failed/banklist.html'
>>> dfs = read_html(url)
>>> print(len(dfs)) # this will most likely be greater than 1
-
- Try a different parser:
-
- >>> url = 'http://www.fdic.gov/bank/individual/failed/banklist.html'
- >>> dfs = read_html(url, 'Florida', flavor='lxml', attrs={'id': 'table'})
- >>> assert dfs
- >>> assert isinstance(dfs, list)
- >>> assert all(map(lambda x: isinstance(x, DataFrame), dfs))
"""
- # annoying type check here because we don't want to spend time parsing HTML
- # only to end up failing because of an invalid value of skiprows
- if isinstance(skiprows, numbers.Integral):
- assert skiprows >= 0, ('cannot skip rows starting from the end of the '
- 'data (you passed a negative value)')
-
- valid_backends = _possible_parsers.keys()
- assert flavor in valid_backends, ("'{0}' is not a valid backend, the valid"
- " backends are "
- "{1}".format(flavor, valid_backends))
- parser = _possible_parsers[flavor]
-
- # bonus: re.compile is idempotent under function iteration so you can pass
- # a compiled regex to it and it will return itself
- p = parser(io, re.compile(match), attrs)
- return [_data_to_frame(data, header, index_col, infer_types, skiprows)
- for data in p.parse_raw_data()]
+ # Type check here. We don't want to parse only to fail because of an
+ # invalid value of an integer skiprows.
+ if isinstance(skiprows, numbers.Integral) and skiprows < 0:
+ raise AssertionError('cannot skip rows starting from the end of the '
+ 'data (you passed a negative value)')
+ parser = _parser_dispatch(flavor)
+ return _parse(parser, io, match, flavor, header, index_col, skiprows,
+ infer_types, attrs)
diff --git a/pandas/io/tests/data/banklist.csv b/pandas/io/tests/data/banklist.csv
new file mode 100644
index 0000000000000..6545d31fe5fd4
--- /dev/null
+++ b/pandas/io/tests/data/banklist.csv
@@ -0,0 +1,503 @@
+Bank Name,City,State,CERT #,Acquiring Institution,Closing Date,Updated Date
+Douglas County Bank,Douglasville,GA,21649,Hamilton State Bank,26-Apr-13,30-Apr-13
+Parkway Bank,Lenoir,NC,57158,"CertusBank, National Association",26-Apr-13,30-Apr-13
+Chipola Community Bank,Marianna,FL,58034,First Federal Bank of Florida,19-Apr-13,23-Apr-13
+Heritage Bank of North Florida,Orange Park,FL,26680,FirstAtlantic Bank,19-Apr-13,23-Apr-13
+First Federal Bank,Lexington,KY,29594,Your Community Bank,19-Apr-13,23-Apr-13
+Gold Canyon Bank,Gold Canyon,AZ,58066,"First Scottsdale Bank, National Association",5-Apr-13,9-Apr-13
+Frontier Bank,LaGrange,GA,16431,HeritageBank of the South,8-Mar-13,26-Mar-13
+Covenant Bank,Chicago,IL,22476,Liberty Bank and Trust Company,15-Feb-13,4-Mar-13
+1st Regents Bank,Andover,MN,57157,First Minnesota Bank,18-Jan-13,28-Feb-13
+Westside Community Bank,University Place,WA,33997,Sunwest Bank,11-Jan-13,24-Jan-13
+Community Bank of the Ozarks,Sunrise Beach,MO,27331,Bank of Sullivan,14-Dec-12,24-Jan-13
+Hometown Community Bank,Braselton,GA,57928,"CertusBank, National Association",16-Nov-12,24-Jan-13
+Citizens First National Bank,Princeton,IL,3731,Heartland Bank and Trust Company,2-Nov-12,24-Jan-13
+Heritage Bank of Florida,Lutz,FL,35009,Centennial Bank,2-Nov-12,24-Jan-13
+NOVA Bank,Berwyn,PA,27148,No Acquirer,26-Oct-12,24-Jan-13
+Excel Bank,Sedalia,MO,19189,Simmons First National Bank,19-Oct-12,24-Jan-13
+First East Side Savings Bank,Tamarac,FL,28144,Stearns Bank N.A.,19-Oct-12,24-Jan-13
+GulfSouth Private Bank,Destin,FL,58073,SmartBank,19-Oct-12,24-Jan-13
+First United Bank,Crete,IL,20685,"Old Plank Trail Community Bank, National Association",28-Sep-12,15-Nov-12
+Truman Bank,St. Louis,MO,27316,Simmons First National Bank,14-Sep-12,17-Dec-12
+First Commercial Bank,Bloomington,MN,35246,Republic Bank & Trust Company,7-Sep-12,17-Dec-12
+Waukegan Savings Bank,Waukegan,IL,28243,First Midwest Bank,3-Aug-12,11-Oct-12
+Jasper Banking Company,Jasper,GA,16240,Stearns Bank N.A.,27-Jul-12,17-Dec-12
+Second Federal Savings and Loan Association of Chicago,Chicago,IL,27986,Hinsdale Bank & Trust Company,20-Jul-12,14-Jan-13
+Heartland Bank,Leawood,KS,1361,Metcalf Bank,20-Jul-12,17-Dec-12
+First Cherokee State Bank,Woodstock,GA,32711,Community & Southern Bank,20-Jul-12,31-Oct-12
+Georgia Trust Bank,Buford,GA,57847,Community & Southern Bank,20-Jul-12,17-Dec-12
+The Royal Palm Bank of Florida,Naples,FL,57096,First National Bank of the Gulf Coast,20-Jul-12,7-Jan-13
+Glasgow Savings Bank,Glasgow,MO,1056,Regional Missouri Bank,13-Jul-12,11-Oct-12
+Montgomery Bank & Trust,Ailey,GA,19498,Ameris Bank,6-Jul-12,31-Oct-12
+The Farmers Bank of Lynchburg,Lynchburg,TN,1690,Clayton Bank and Trust,15-Jun-12,31-Oct-12
+Security Exchange Bank,Marietta,GA,35299,Fidelity Bank,15-Jun-12,10-Oct-12
+Putnam State Bank,Palatka,FL,27405,Harbor Community Bank,15-Jun-12,10-Oct-12
+Waccamaw Bank,Whiteville,NC,34515,First Community Bank,8-Jun-12,8-Nov-12
+Farmers' and Traders' State Bank,Shabbona,IL,9257,First State Bank,8-Jun-12,10-Oct-12
+Carolina Federal Savings Bank,Charleston,SC,35372,Bank of North Carolina,8-Jun-12,31-Oct-12
+First Capital Bank,Kingfisher,OK,416,F & M Bank,8-Jun-12,10-Oct-12
+"Alabama Trust Bank, National Association",Sylacauga,AL,35224,Southern States Bank,18-May-12,31-Oct-12
+"Security Bank, National Association",North Lauderdale,FL,23156,Banesco USA,4-May-12,31-Oct-12
+Palm Desert National Bank,Palm Desert,CA,23632,Pacific Premier Bank,27-Apr-12,31-Aug-12
+Plantation Federal Bank,Pawleys Island,SC,32503,First Federal Bank,27-Apr-12,31-Oct-12
+"Inter Savings Bank, fsb D/B/A InterBank, fsb",Maple Grove,MN,31495,Great Southern Bank,27-Apr-12,17-Oct-12
+HarVest Bank of Maryland,Gaithersburg,MD,57766,Sonabank,27-Apr-12,17-Oct-12
+Bank of the Eastern Shore,Cambridge,MD,26759,No Acquirer,27-Apr-12,17-Oct-12
+"Fort Lee Federal Savings Bank, FSB",Fort Lee,NJ,35527,Alma Bank,20-Apr-12,31-Aug-12
+Fidelity Bank,Dearborn,MI,33883,The Huntington National Bank,30-Mar-12,9-Aug-12
+Premier Bank,Wilmette,IL,35419,International Bank of Chicago,23-Mar-12,17-Oct-12
+Covenant Bank & Trust,Rock Spring,GA,58068,"Stearns Bank, N.A.",23-Mar-12,31-Oct-12
+New City Bank ,Chicago,IL,57597,No Acquirer,9-Mar-12,29-Oct-12
+Global Commerce Bank,Doraville,GA,34046,Metro City Bank,2-Mar-12,31-Oct-12
+Home Savings of America,Little Falls,MN,29178,No Acquirer,24-Feb-12,17-Dec-12
+Central Bank of Georgia,Ellaville,GA,5687,Ameris Bank,24-Feb-12,9-Aug-12
+SCB Bank,Shelbyville,IN,29761,"First Merchants Bank, National Association",10-Feb-12,25-Mar-13
+Charter National Bank and Trust,Hoffman Estates,IL,23187,"Barrington Bank & Trust Company, National Association",10-Feb-12,25-Mar-13
+BankEast,Knoxville,TN,19869,U.S.Bank National Association,27-Jan-12,8-Mar-13
+Patriot Bank Minnesota,Forest Lake,MN,34823,First Resource Bank,27-Jan-12,12-Sep-12
+Tennessee Commerce Bank ,Franklin,TN,35296,Republic Bank & Trust Company,27-Jan-12,20-Nov-12
+First Guaranty Bank and Trust Company of Jacksonville,Jacksonville,FL,16579,"CenterState Bank of Florida, N.A.",27-Jan-12,12-Sep-12
+American Eagle Savings Bank,Boothwyn,PA,31581,"Capital Bank, N.A.",20-Jan-12,25-Jan-13
+The First State Bank,Stockbridge,GA,19252,Hamilton State Bank,20-Jan-12,25-Jan-13
+Central Florida State Bank,Belleview,FL,57186,"CenterState Bank of Florida, N.A.",20-Jan-12,25-Jan-13
+Western National Bank,Phoenix,AZ,57917,Washington Federal,16-Dec-11,13-Aug-12
+Premier Community Bank of the Emerald Coast,Crestview,FL,58343,Summit Bank,16-Dec-11,12-Sep-12
+Central Progressive Bank,Lacombe,LA,19657,First NBC Bank,18-Nov-11,13-Aug-12
+Polk County Bank,Johnston,IA,14194,Grinnell State Bank,18-Nov-11,15-Aug-12
+Community Bank of Rockmart,Rockmart,GA,57860,Century Bank of Georgia,10-Nov-11,13-Aug-12
+SunFirst Bank,Saint George,UT,57087,Cache Valley Bank,4-Nov-11,16-Nov-12
+"Mid City Bank, Inc.",Omaha,NE,19397,Premier Bank,4-Nov-11,15-Aug-12
+All American Bank,Des Plaines,IL,57759,International Bank of Chicago,28-Oct-11,15-Aug-12
+Community Banks of Colorado,Greenwood Village,CO,21132,"Bank Midwest, N.A.",21-Oct-11,2-Jan-13
+Community Capital Bank,Jonesboro,GA,57036,State Bank and Trust Company,21-Oct-11,8-Nov-12
+Decatur First Bank,Decatur,GA,34392,Fidelity Bank,21-Oct-11,8-Nov-12
+Old Harbor Bank,Clearwater,FL,57537,1st United Bank,21-Oct-11,8-Nov-12
+Country Bank,Aledo,IL,35395,Blackhawk Bank & Trust,14-Oct-11,15-Aug-12
+First State Bank,Cranford,NJ,58046,Northfield Bank,14-Oct-11,8-Nov-12
+"Blue Ridge Savings Bank, Inc.",Asheville,NC,32347,Bank of North Carolina,14-Oct-11,8-Nov-12
+Piedmont Community Bank,Gray,GA,57256,State Bank and Trust Company,14-Oct-11,22-Jan-13
+Sun Security Bank,Ellington,MO,20115,Great Southern Bank,7-Oct-11,7-Nov-12
+The RiverBank,Wyoming,MN,10216,Central Bank,7-Oct-11,7-Nov-12
+First International Bank,Plano,TX,33513,American First National Bank,30-Sep-11,9-Oct-12
+Citizens Bank of Northern California,Nevada City,CA,33983,Tri Counties Bank,23-Sep-11,9-Oct-12
+Bank of the Commonwealth,Norfolk,VA,20408,Southern Bank and Trust Company,23-Sep-11,9-Oct-12
+The First National Bank of Florida,Milton,FL,25155,CharterBank,9-Sep-11,6-Sep-12
+CreekSide Bank,Woodstock,GA,58226,Georgia Commerce Bank,2-Sep-11,6-Sep-12
+Patriot Bank of Georgia,Cumming,GA,58273,Georgia Commerce Bank,2-Sep-11,2-Nov-12
+First Choice Bank,Geneva,IL,57212,Inland Bank & Trust,19-Aug-11,15-Aug-12
+First Southern National Bank,Statesboro,GA,57239,Heritage Bank of the South,19-Aug-11,2-Nov-12
+Lydian Private Bank,Palm Beach,FL,35356,"Sabadell United Bank, N.A.",19-Aug-11,2-Nov-12
+Public Savings Bank,Huntingdon Valley,PA,34130,"Capital Bank, N.A.",18-Aug-11,15-Aug-12
+The First National Bank of Olathe,Olathe,KS,4744,Enterprise Bank & Trust,12-Aug-11,23-Aug-12
+Bank of Whitman,Colfax,WA,22528,Columbia State Bank,5-Aug-11,16-Aug-12
+Bank of Shorewood,Shorewood,IL,22637,Heartland Bank and Trust Company,5-Aug-11,16-Aug-12
+Integra Bank National Association,Evansville,IN,4392,Old National Bank,29-Jul-11,16-Aug-12
+"BankMeridian, N.A.",Columbia,SC,58222,SCBT National Association,29-Jul-11,2-Nov-12
+Virginia Business Bank,Richmond,VA,58283,Xenith Bank,29-Jul-11,9-Oct-12
+Bank of Choice,Greeley,CO,2994,"Bank Midwest, N.A.",22-Jul-11,12-Sep-12
+LandMark Bank of Florida,Sarasota,FL,35244,American Momentum Bank,22-Jul-11,2-Nov-12
+Southshore Community Bank,Apollo Beach,FL,58056,American Momentum Bank,22-Jul-11,2-Nov-12
+Summit Bank,Prescott,AZ,57442,The Foothills Bank,15-Jul-11,16-Aug-12
+First Peoples Bank,Port St. Lucie,FL,34870,"Premier American Bank, N.A.",15-Jul-11,2-Nov-12
+High Trust Bank,Stockbridge,GA,19554,Ameris Bank,15-Jul-11,2-Nov-12
+One Georgia Bank,Atlanta,GA,58238,Ameris Bank,15-Jul-11,2-Nov-12
+Signature Bank,Windsor,CO,57835,Points West Community Bank,8-Jul-11,26-Oct-12
+Colorado Capital Bank,Castle Rock,CO,34522,First-Citizens Bank & Trust Company,8-Jul-11,15-Jan-13
+First Chicago Bank & Trust,Chicago,IL,27935,Northbrook Bank & Trust Company,8-Jul-11,9-Sep-12
+Mountain Heritage Bank,Clayton,GA,57593,First American Bank and Trust Company,24-Jun-11,2-Nov-12
+First Commercial Bank of Tampa Bay,Tampa,FL,27583,Stonegate Bank,17-Jun-11,2-Nov-12
+McIntosh State Bank,Jackson,GA,19237,Hamilton State Bank,17-Jun-11,2-Nov-12
+Atlantic Bank and Trust,Charleston,SC,58420,"First Citizens Bank and Trust Company, Inc.",3-Jun-11,31-Oct-12
+First Heritage Bank,Snohomish,WA,23626,Columbia State Bank,27-May-11,28-Jan-13
+Summit Bank,Burlington,WA,513,Columbia State Bank,20-May-11,22-Jan-13
+First Georgia Banking Company,Franklin,GA,57647,"CertusBank, National Association",20-May-11,13-Nov-12
+Atlantic Southern Bank,Macon,GA,57213,"CertusBank, National Association",20-May-11,31-Oct-12
+Coastal Bank,Cocoa Beach,FL,34898,"Florida Community Bank, a division of Premier American Bank, N.A.",6-May-11,30-Nov-12
+Community Central Bank,Mount Clemens,MI,34234,Talmer Bank & Trust,29-Apr-11,16-Aug-12
+The Park Avenue Bank,Valdosta,GA,19797,Bank of the Ozarks,29-Apr-11,30-Nov-12
+First Choice Community Bank,Dallas,GA,58539,Bank of the Ozarks,29-Apr-11,22-Jan-13
+Cortez Community Bank,Brooksville,FL,57625,"Florida Community Bank, a division of Premier American Bank, N.A.",29-Apr-11,30-Nov-12
+First National Bank of Central Florida,Winter Park,FL,26297,"Florida Community Bank, a division of Premier American Bank, N.A.",29-Apr-11,30-Nov-12
+Heritage Banking Group,Carthage,MS,14273,Trustmark National Bank,15-Apr-11,30-Nov-12
+Rosemount National Bank,Rosemount,MN,24099,Central Bank,15-Apr-11,16-Aug-12
+Superior Bank,Birmingham,AL,17750,"Superior Bank, National Association",15-Apr-11,30-Nov-12
+Nexity Bank,Birmingham,AL,19794,AloStar Bank of Commerce,15-Apr-11,4-Sep-12
+New Horizons Bank,East Ellijay,GA,57705,Citizens South Bank,15-Apr-11,16-Aug-12
+Bartow County Bank,Cartersville,GA,21495,Hamilton State Bank,15-Apr-11,22-Jan-13
+Nevada Commerce Bank,Las Vegas,NV,35418,City National Bank,8-Apr-11,9-Sep-12
+Western Springs National Bank and Trust,Western Springs,IL,10086,Heartland Bank and Trust Company,8-Apr-11,22-Jan-13
+The Bank of Commerce,Wood Dale,IL,34292,Advantage National Bank Group,25-Mar-11,22-Jan-13
+Legacy Bank,Milwaukee,WI,34818,Seaway Bank and Trust Company,11-Mar-11,12-Sep-12
+First National Bank of Davis,Davis,OK,4077,The Pauls Valley National Bank,11-Mar-11,20-Aug-12
+Valley Community Bank,St. Charles,IL,34187,First State Bank,25-Feb-11,12-Sep-12
+"San Luis Trust Bank, FSB ",San Luis Obispo,CA,34783,First California Bank,18-Feb-11,20-Aug-12
+Charter Oak Bank,Napa,CA,57855,Bank of Marin,18-Feb-11,12-Sep-12
+Citizens Bank of Effingham,Springfield,GA,34601,Heritage Bank of the South,18-Feb-11,2-Nov-12
+Habersham Bank,Clarkesville,GA,151,SCBT National Association,18-Feb-11,2-Nov-12
+Canyon National Bank,Palm Springs,CA,34692,Pacific Premier Bank,11-Feb-11,12-Sep-12
+Badger State Bank,Cassville,WI,13272,Royal Bank,11-Feb-11,12-Sep-12
+Peoples State Bank,Hamtramck,MI,14939,First Michigan Bank,11-Feb-11,22-Jan-13
+Sunshine State Community Bank,Port Orange,FL,35478,"Premier American Bank, N.A.",11-Feb-11,2-Nov-12
+Community First Bank Chicago,Chicago,IL,57948,Northbrook Bank & Trust Company,4-Feb-11,20-Aug-12
+North Georgia Bank,Watkinsville,GA,35242,BankSouth,4-Feb-11,2-Nov-12
+American Trust Bank,Roswell,GA,57432,Renasant Bank,4-Feb-11,31-Oct-12
+First Community Bank,Taos,NM,12261,"U.S. Bank, N.A.",28-Jan-11,12-Sep-12
+FirsTier Bank,Louisville,CO,57646,No Acquirer,28-Jan-11,12-Sep-12
+Evergreen State Bank,Stoughton,WI,5328,McFarland State Bank,28-Jan-11,12-Sep-12
+The First State Bank,Camargo,OK,2303,Bank 7,28-Jan-11,12-Sep-12
+United Western Bank,Denver,CO,31293,First-Citizens Bank & Trust Company,21-Jan-11,12-Sep-12
+The Bank of Asheville,Asheville,NC,34516,First Bank,21-Jan-11,2-Nov-12
+CommunitySouth Bank & Trust,Easley,SC,57868,"CertusBank, National Association",21-Jan-11,2-Nov-12
+Enterprise Banking Company,McDonough,GA,19758,No Acquirer,21-Jan-11,2-Nov-12
+Oglethorpe Bank,Brunswick,GA,57440,Bank of the Ozarks,14-Jan-11,2-Nov-12
+Legacy Bank,Scottsdale,AZ,57820,Enterprise Bank & Trust,7-Jan-11,12-Sep-12
+First Commercial Bank of Florida,Orlando,FL,34965,First Southern Bank,7-Jan-11,2-Nov-12
+Community National Bank,Lino Lakes,MN,23306,Farmers & Merchants Savings Bank,17-Dec-10,20-Aug-12
+First Southern Bank ,Batesville,AR,58052,Southern Bank,17-Dec-10,20-Aug-12
+"United Americas Bank, N.A.",Atlanta,GA,35065,State Bank and Trust Company,17-Dec-10,2-Nov-12
+"Appalachian Community Bank, FSB ",McCaysville,GA,58495,Peoples Bank of East Tennessee,17-Dec-10,31-Oct-12
+Chestatee State Bank,Dawsonville,GA,34578,Bank of the Ozarks,17-Dec-10,2-Nov-12
+"The Bank of Miami,N.A.",Coral Gables,FL,19040,1st United Bank,17-Dec-10,2-Nov-12
+Earthstar Bank,Southampton,PA,35561,Polonia Bank,10-Dec-10,20-Aug-12
+Paramount Bank,Farmington Hills,MI,34673,Level One Bank,10-Dec-10,20-Aug-12
+First Banking Center,Burlington,WI,5287,First Michigan Bank,19-Nov-10,20-Aug-12
+Allegiance Bank of North America,Bala Cynwyd,PA,35078,VIST Bank,19-Nov-10,20-Aug-12
+Gulf State Community Bank,Carrabelle,FL,20340,Centennial Bank,19-Nov-10,2-Nov-12
+Copper Star Bank,Scottsdale,AZ,35463,"Stearns Bank, N.A.",12-Nov-10,20-Aug-12
+Darby Bank & Trust Co.,Vidalia,GA,14580,Ameris Bank,12-Nov-10,15-Jan-13
+Tifton Banking Company,Tifton,GA,57831,Ameris Bank,12-Nov-10,2-Nov-12
+First Vietnamese American Bank,Westminster,CA,57885,Grandpoint Bank,5-Nov-10,12-Sep-12
+Pierce Commercial Bank,Tacoma,WA,34411,Heritage Bank,5-Nov-10,20-Aug-12
+Western Commercial Bank,Woodland Hills,CA,58087,First California Bank,5-Nov-10,12-Sep-12
+K Bank,Randallstown,MD,31263,Manufacturers and Traders Trust Company (M&T Bank),5-Nov-10,20-Aug-12
+"First Arizona Savings, A FSB",Scottsdale,AZ,32582,No Acquirer,22-Oct-10,20-Aug-12
+Hillcrest Bank,Overland Park,KS,22173,"Hillcrest Bank, N.A.",22-Oct-10,20-Aug-12
+First Suburban National Bank,Maywood,IL,16089,Seaway Bank and Trust Company,22-Oct-10,20-Aug-12
+The First National Bank of Barnesville,Barnesville,GA,2119,United Bank,22-Oct-10,2-Nov-12
+The Gordon Bank,Gordon,GA,33904,Morris Bank,22-Oct-10,2-Nov-12
+Progress Bank of Florida,Tampa,FL,32251,Bay Cities Bank,22-Oct-10,2-Nov-12
+First Bank of Jacksonville,Jacksonville,FL,27573,Ameris Bank,22-Oct-10,2-Nov-12
+Premier Bank,Jefferson City,MO,34016,Providence Bank,15-Oct-10,20-Aug-12
+WestBridge Bank and Trust Company,Chesterfield,MO,58205,Midland States Bank,15-Oct-10,20-Aug-12
+"Security Savings Bank, F.S.B.",Olathe,KS,30898,Simmons First National Bank,15-Oct-10,20-Aug-12
+Shoreline Bank,Shoreline,WA,35250,GBC International Bank,1-Oct-10,20-Aug-12
+Wakulla Bank,Crawfordville,FL,21777,Centennial Bank,1-Oct-10,2-Nov-12
+North County Bank,Arlington,WA,35053,Whidbey Island Bank,24-Sep-10,20-Aug-12
+Haven Trust Bank Florida,Ponte Vedra Beach,FL,58308,First Southern Bank,24-Sep-10,5-Nov-12
+Maritime Savings Bank,West Allis,WI,28612,"North Shore Bank, FSB",17-Sep-10,20-Aug-12
+Bramble Savings Bank,Milford,OH,27808,Foundation Bank,17-Sep-10,20-Aug-12
+The Peoples Bank,Winder,GA,182,Community & Southern Bank,17-Sep-10,5-Nov-12
+First Commerce Community Bank,Douglasville,GA,57448,Community & Southern Bank,17-Sep-10,15-Jan-13
+Bank of Ellijay,Ellijay,GA,58197,Community & Southern Bank,17-Sep-10,15-Jan-13
+ISN Bank,Cherry Hill,NJ,57107,Customers Bank,17-Sep-10,22-Aug-12
+Horizon Bank,Bradenton,FL,35061,Bank of the Ozarks,10-Sep-10,5-Nov-12
+Sonoma Valley Bank,Sonoma,CA,27259,Westamerica Bank,20-Aug-10,12-Sep-12
+Los Padres Bank,Solvang,CA,32165,Pacific Western Bank,20-Aug-10,12-Sep-12
+Butte Community Bank,Chico,CA,33219,"Rabobank, N.A.",20-Aug-10,12-Sep-12
+Pacific State Bank,Stockton,CA,27090,"Rabobank, N.A.",20-Aug-10,12-Sep-12
+ShoreBank,Chicago,IL,15640,Urban Partnership Bank,20-Aug-10,12-Sep-12
+Imperial Savings and Loan Association,Martinsville,VA,31623,"River Community Bank, N.A.",20-Aug-10,24-Aug-12
+Independent National Bank,Ocala,FL,27344,"CenterState Bank of Florida, N.A.",20-Aug-10,5-Nov-12
+Community National Bank at Bartow,Bartow,FL,25266,"CenterState Bank of Florida, N.A.",20-Aug-10,5-Nov-12
+Palos Bank and Trust Company,Palos Heights,IL,17599,First Midwest Bank,13-Aug-10,22-Aug-12
+Ravenswood Bank,Chicago,IL,34231,Northbrook Bank & Trust Company,6-Aug-10,22-Aug-12
+LibertyBank,Eugene,OR,31964,Home Federal Bank,30-Jul-10,22-Aug-12
+The Cowlitz Bank,Longview,WA,22643,Heritage Bank,30-Jul-10,22-Aug-12
+Coastal Community Bank,Panama City Beach,FL,9619,Centennial Bank,30-Jul-10,5-Nov-12
+Bayside Savings Bank,Port Saint Joe,FL,57669,Centennial Bank,30-Jul-10,5-Nov-12
+Northwest Bank & Trust,Acworth,GA,57658,State Bank and Trust Company,30-Jul-10,5-Nov-12
+Home Valley Bank ,Cave Junction,OR,23181,South Valley Bank & Trust,23-Jul-10,12-Sep-12
+SouthwestUSA Bank ,Las Vegas,NV,35434,Plaza Bank,23-Jul-10,22-Aug-12
+Community Security Bank ,New Prague,MN,34486,Roundbank,23-Jul-10,12-Sep-12
+Thunder Bank ,Sylvan Grove,KS,10506,The Bennington State Bank,23-Jul-10,13-Sep-12
+Williamsburg First National Bank ,Kingstree,SC,17837,"First Citizens Bank and Trust Company, Inc.",23-Jul-10,5-Nov-12
+Crescent Bank and Trust Company ,Jasper,GA,27559,Renasant Bank,23-Jul-10,5-Nov-12
+Sterling Bank ,Lantana,FL,32536,IBERIABANK,23-Jul-10,5-Nov-12
+"Mainstreet Savings Bank, FSB",Hastings,MI,28136,Commercial Bank,16-Jul-10,13-Sep-12
+Olde Cypress Community Bank,Clewiston,FL,28864,"CenterState Bank of Florida, N.A.",16-Jul-10,5-Nov-12
+Turnberry Bank,Aventura,FL,32280,NAFH National Bank,16-Jul-10,5-Nov-12
+Metro Bank of Dade County,Miami,FL,25172,NAFH National Bank,16-Jul-10,5-Nov-12
+First National Bank of the South,Spartanburg,SC,35383,NAFH National Bank,16-Jul-10,5-Nov-12
+Woodlands Bank,Bluffton,SC,32571,Bank of the Ozarks,16-Jul-10,5-Nov-12
+Home National Bank,Blackwell,OK,11636,RCB Bank,9-Jul-10,10-Dec-12
+USA Bank,Port Chester,NY,58072,New Century Bank,9-Jul-10,14-Sep-12
+Ideal Federal Savings Bank,Baltimore,MD,32456,No Acquirer,9-Jul-10,14-Sep-12
+Bay National Bank,Baltimore,MD,35462,"Bay Bank, FSB",9-Jul-10,15-Jan-13
+High Desert State Bank,Albuquerque,NM,35279,First American Bank,25-Jun-10,14-Sep-12
+First National Bank,Savannah,GA,34152,"The Savannah Bank, N.A.",25-Jun-10,5-Nov-12
+Peninsula Bank,Englewood,FL,26563,"Premier American Bank, N.A.",25-Jun-10,5-Nov-12
+Nevada Security Bank,Reno,NV,57110,Umpqua Bank,18-Jun-10,23-Aug-12
+Washington First International Bank,Seattle,WA,32955,East West Bank,11-Jun-10,14-Sep-12
+TierOne Bank,Lincoln,NE,29341,Great Western Bank,4-Jun-10,14-Sep-12
+Arcola Homestead Savings Bank,Arcola,IL,31813,No Acquirer,4-Jun-10,14-Sep-12
+First National Bank,Rosedale,MS,15814,The Jefferson Bank,4-Jun-10,5-Nov-12
+Sun West Bank,Las Vegas,NV,34785,City National Bank,28-May-10,14-Sep-12
+"Granite Community Bank, NA",Granite Bay,CA,57315,Tri Counties Bank,28-May-10,14-Sep-12
+Bank of Florida - Tampa,Tampa,FL,57814,EverBank,28-May-10,5-Nov-12
+Bank of Florida - Southwest,Naples,FL,35106,EverBank,28-May-10,5-Nov-12
+Bank of Florida - Southeast,Fort Lauderdale,FL,57360,EverBank,28-May-10,5-Nov-12
+Pinehurst Bank,Saint Paul,MN,57735,Coulee Bank,21-May-10,26-Oct-12
+Midwest Bank and Trust Company,Elmwood Park,IL,18117,"FirstMerit Bank, N.A.",14-May-10,23-Aug-12
+Southwest Community Bank,Springfield,MO,34255,Simmons First National Bank,14-May-10,23-Aug-12
+New Liberty Bank,Plymouth,MI,35586,Bank of Ann Arbor,14-May-10,23-Aug-12
+Satilla Community Bank,Saint Marys,GA,35114,Ameris Bank,14-May-10,5-Nov-12
+1st Pacific Bank of California,San Diego,CA,35517,City National Bank,7-May-10,13-Dec-12
+Towne Bank of Arizona,Mesa,AZ,57697,Commerce Bank of Arizona,7-May-10,23-Aug-12
+Access Bank,Champlin,MN,16476,PrinsBank,7-May-10,23-Aug-12
+The Bank of Bonifay,Bonifay,FL,14246,First Federal Bank of Florida,7-May-10,5-Nov-12
+Frontier Bank,Everett,WA,22710,"Union Bank, N.A.",30-Apr-10,15-Jan-13
+BC National Banks,Butler,MO,17792,Community First Bank,30-Apr-10,23-Aug-12
+Champion Bank,Creve Coeur,MO,58362,BankLiberty,30-Apr-10,23-Aug-12
+CF Bancorp,Port Huron,MI,30005,First Michigan Bank,30-Apr-10,15-Jan-13
+Westernbank Puerto Rico,Mayaguez,PR,31027,Banco Popular de Puerto Rico,30-Apr-10,5-Nov-12
+R-G Premier Bank of Puerto Rico,Hato Rey,PR,32185,Scotiabank de Puerto Rico,30-Apr-10,5-Nov-12
+Eurobank,San Juan,PR,27150,Oriental Bank and Trust,30-Apr-10,5-Nov-12
+Wheatland Bank,Naperville,IL,58429,Wheaton Bank & Trust,23-Apr-10,23-Aug-12
+Peotone Bank and Trust Company,Peotone,IL,10888,First Midwest Bank,23-Apr-10,23-Aug-12
+Lincoln Park Savings Bank,Chicago,IL,30600,Northbrook Bank & Trust Company,23-Apr-10,23-Aug-12
+New Century Bank,Chicago,IL,34821,"MB Financial Bank, N.A.",23-Apr-10,23-Aug-12
+Citizens Bank and Trust Company of Chicago,Chicago,IL,34658,Republic Bank of Chicago,23-Apr-10,23-Aug-12
+Broadway Bank,Chicago,IL,22853,"MB Financial Bank, N.A.",23-Apr-10,23-Aug-12
+"Amcore Bank, National Association",Rockford,IL,3735,Harris N.A.,23-Apr-10,23-Aug-12
+City Bank,Lynnwood,WA,21521,Whidbey Island Bank,16-Apr-10,14-Sep-12
+Tamalpais Bank,San Rafael,CA,33493,"Union Bank, N.A.",16-Apr-10,23-Aug-12
+Innovative Bank,Oakland,CA,23876,Center Bank,16-Apr-10,23-Aug-12
+Butler Bank,Lowell,MA,26619,People's United Bank,16-Apr-10,23-Aug-12
+Riverside National Bank of Florida,Fort Pierce,FL,24067,"TD Bank, N.A.",16-Apr-10,5-Nov-12
+AmericanFirst Bank,Clermont,FL,57724,"TD Bank, N.A.",16-Apr-10,31-Oct-12
+First Federal Bank of North Florida,Palatka,FL,28886,"TD Bank, N.A.",16-Apr-10,15-Jan-13
+Lakeside Community Bank,Sterling Heights,MI,34878,No Acquirer,16-Apr-10,23-Aug-12
+Beach First National Bank,Myrtle Beach,SC,34242,Bank of North Carolina,9-Apr-10,5-Nov-12
+Desert Hills Bank,Phoenix,AZ,57060,New York Community Bank,26-Mar-10,23-Aug-12
+Unity National Bank,Cartersville,GA,34678,Bank of the Ozarks,26-Mar-10,14-Sep-12
+Key West Bank,Key West,FL,34684,Centennial Bank,26-Mar-10,23-Aug-12
+McIntosh Commercial Bank,Carrollton,GA,57399,CharterBank,26-Mar-10,23-Aug-12
+State Bank of Aurora,Aurora,MN,8221,Northern State Bank,19-Mar-10,23-Aug-12
+First Lowndes Bank,Fort Deposit,AL,24957,First Citizens Bank,19-Mar-10,23-Aug-12
+Bank of Hiawassee,Hiawassee,GA,10054,Citizens South Bank,19-Mar-10,23-Aug-12
+Appalachian Community Bank,Ellijay,GA,33989,Community & Southern Bank,19-Mar-10,31-Oct-12
+Advanta Bank Corp.,Draper,UT,33535,No Acquirer,19-Mar-10,14-Sep-12
+Century Security Bank,Duluth,GA,58104,Bank of Upson,19-Mar-10,23-Aug-12
+American National Bank,Parma,OH,18806,The National Bank and Trust Company,19-Mar-10,23-Aug-12
+Statewide Bank,Covington,LA,29561,Home Bank,12-Mar-10,23-Aug-12
+Old Southern Bank,Orlando,FL,58182,Centennial Bank,12-Mar-10,23-Aug-12
+The Park Avenue Bank,New York,NY,27096,Valley National Bank,12-Mar-10,23-Aug-12
+LibertyPointe Bank,New York,NY,58071,Valley National Bank,11-Mar-10,23-Aug-12
+Centennial Bank,Ogden,UT,34430,No Acquirer,5-Mar-10,14-Sep-12
+Waterfield Bank,Germantown,MD,34976,No Acquirer,5-Mar-10,23-Aug-12
+Bank of Illinois,Normal,IL,9268,Heartland Bank and Trust Company,5-Mar-10,23-Aug-12
+Sun American Bank,Boca Raton,FL,27126,First-Citizens Bank & Trust Company,5-Mar-10,23-Aug-12
+Rainier Pacific Bank,Tacoma,WA,38129,Umpqua Bank,26-Feb-10,23-Aug-12
+Carson River Community Bank,Carson City,NV,58352,Heritage Bank of Nevada,26-Feb-10,15-Jan-13
+"La Jolla Bank, FSB",La Jolla,CA,32423,"OneWest Bank, FSB",19-Feb-10,24-Aug-12
+George Washington Savings Bank,Orland Park,IL,29952,"FirstMerit Bank, N.A.",19-Feb-10,24-Aug-12
+The La Coste National Bank,La Coste,TX,3287,Community National Bank,19-Feb-10,14-Sep-12
+Marco Community Bank,Marco Island,FL,57586,Mutual of Omaha Bank,19-Feb-10,24-Aug-12
+1st American State Bank of Minnesota,Hancock,MN,15448,"Community Development Bank, FSB",5-Feb-10,24-Aug-12
+American Marine Bank,Bainbridge Island,WA,16730,Columbia State Bank,29-Jan-10,24-Aug-12
+First Regional Bank,Los Angeles,CA,23011,First-Citizens Bank & Trust Company,29-Jan-10,24-Aug-12
+Community Bank and Trust,Cornelia,GA,5702,SCBT National Association,29-Jan-10,15-Jan-13
+"Marshall Bank, N.A.",Hallock,MN,16133,United Valley Bank,29-Jan-10,23-Aug-12
+Florida Community Bank,Immokalee,FL,5672,"Premier American Bank, N.A.",29-Jan-10,15-Jan-13
+First National Bank of Georgia,Carrollton,GA,16480,Community & Southern Bank,29-Jan-10,13-Dec-12
+Columbia River Bank,The Dalles,OR,22469,Columbia State Bank,22-Jan-10,14-Sep-12
+Evergreen Bank,Seattle,WA,20501,Umpqua Bank,22-Jan-10,15-Jan-13
+Charter Bank,Santa Fe,NM,32498,Charter Bank,22-Jan-10,23-Aug-12
+Bank of Leeton,Leeton,MO,8265,"Sunflower Bank, N.A.",22-Jan-10,15-Jan-13
+Premier American Bank,Miami,FL,57147,"Premier American Bank, N.A.",22-Jan-10,13-Dec-12
+Barnes Banking Company,Kaysville,UT,1252,No Acquirer,15-Jan-10,23-Aug-12
+St. Stephen State Bank,St. Stephen,MN,17522,First State Bank of St. Joseph,15-Jan-10,23-Aug-12
+Town Community Bank & Trust,Antioch,IL,34705,First American Bank,15-Jan-10,23-Aug-12
+Horizon Bank,Bellingham,WA,22977,Washington Federal Savings and Loan Association,8-Jan-10,23-Aug-12
+"First Federal Bank of California, F.S.B.",Santa Monica,CA,28536,"OneWest Bank, FSB",18-Dec-09,23-Aug-12
+Imperial Capital Bank,La Jolla,CA,26348,City National Bank,18-Dec-09,5-Sep-12
+Independent Bankers' Bank,Springfield,IL,26820,The Independent BankersBank (TIB),18-Dec-09,23-Aug-12
+New South Federal Savings Bank,Irondale,AL,32276,Beal Bank,18-Dec-09,23-Aug-12
+Citizens State Bank,New Baltimore,MI,1006,No Acquirer,18-Dec-09,5-Nov-12
+Peoples First Community Bank,Panama City,FL,32167,Hancock Bank,18-Dec-09,5-Nov-12
+RockBridge Commercial Bank,Atlanta,GA,58315,No Acquirer,18-Dec-09,5-Nov-12
+SolutionsBank,Overland Park,KS,4731,Arvest Bank,11-Dec-09,23-Aug-12
+"Valley Capital Bank, N.A.",Mesa,AZ,58399,Enterprise Bank & Trust,11-Dec-09,23-Aug-12
+"Republic Federal Bank, N.A.",Miami,FL,22846,1st United Bank,11-Dec-09,5-Nov-12
+Greater Atlantic Bank,Reston,VA,32583,Sonabank,4-Dec-09,5-Nov-12
+Benchmark Bank,Aurora,IL,10440,"MB Financial Bank, N.A.",4-Dec-09,23-Aug-12
+AmTrust Bank,Cleveland,OH,29776,New York Community Bank,4-Dec-09,5-Nov-12
+The Tattnall Bank,Reidsville,GA,12080,Heritage Bank of the South,4-Dec-09,5-Nov-12
+First Security National Bank,Norcross,GA,26290,State Bank and Trust Company,4-Dec-09,5-Nov-12
+The Buckhead Community Bank,Atlanta,GA,34663,State Bank and Trust Company,4-Dec-09,5-Nov-12
+Commerce Bank of Southwest Florida,Fort Myers,FL,58016,Central Bank,20-Nov-09,5-Nov-12
+Pacific Coast National Bank,San Clemente,CA,57914,Sunwest Bank,13-Nov-09,22-Aug-12
+Orion Bank,Naples,FL,22427,IBERIABANK,13-Nov-09,5-Nov-12
+"Century Bank, F.S.B.",Sarasota,FL,32267,IBERIABANK,13-Nov-09,22-Aug-12
+United Commercial Bank,San Francisco,CA,32469,East West Bank,6-Nov-09,5-Nov-12
+Gateway Bank of St. Louis,St. Louis,MO,19450,Central Bank of Kansas City,6-Nov-09,22-Aug-12
+Prosperan Bank,Oakdale,MN,35074,"Alerus Financial, N.A.",6-Nov-09,22-Aug-12
+Home Federal Savings Bank,Detroit,MI,30329,Liberty Bank and Trust Company,6-Nov-09,22-Aug-12
+United Security Bank,Sparta,GA,22286,Ameris Bank,6-Nov-09,15-Jan-13
+North Houston Bank,Houston,TX,18776,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+Madisonville State Bank,Madisonville,TX,33782,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+Citizens National Bank,Teague,TX,25222,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+Park National Bank,Chicago,IL,11677,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+Pacific National Bank,San Francisco,CA,30006,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+California National Bank,Los Angeles,CA,34659,U.S. Bank N.A.,30-Oct-09,5-Sep-12
+San Diego National Bank,San Diego,CA,23594,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+Community Bank of Lemont,Lemont,IL,35291,U.S. Bank N.A.,30-Oct-09,15-Jan-13
+"Bank USA, N.A.",Phoenix,AZ,32218,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+First DuPage Bank,Westmont,IL,35038,First Midwest Bank,23-Oct-09,22-Aug-12
+Riverview Community Bank,Otsego,MN,57525,Central Bank,23-Oct-09,22-Aug-12
+Bank of Elmwood,Racine,WI,18321,Tri City National Bank,23-Oct-09,22-Aug-12
+Flagship National Bank,Bradenton,FL,35044,First Federal Bank of Florida,23-Oct-09,22-Aug-12
+Hillcrest Bank Florida,Naples,FL,58336,Stonegate Bank,23-Oct-09,22-Aug-12
+American United Bank,Lawrenceville,GA,57794,Ameris Bank,23-Oct-09,5-Sep-12
+Partners Bank,Naples,FL,57959,Stonegate Bank,23-Oct-09,15-Jan-13
+San Joaquin Bank,Bakersfield,CA,23266,Citizens Business Bank,16-Oct-09,22-Aug-12
+Southern Colorado National Bank,Pueblo,CO,57263,Legacy Bank,2-Oct-09,5-Sep-12
+Jennings State Bank,Spring Grove,MN,11416,Central Bank,2-Oct-09,21-Aug-12
+Warren Bank,Warren,MI,34824,The Huntington National Bank,2-Oct-09,21-Aug-12
+Georgian Bank,Atlanta,GA,57151,"First Citizens Bank and Trust Company, Inc.",25-Sep-09,21-Aug-12
+"Irwin Union Bank, F.S.B.",Louisville,KY,57068,"First Financial Bank, N.A.",18-Sep-09,5-Sep-12
+Irwin Union Bank and Trust Company,Columbus,IN,10100,"First Financial Bank, N.A.",18-Sep-09,21-Aug-12
+Venture Bank,Lacey,WA,22868,First-Citizens Bank & Trust Company,11-Sep-09,21-Aug-12
+Brickwell Community Bank,Woodbury,MN,57736,CorTrust Bank N.A.,11-Sep-09,15-Jan-13
+"Corus Bank, N.A.",Chicago,IL,13693,"MB Financial Bank, N.A.",11-Sep-09,21-Aug-12
+First State Bank,Flagstaff,AZ,34875,Sunwest Bank,4-Sep-09,15-Jan-13
+Platinum Community Bank,Rolling Meadows,IL,35030,No Acquirer,4-Sep-09,21-Aug-12
+Vantus Bank,Sioux City,IA,27732,Great Southern Bank,4-Sep-09,21-Aug-12
+InBank,Oak Forest,IL,20203,"MB Financial Bank, N.A.",4-Sep-09,21-Aug-12
+First Bank of Kansas City,Kansas City,MO,25231,Great American Bank,4-Sep-09,21-Aug-12
+Affinity Bank,Ventura,CA,27197,Pacific Western Bank,28-Aug-09,21-Aug-12
+Mainstreet Bank,Forest Lake,MN,1909,Central Bank,28-Aug-09,21-Aug-12
+Bradford Bank,Baltimore,MD,28312,Manufacturers and Traders Trust Company (M&T Bank),28-Aug-09,15-Jan-13
+Guaranty Bank,Austin,TX,32618,BBVA Compass,21-Aug-09,21-Aug-12
+CapitalSouth Bank,Birmingham,AL,22130,IBERIABANK,21-Aug-09,15-Jan-13
+First Coweta Bank,Newnan,GA,57702,United Bank,21-Aug-09,15-Jan-13
+ebank,Atlanta,GA,34682,"Stearns Bank, N.A.",21-Aug-09,21-Aug-12
+Community Bank of Nevada,Las Vegas,NV,34043,No Acquirer,14-Aug-09,21-Aug-12
+Community Bank of Arizona,Phoenix,AZ,57645,MidFirst Bank,14-Aug-09,21-Aug-12
+"Union Bank, National Association",Gilbert,AZ,34485,MidFirst Bank,14-Aug-09,21-Aug-12
+Colonial Bank,Montgomery,AL,9609,"Branch Banking & Trust Company, (BB&T)",14-Aug-09,5-Sep-12
+Dwelling House Savings and Loan Association,Pittsburgh,PA,31559,"PNC Bank, N.A.",14-Aug-09,15-Jan-13
+Community First Bank,Prineville,OR,23268,Home Federal Bank,7-Aug-09,15-Jan-13
+Community National Bank of Sarasota County,Venice,FL,27183,"Stearns Bank, N.A.",7-Aug-09,20-Aug-12
+First State Bank,Sarasota,FL,27364,"Stearns Bank, N.A.",7-Aug-09,20-Aug-12
+Mutual Bank,Harvey,IL,18659,United Central Bank,31-Jul-09,20-Aug-12
+First BankAmericano,Elizabeth,NJ,34270,Crown Bank,31-Jul-09,20-Aug-12
+Peoples Community Bank,West Chester,OH,32288,"First Financial Bank, N.A.",31-Jul-09,20-Aug-12
+Integrity Bank,Jupiter,FL,57604,Stonegate Bank,31-Jul-09,20-Aug-12
+First State Bank of Altus,Altus,OK,9873,Herring Bank,31-Jul-09,20-Aug-12
+Security Bank of Jones County,Gray,GA,8486,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Security Bank of Houston County,Perry,GA,27048,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Security Bank of Bibb County,Macon,GA,27367,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Security Bank of North Metro,Woodstock,GA,57105,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Security Bank of North Fulton,Alpharetta,GA,57430,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Security Bank of Gwinnett County,Suwanee,GA,57346,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Waterford Village Bank,Williamsville,NY,58065,"Evans Bank, N.A.",24-Jul-09,20-Aug-12
+Temecula Valley Bank,Temecula,CA,34341,First-Citizens Bank & Trust Company,17-Jul-09,20-Aug-12
+Vineyard Bank,Rancho Cucamonga,CA,23556,California Bank & Trust,17-Jul-09,20-Aug-12
+BankFirst,Sioux Falls,SD,34103,"Alerus Financial, N.A.",17-Jul-09,20-Aug-12
+First Piedmont Bank,Winder,GA,34594,First American Bank and Trust Company,17-Jul-09,15-Jan-13
+Bank of Wyoming,Thermopolis,WY,22754,Central Bank & Trust,10-Jul-09,20-Aug-12
+Founders Bank,Worth,IL,18390,The PrivateBank and Trust Company,2-Jul-09,20-Aug-12
+Millennium State Bank of Texas,Dallas,TX,57667,State Bank of Texas,2-Jul-09,26-Oct-12
+First National Bank of Danville,Danville,IL,3644,"First Financial Bank, N.A.",2-Jul-09,20-Aug-12
+Elizabeth State Bank,Elizabeth,IL,9262,Galena State Bank and Trust Company,2-Jul-09,20-Aug-12
+Rock River Bank,Oregon,IL,15302,The Harvard State Bank,2-Jul-09,20-Aug-12
+First State Bank of Winchester,Winchester,IL,11710,The First National Bank of Beardstown,2-Jul-09,20-Aug-12
+John Warner Bank,Clinton,IL,12093,State Bank of Lincoln,2-Jul-09,20-Aug-12
+Mirae Bank,Los Angeles,CA,57332,Wilshire State Bank,26-Jun-09,20-Aug-12
+MetroPacific Bank,Irvine,CA,57893,Sunwest Bank,26-Jun-09,20-Aug-12
+Horizon Bank,Pine City,MN,9744,"Stearns Bank, N.A.",26-Jun-09,20-Aug-12
+Neighborhood Community Bank,Newnan,GA,35285,CharterBank,26-Jun-09,20-Aug-12
+Community Bank of West Georgia,Villa Rica,GA,57436,No Acquirer,26-Jun-09,17-Aug-12
+First National Bank of Anthony,Anthony,KS,4614,Bank of Kansas,19-Jun-09,17-Aug-12
+Cooperative Bank,Wilmington,NC,27837,First Bank,19-Jun-09,17-Aug-12
+Southern Community Bank,Fayetteville,GA,35251,United Community Bank,19-Jun-09,17-Aug-12
+Bank of Lincolnwood,Lincolnwood,IL,17309,Republic Bank of Chicago,5-Jun-09,17-Aug-12
+Citizens National Bank,Macomb,IL,5757,Morton Community Bank,22-May-09,4-Sep-12
+Strategic Capital Bank,Champaign,IL,35175,Midland States Bank,22-May-09,4-Sep-12
+"BankUnited, FSB",Coral Gables,FL,32247,BankUnited,21-May-09,17-Aug-12
+Westsound Bank,Bremerton,WA,34843,Kitsap Bank,8-May-09,4-Sep-12
+America West Bank,Layton,UT,35461,Cache Valley Bank,1-May-09,17-Aug-12
+Citizens Community Bank,Ridgewood,NJ,57563,North Jersey Community Bank,1-May-09,4-Sep-12
+"Silverton Bank, NA",Atlanta,GA,26535,No Acquirer,1-May-09,17-Aug-12
+First Bank of Idaho,Ketchum,ID,34396,"U.S. Bank, N.A.",24-Apr-09,17-Aug-12
+First Bank of Beverly Hills,Calabasas,CA,32069,No Acquirer,24-Apr-09,4-Sep-12
+Michigan Heritage Bank,Farmington Hills,MI,34369,Level One Bank,24-Apr-09,17-Aug-12
+American Southern Bank,Kennesaw,GA,57943,Bank of North Georgia,24-Apr-09,17-Aug-12
+Great Basin Bank of Nevada,Elko,NV,33824,Nevada State Bank,17-Apr-09,4-Sep-12
+American Sterling Bank,Sugar Creek,MO,8266,Metcalf Bank,17-Apr-09,31-Aug-12
+New Frontier Bank,Greeley,CO,34881,No Acquirer,10-Apr-09,4-Sep-12
+Cape Fear Bank,Wilmington,NC,34639,First Federal Savings and Loan Association,10-Apr-09,17-Aug-12
+Omni National Bank,Atlanta,GA,22238,No Acquirer,27-Mar-09,17-Aug-12
+"TeamBank, NA",Paola,KS,4754,Great Southern Bank,20-Mar-09,17-Aug-12
+Colorado National Bank,Colorado Springs,CO,18896,Herring Bank,20-Mar-09,17-Aug-12
+FirstCity Bank,Stockbridge,GA,18243,No Acquirer,20-Mar-09,17-Aug-12
+Freedom Bank of Georgia,Commerce,GA,57558,Northeast Georgia Bank,6-Mar-09,17-Aug-12
+Security Savings Bank,Henderson,NV,34820,Bank of Nevada,27-Feb-09,7-Sep-12
+Heritage Community Bank,Glenwood,IL,20078,"MB Financial Bank, N.A.",27-Feb-09,17-Aug-12
+Silver Falls Bank,Silverton,OR,35399,Citizens Bank,20-Feb-09,17-Aug-12
+Pinnacle Bank of Oregon,Beaverton,OR,57342,Washington Trust Bank of Spokane,13-Feb-09,17-Aug-12
+Corn Belt Bank & Trust Co.,Pittsfield,IL,16500,The Carlinville National Bank,13-Feb-09,17-Aug-12
+Riverside Bank of the Gulf Coast,Cape Coral,FL,34563,TIB Bank,13-Feb-09,17-Aug-12
+Sherman County Bank,Loup City,NE,5431,Heritage Bank,13-Feb-09,17-Aug-12
+County Bank,Merced,CA,22574,Westamerica Bank,6-Feb-09,4-Sep-12
+Alliance Bank,Culver City,CA,23124,California Bank & Trust,6-Feb-09,16-Aug-12
+FirstBank Financial Services,McDonough,GA,57017,Regions Bank,6-Feb-09,16-Aug-12
+Ocala National Bank,Ocala,FL,26538,"CenterState Bank of Florida, N.A.",30-Jan-09,4-Sep-12
+Suburban FSB,Crofton,MD,30763,Bank of Essex,30-Jan-09,16-Aug-12
+MagnetBank,Salt Lake City,UT,58001,No Acquirer,30-Jan-09,16-Aug-12
+1st Centennial Bank,Redlands,CA,33025,First California Bank,23-Jan-09,16-Aug-12
+Bank of Clark County,Vancouver,WA,34959,Umpqua Bank,16-Jan-09,16-Aug-12
+National Bank of Commerce,Berkeley,IL,19733,Republic Bank of Chicago,16-Jan-09,16-Aug-12
+Sanderson State Bank,Sanderson,TX,11568,The Pecos County State Bank,12-Dec-08,4-Sep-12
+Haven Trust Bank,Duluth,GA,35379,"Branch Banking & Trust Company, (BB&T)",12-Dec-08,16-Aug-12
+First Georgia Community Bank,Jackson,GA,34301,United Bank,5-Dec-08,16-Aug-12
+PFF Bank & Trust ,Pomona,CA,28344,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13
+Downey Savings & Loan,Newport Beach,CA,30968,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13
+Community Bank,Loganville,GA,16490,Bank of Essex,21-Nov-08,4-Sep-12
+Security Pacific Bank,Los Angeles,CA,23595,Pacific Western Bank,7-Nov-08,28-Aug-12
+"Franklin Bank, SSB",Houston,TX,26870,Prosperity Bank,7-Nov-08,16-Aug-12
+Freedom Bank,Bradenton,FL,57930,Fifth Third Bank,31-Oct-08,16-Aug-12
+Alpha Bank & Trust,Alpharetta,GA,58241,"Stearns Bank, N.A.",24-Oct-08,16-Aug-12
+Meridian Bank,Eldred,IL,13789,National Bank,10-Oct-08,31-May-12
+Main Street Bank,Northville,MI,57654,Monroe Bank & Trust,10-Oct-08,16-Aug-12
+Washington Mutual Bank,Henderson,NV,32633,JP Morgan Chase Bank,25-Sep-08,16-Aug-12
+Ameribank,Northfork,WV,6782,The Citizens Savings Bank,19-Sep-08,16-Aug-12
+Silver State Bank,Henderson,NV,34194,Nevada State Bank,5-Sep-08,16-Aug-12
+Integrity Bank,Alpharetta,GA,35469,Regions Bank,29-Aug-08,16-Aug-12
+Columbian Bank & Trust,Topeka,KS,22728,Citizens Bank & Trust,22-Aug-08,16-Aug-12
+First Priority Bank,Bradenton,FL,57523,SunTrust Bank,1-Aug-08,16-Aug-12
+"First Heritage Bank, NA",Newport Beach,CA,57961,Mutual of Omaha Bank,25-Jul-08,28-Aug-12
+First National Bank of Nevada,Reno,NV,27011,Mutual of Omaha Bank,25-Jul-08,28-Aug-12
+IndyMac Bank,Pasadena,CA,29730,"OneWest Bank, FSB",11-Jul-08,28-Aug-12
+"First Integrity Bank, NA",Staples,MN,12736,First International Bank and Trust,30-May-08,28-Aug-12
+"ANB Financial, NA",Bentonville,AR,33901,Pulaski Bank and Trust Company,9-May-08,28-Aug-12
+Hume Bank,Hume,MO,1971,Security Bank,7-Mar-08,28-Aug-12
+Douglass National Bank,Kansas City,MO,24660,Liberty Bank and Trust Company,25-Jan-08,26-Oct-12
+Miami Valley Bank,Lakeview,OH,16848,The Citizens Banking Company,4-Oct-07,28-Aug-12
+NetBank,Alpharetta,GA,32575,ING DIRECT,28-Sep-07,28-Aug-12
+Metropolitan Savings Bank,Pittsburgh,PA,35353,Allegheny Valley Bank of Pittsburgh,2-Feb-07,27-Oct-10
+Bank of Ephraim,Ephraim,UT,1249,Far West Bank,25-Jun-04,9-Apr-08
+Reliance Bank,White Plains,NY,26778,Union State Bank,19-Mar-04,9-Apr-08
+Guaranty National Bank of Tallahassee,Tallahassee,FL,26838,Hancock Bank of Florida,12-Mar-04,5-Jun-12
+Dollar Savings Bank,Newark,NJ,31330,No Acquirer,14-Feb-04,9-Apr-08
+Pulaski Savings Bank,Philadelphia,PA,27203,Earthstar Bank,14-Nov-03,22-Jul-05
+First National Bank of Blanchardville,Blanchardville,WI,11639,The Park Bank,9-May-03,5-Jun-12
+Southern Pacific Bank,Torrance,CA,27094,Beal Bank,7-Feb-03,20-Oct-08
+Farmers Bank of Cheneyville,Cheneyville,LA,16445,Sabine State Bank & Trust,17-Dec-02,20-Oct-04
+Bank of Alamo,Alamo,TN,9961,No Acquirer,8-Nov-02,18-Mar-05
+AmTrade International Bank,Atlanta,GA,33784,No Acquirer,30-Sep-02,11-Sep-06
+Universal Federal Savings Bank,Chicago,IL,29355,Chicago Community Bank,27-Jun-02,9-Apr-08
+Connecticut Bank of Commerce,Stamford,CT,19183,Hudson United Bank,26-Jun-02,14-Feb-12
+New Century Bank,Shelby Township,MI,34979,No Acquirer,28-Mar-02,18-Mar-05
+Net 1st National Bank,Boca Raton,FL,26652,Bank Leumi USA,1-Mar-02,9-Apr-08
+"NextBank, NA",Phoenix,AZ,22314,No Acquirer,7-Feb-02,27-Aug-10
+Oakwood Deposit Bank Co.,Oakwood,OH,8966,The State Bank & Trust Company,1-Feb-02,25-Oct-12
+Bank of Sierra Blanca,Sierra Blanca,TX,22002,The Security State Bank of Pecos,18-Jan-02,6-Nov-03
+"Hamilton Bank, NA",Miami,FL,24382,Israel Discount Bank of New York,11-Jan-02,5-Jun-12
+Sinclair National Bank,Gravette,AR,34248,Delta Trust & Bank,7-Sep-01,10-Feb-04
+"Superior Bank, FSB",Hinsdale,IL,32646,"Superior Federal, FSB",27-Jul-01,5-Jun-12
+Malta National Bank,Malta,OH,6629,North Valley Bank,3-May-01,18-Nov-02
+First Alliance Bank & Trust Co.,Manchester,NH,34264,Southern New Hampshire Bank & Trust,2-Feb-01,18-Feb-03
+National State Bank of Metropolis,Metropolis,IL,3815,Banterra Bank of Marion,14-Dec-00,17-Mar-05
+Bank of Honolulu,Honolulu,HI,21029,Bank of the Orient,13-Oct-00,17-Mar-05
diff --git a/pandas/io/tests/data/failed_banklist.html b/pandas/io/tests/data/banklist.html
similarity index 97%
rename from pandas/io/tests/data/failed_banklist.html
rename to pandas/io/tests/data/banklist.html
index ea2a5c27996bf..8e15f37ccffdb 100644
--- a/pandas/io/tests/data/failed_banklist.html
+++ b/pandas/io/tests/data/banklist.html
@@ -455,8 +455,25 @@ <h1>Each depositor insured to at least $250,000 per insured bank</h1>
</tr>
</thead>
<tbody>
+
<tr>
-
+ <td><a href="douglascb.html">Douglas County Bank</a></td>
+ <td headers="city">Douglasville</td>
+ <td headers="state">GA</td>
+ <td headers="CERT #">21649</td>
+ <td headers="AI">Hamilton State Bank</td>
+ <td headers="Closing Date">April 26, 2013</td>
+ <td headers="Updated">April 30, 2013</td>
+</tr>
+ <tr>
+ <td><a href="parkway.html">Parkway Bank</a></td>
+ <td headers="city">Lenoir</td>
+ <td headers="state">NC</td>
+ <td headers="CERT #">57158</td>
+ <td headers="AI">CertusBank, National Association</td>
+ <td headers="Closing Date">April 26, 2013</td>
+ <td headers="Updated">April 30, 2013</td>
+</tr>
<tr>
<td><a href="chipola.html">Chipola Community Bank</a></td>
<td headers="city">Marianna</td>
@@ -5230,7 +5247,7 @@ <h1>Each depositor insured to at least $250,000 per insured bank</h1>
<!-- Instruction: change "mm/dd/yyyy" to the date the document was created or last modfied -->
<font face="arial, helvetica, sans-serif" size="1" color="#000066">Last Updated
- 04/23/2013</font></td>
+ 04/30/2013</font></td>
<td align="right"><font face="arial, helvetica, sans-serif" size="1" color="#000066">
<!-- Instruction: change the link text and href value of "Insert_Content_Email_Address@fdic.gov" to the fdic.gov e-mail address of the document's point of contact -->
diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py
index d0468026caef3..6e2f6ec00d8ac 100644
--- a/pandas/io/tests/test_html.py
+++ b/pandas/io/tests/test_html.py
@@ -2,33 +2,48 @@
import re
from cStringIO import StringIO
from unittest import TestCase
+import collections
+import numbers
+from urllib2 import urlopen
+from contextlib import closing
+import warnings
import nose
import numpy as np
+from numpy.random import rand
from numpy.testing.decorators import slow
-from pandas.io.html import read_html, import_module
-from pandas import DataFrame, MultiIndex
-from pandas.util.testing import assert_frame_equal, network
+from pandas.io.html import read_html, import_module, _parse, _LxmlFrameParser
+from pandas.io.html import _BeautifulSoupHtml5LibFrameParser
+from pandas.io.html import _BeautifulSoupLxmlFrameParser, _remove_whitespace
+from pandas import DataFrame, MultiIndex, read_csv, Timestamp
+from pandas.util.testing import assert_frame_equal, network, get_data_path
+from pandas.util.testing import makeCustomDataframe as mkdf
-def _skip_if_no_parser():
+def _have_module(module_name):
try:
- import_module('lxml')
+ import_module(module_name)
+ return True
except ImportError:
- try:
- import_module('bs4')
- except ImportError:
- raise nose.SkipTest
+ return False
+
+def _skip_if_no(module_name):
+ if not _have_module(module_name):
+ raise nose.SkipTest
-DATA_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')
+
+def _skip_if_none(module_names):
+ if isinstance(module_names, basestring):
+ _skip_if_no(module_names)
+ else:
+ if not any(_have_module(module_name) for module_name in module_names):
+ raise nose.SkipTest
-def _run_read_html(*args, **kwargs):
- _skip_if_no_parser()
- return read_html(*args, **kwargs)
+DATA_PATH = get_data_path()
def isframe(x):
@@ -47,14 +62,36 @@ def assert_framelist_equal(list1, list2):
assert not frame_i.empty, 'frames are both empty'
+def _run_read_html(parser, io, match='.+', flavor='bs4', header=None,
+ index_col=None, skiprows=None, infer_types=False,
+ attrs=None):
+ if isinstance(skiprows, numbers.Integral) and skiprows < 0:
+ raise AssertionError('cannot skip rows starting from the end of the '
+ 'data (you passed a negative value)')
+ return _parse(parser, io, match, flavor, header, index_col, skiprows,
+ infer_types, attrs)
+
+
class TestLxmlReadHtml(TestCase):
+ def test_to_html_compat(self):
+ df = mkdf(4, 3, data_gen_f=lambda *args: rand(), c_idx_names=False,
+ r_idx_names=False).applymap('{0:.3f}'.format)
+ out = df.to_html()
+ res = self.run_read_html(out, attrs={'class': 'dataframe'},
+ index_col=0)[0]
+ print df.dtypes
+ print res.dtypes
+ assert_frame_equal(res, df)
+
def setUp(self):
self.spam_data = os.path.join(DATA_PATH, 'spam.html')
- self.banklist_data = os.path.join(DATA_PATH, 'failed_banklist.html')
+ self.banklist_data = os.path.join(DATA_PATH, 'banklist.html')
def run_read_html(self, *args, **kwargs):
kwargs['flavor'] = 'lxml'
- return _run_read_html(*args, **kwargs)
+ _skip_if_no('lxml')
+ parser = _LxmlFrameParser
+ return _run_read_html(parser, *args, **kwargs)
@network
def test_banklist_url(self):
@@ -85,13 +122,31 @@ def test_banklist(self):
@slow
def test_banklist_header(self):
+ def try_remove_ws(x):
+ try:
+ return _remove_whitespace(x)
+ except AttributeError:
+ return x
+
df = self.run_read_html(self.banklist_data, 'Metcalf',
- attrs={'id': 'table'}, header=0, skiprows=1)[0]
- self.assertFalse(df.empty)
- cols = ['Bank Name', 'City', 'State', 'CERT #',
- 'Acquiring Institution', 'Closing Date', 'Updated Date']
- self.assertListEqual(df.columns.values.tolist(), cols)
- self.assertEqual(df.shape[0], 499)
+ attrs={'id': 'table'}, infer_types=False)[0]
+ ground_truth = read_csv(os.path.join(DATA_PATH, 'banklist.csv'),
+ converters={'Closing Date': Timestamp,
+ 'Updated Date': Timestamp})
+ self.assertNotEqual(df.shape, ground_truth.shape)
+ self.assertRaises(AssertionError, assert_frame_equal, df,
+ ground_truth.applymap(try_remove_ws))
+
+ @slow
+ def test_gold_canyon(self):
+ gc = 'Gold Canyon'
+ with open(self.banklist_data, 'r') as f:
+ raw_text = f.read()
+
+ self.assertIn(gc, raw_text)
+ df = self.run_read_html(self.banklist_data, 'Gold Canyon',
+ attrs={'id': 'table'}, infer_types=False)[0]
+ self.assertNotIn(gc, df.to_string())
def test_spam(self):
df1 = self.run_read_html(self.spam_data, '.*Water.*',
@@ -99,8 +154,10 @@ def test_spam(self):
df2 = self.run_read_html(self.spam_data, 'Unit', infer_types=False)
assert_framelist_equal(df1, df2)
+ print df1[0]
- self.assertEqual(df1[0].ix[0, 0], 'Nutrient')
+ self.assertEqual(df1[0].ix[0, 0], 'Proximates')
+ self.assertEqual(df1[0].columns[0], 'Nutrient')
def test_spam_no_match(self):
dfs = self.run_read_html(self.spam_data)
@@ -113,8 +170,9 @@ def test_banklist_no_match(self):
self.assertIsInstance(df, DataFrame)
def test_spam_header(self):
- df = self.run_read_html(self.spam_data, '.*Water.*', header=0)[0]
- self.assertEqual(df.columns[0], 'Nutrient')
+ df = self.run_read_html(self.spam_data, '.*Water.*', header=0)
+ df = self.run_read_html(self.spam_data, '.*Water.*', header=1)[0]
+ self.assertEqual(df.columns[0], 'Water')
self.assertFalse(df.empty)
def test_skiprows_int(self):
@@ -179,26 +237,20 @@ def test_index(self):
df2 = self.run_read_html(self.spam_data, 'Unit', index_col=0)
assert_framelist_equal(df1, df2)
- def test_header(self):
- df1 = self.run_read_html(self.spam_data, '.*Water.*', header=0)
- df2 = self.run_read_html(self.spam_data, 'Unit', header=0)
- assert_framelist_equal(df1, df2)
- self.assertEqual(df1[0].columns[0], 'Nutrient')
-
def test_header_and_index(self):
- df1 = self.run_read_html(self.spam_data, '.*Water.*', header=0,
+ df1 = self.run_read_html(self.spam_data, '.*Water.*', header=1,
index_col=0)
- df2 = self.run_read_html(self.spam_data, 'Unit', header=0, index_col=0)
+ df2 = self.run_read_html(self.spam_data, 'Unit', header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_infer_types(self):
- df1 = self.run_read_html(self.spam_data, '.*Water.*', header=0,
- index_col=0, infer_types=False)
- df2 = self.run_read_html(self.spam_data, 'Unit', header=0, index_col=0,
+ df1 = self.run_read_html(self.spam_data, '.*Water.*', index_col=0,
+ infer_types=False)
+ df2 = self.run_read_html(self.spam_data, 'Unit', index_col=0,
infer_types=False)
assert_framelist_equal(df1, df2)
- df2 = self.run_read_html(self.spam_data, 'Unit', header=0, index_col=0,
+ df2 = self.run_read_html(self.spam_data, 'Unit', index_col=0,
infer_types=True)
self.assertRaises(AssertionError, assert_framelist_equal, df1, df2)
@@ -304,21 +356,137 @@ def test_negative_skiprows_banklist(self):
@slow
def test_multiple_matches(self):
- url = self.banklist_data
- dfs = self.run_read_html(url, match=r'Florida')
- self.assertIsInstance(dfs, list)
+ url = 'http://code.google.com/p/pythonxy/wiki/StandardPlugins'
+ dfs = self.run_read_html(url, match='Python',
+ attrs={'class': 'wikitable'})
self.assertGreater(len(dfs), 1)
- for df in dfs:
- self.assertIsInstance(df, DataFrame)
+
+ @network
+ def test_pythonxy_plugins_table(self):
+ url = 'http://code.google.com/p/pythonxy/wiki/StandardPlugins'
+ dfs = self.run_read_html(url, match='Python',
+ attrs={'class': 'wikitable'})
+ zz = [df.iloc[0, 0] for df in dfs]
+ self.assertListEqual(sorted(zz), sorted(['Python', 'SciTE']))
def test_invalid_flavor():
url = 'google.com'
- nose.tools.assert_raises(AssertionError, _run_read_html, url, 'google',
+ nose.tools.assert_raises(AssertionError, read_html, url, 'google',
flavor='not a* valid**++ flaver')
-class TestBs4ReadHtml(TestLxmlReadHtml):
+@slow
+class TestBs4LxmlParser(TestLxmlReadHtml):
+ def test(self):
+ pass
+
def run_read_html(self, *args, **kwargs):
kwargs['flavor'] = 'bs4'
- return _run_read_html(*args, **kwargs)
+ _skip_if_no('lxml')
+ parser = _BeautifulSoupLxmlFrameParser
+ return _run_read_html(parser, *args, **kwargs)
+
+
+@slow
+class TestBs4Html5LibParser(TestBs4LxmlParser):
+ def test(self):
+ pass
+
+ def run_read_html(self, *args, **kwargs):
+ kwargs['flavor'] = 'bs4'
+ _skip_if_no('html5lib')
+ parser = _BeautifulSoupHtml5LibFrameParser
+ return _run_read_html(parser, *args, **kwargs)
+
+ @slow
+ def test_banklist_header(self):
+ def try_remove_ws(x):
+ try:
+ return _remove_whitespace(x)
+ except AttributeError:
+ return x
+
+ df = self.run_read_html(self.banklist_data, 'Metcalf',
+ attrs={'id': 'table'}, infer_types=True)[0]
+ ground_truth = read_csv(os.path.join(DATA_PATH, 'banklist.csv'),
+ converters={'Updated Date': Timestamp,
+ 'Closing Date': Timestamp})
+ # these will not
+ self.assertTupleEqual(df.shape, ground_truth.shape)
+ old = ['First Vietnamese American Bank In Vietnamese',
+ 'Westernbank Puerto Rico En Espanol',
+ 'R-G Premier Bank of Puerto Rico En Espanol',
+ 'Eurobank En Espanol', 'Sanderson State Bank En Espanol',
+ 'Washington Mutual Bank (Including its subsidiary Washington '
+ 'Mutual Bank FSB)',
+ 'Silver State Bank En Espanol',
+ 'AmTrade International BankEn Espanol',
+ 'Hamilton Bank, NA En Espanol',
+ 'The Citizens Savings BankPioneer Community Bank, Inc.']
+ new = ['First Vietnamese American Bank', 'Westernbank Puerto Rico',
+ 'R-G Premier Bank of Puerto Rico', 'Eurobank',
+ 'Sanderson State Bank', 'Washington Mutual Bank',
+ 'Silver State Bank', 'AmTrade International Bank',
+ 'Hamilton Bank, NA', 'The Citizens Savings Bank']
+ dfnew = df.applymap(try_remove_ws).replace(old, new)
+ gtnew = ground_truth.applymap(try_remove_ws)
+ assert_frame_equal(dfnew, gtnew)
+
+ @slow
+ def test_gold_canyon(self):
+ gc = 'Gold Canyon'
+ with open(self.banklist_data, 'r') as f:
+ raw_text = f.read()
+
+ self.assertIn(gc, raw_text)
+ df = self.run_read_html(self.banklist_data, 'Gold Canyon',
+ attrs={'id': 'table'}, infer_types=False)[0]
+ self.assertIn(gc, df.to_string())
+
+
+def get_elements_from_url(url, flavor, element='table'):
+ _skip_if_no('bs4')
+ _skip_if_no(flavor)
+ from bs4 import BeautifulSoup, SoupStrainer
+ strainer = SoupStrainer(element)
+ with closing(urlopen(url)) as f:
+ soup = BeautifulSoup(f, features=flavor, parse_only=strainer)
+ return soup.find_all(element)
+
+
+@slow
+def test_bs4_finds_tables():
+ url = ('http://ndb.nal.usda.gov/ndb/foods/show/1732?fg=&man=&'
+ 'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
+ flavors = 'lxml', 'html5lib'
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore')
+
+ for flavor in flavors:
+ assert get_elements_from_url(url, flavor, 'table')
+
+
+def get_lxml_elements(url, element):
+
+ _skip_if_no('lxml')
+ from lxml.html import parse
+ doc = parse(url)
+ return doc.xpath('.//{0}'.format(element))
+
+
+@slow
+def test_lxml_finds_tables():
+ url = ('http://ndb.nal.usda.gov/ndb/foods/show/1732?fg=&man=&'
+ 'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
+ assert get_lxml_elements(url, 'table')
+
+
+@slow
+def test_lxml_finds_tbody():
+ url = ('http://ndb.nal.usda.gov/ndb/foods/show/1732?fg=&man=&'
+ 'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
+ assert get_lxml_elements(url, 'tbody')
+
+
+
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 86387989a7a87..f38fe61d453c2 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -126,16 +126,18 @@ def assert_almost_equal(a, b, check_less_precise = False):
return assert_dict_equal(a, b)
if isinstance(a, basestring):
- assert a == b, (a, b)
+ assert a == b, "{0} != {1}".format(a, b)
return True
if isiterable(a):
np.testing.assert_(isiterable(b))
- assert(len(a) == len(b))
+ na, nb = len(a), len(b)
+ assert na == nb, "{0} != {1}".format(na, nb)
+
if np.array_equal(a, b):
return True
else:
- for i in xrange(len(a)):
+ for i in xrange(na):
assert_almost_equal(a[i], b[i], check_less_precise)
return True
@@ -169,7 +171,7 @@ def assert_almost_equal(a, b, check_less_precise = False):
np.testing.assert_almost_equal(
1, a / b, decimal=decimal, err_msg=err_msg(a, b), verbose=False)
else:
- assert(a == b)
+ assert a == b, "%s != %s" % (a, b)
def is_sorted(seq):
| Some updates and bug fixes. See release notes for more details.
- ~~`vbench` stuff~~ sort of pointless right now since we don't really have control over the speed of the parsing library
- ~~Figure out why `lxml` chooses to ignore things~~ reported a bug w/ example to lxml people
- ~~Figure out why `bs4`'s `thead.find_all(['th', 'td'])` parses differently than `lxml`'s `thead.xpath('.//thead//th|.//thead//td')` even when `lxml` is the `bs4` backend.~~ same as above
| https://api.github.com/repos/pandas-dev/pandas/pulls/3616 | 2013-05-15T22:17:07Z | 2013-05-20T11:43:11Z | 2013-05-20T11:43:11Z | 2014-06-18T01:06:50Z |
BUG: (GH3611) Fix read_csv to correctly encode identical na_values | diff --git a/RELEASE.rst b/RELEASE.rst
index 1f5bd2591470b..503ae0e6bb30e 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -118,6 +118,8 @@ pandas 0.11.1
- Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
``np.nan`` or ``np.inf`` as appropriate (GH3590_)
- Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_)
+ - Fix ``read_csv`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
+ was failing (GH3611_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -166,6 +168,7 @@ pandas 0.11.1
.. _GH3610: https://github.com/pydata/pandas/issues/3610
.. _GH3596: https://github.com/pydata/pandas/issues/3596
.. _GH3435: https://github.com/pydata/pandas/issues/3435
+.. _GH3611: https://github.com/pydata/pandas/issues/3611
pandas 0.11.0
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 60b6d6c81fdd3..f4eeb36e5e8d0 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1712,12 +1712,14 @@ def _clean_na_values(na_values, keep_default_na=True):
else:
if not com.is_list_like(na_values):
na_values = [na_values]
- na_values = set(list(na_values))
+ na_values = set(_stringify_na_values(na_values))
if keep_default_na:
na_values = na_values | _NA_VALUES
return na_values
+def _stringify_na_values(na_values):
+ return [ str(x) for x in na_values ]
def _clean_index_names(columns, index_col):
if not _is_index_col(index_col):
@@ -1768,7 +1770,7 @@ def _get_empty_meta(columns, index_col, index_names):
def _get_na_values(col, na_values):
if isinstance(na_values, dict):
if col in na_values:
- return set(list(na_values[col]))
+ return set(_stringify_na_values(list(na_values[col])))
else:
return _NA_VALUES
else:
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 2e4689d7aa620..4a9004b7068ba 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -498,6 +498,17 @@ def test_quoting(self):
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assert_(len(df) == 3)
+ def test_non_string_na_values(self):
+ # GH3611, na_values that are not a string are an issue
+ with ensure_clean('__non_string_na_values__.csv') as path:
+ df = DataFrame({'A' : [-999, 2, 3], 'B' : [1.2, -999, 4.5]})
+ df.to_csv(path, sep=' ', index=False)
+ result1 = read_csv(path, sep= ' ', header=0, na_values=['-999.0','-999'])
+ result2 = read_csv(path, sep= ' ', header=0, na_values=[-999,-999.0])
+ result3 = read_csv(path, sep= ' ', header=0, na_values=[-999.0,-999])
+ tm.assert_frame_equal(result1,result2)
+ tm.assert_frame_equal(result2,result3)
+
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
| eg. na_values=[-999.0,-999] was failing
closes #3611
| https://api.github.com/repos/pandas-dev/pandas/pulls/3615 | 2013-05-15T22:16:46Z | 2013-05-16T01:01:21Z | 2013-05-16T01:01:20Z | 2014-06-27T14:18:58Z |
ENH add date to DatetimeIndex | diff --git a/RELEASE.rst b/RELEASE.rst
index 1f5bd2591470b..e44c2d0d37e08 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -49,6 +49,7 @@ pandas 0.11.1
- support datelike columns with a timezone as data_columns (GH2852_)
- table writing performance improvements.
- Add modulo operator to Series, DataFrame
+ - Add ``date`` method to DatetimeIndex
**API Changes**
@@ -275,7 +276,7 @@ pandas 0.11.0
on rhs (GH3216_)
- Treat boolean values as integers (values 1 and 0) for numeric
operations. (GH2641_)
- - Add ``time()`` method to DatetimeIndex (GH3180_)
+ - Add ``time`` method to DatetimeIndex (GH3180_)
- Return NA when using Series.str[...] for values that are not long enough
(GH3223_)
- Display cursor coordinate information in time-series plots (GH1670_)
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 46e2488fb70e6..a918e9eb18e8b 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1319,11 +1319,18 @@ def freqstr(self):
@property
def time(self):
"""
- Returns array of datetime.time. The time of the day
+ Returns numpy array of datetime.time. The time part of the Timestamps.
"""
# can't call self.map() which tries to treat func as ufunc
# and causes recursion warnings on python 2.6
- return _algos.arrmap_object(self.asobject, lambda x:x.time())
+ return _algos.arrmap_object(self.asobject, lambda x: x.time())
+
+ @property
+ def date(self):
+ """
+ Returns numpy array of datetime.date. The date part of the Timestamps.
+ """
+ return _algos.arrmap_object(self.asobject, lambda x: x.date())
def normalize(self):
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 9b20ac1e3f055..beee5caa871c5 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -1847,6 +1847,12 @@ def test_time(self):
expected = [t.time() for t in rng]
self.assert_((result == expected).all())
+ def test_date(self):
+ rng = pd.date_range('1/1/2000', freq='12H', periods=10)
+ result = pd.Index(rng).date
+ expected = [t.date() for t in rng]
+ self.assert_((result == expected).all())
+
class TestLegacySupport(unittest.TestCase):
_multiprocess_can_split_ = True
| Allows the date to be pulled out from a DatetimeIndex easily/efficiently.
Similar to #3180, which was for the time-part to be extracted.
_From SO question: http://stackoverflow.com/questions/16563552/pandas-fancy-indexing-a-dataframe_.
~~I haven't added this to release notes, as maybe too late for 11.1?~~
| https://api.github.com/repos/pandas-dev/pandas/pulls/3614 | 2013-05-15T21:00:01Z | 2013-05-17T16:00:23Z | 2013-05-17T16:00:22Z | 2014-06-26T11:26:58Z |
BUG: Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_) | diff --git a/RELEASE.rst b/RELEASE.rst
index d81a0e405ddd9..006da5f8e76af 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -114,6 +114,7 @@ pandas 0.11.1
in a frame (GH3594_)
- Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
``np.nan`` or ``np.inf`` as appropriate (GH3590_)
+ - Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -159,6 +160,7 @@ pandas 0.11.1
.. _GH3556: https://github.com/pydata/pandas/issues/3556
.. _GH3594: https://github.com/pydata/pandas/issues/3594
.. _GH3590: https://github.com/pydata/pandas/issues/3590
+.. _GH3610: https://github.com/pydata/pandas/issues/3610
.. _GH3435: https://github.com/pydata/pandas/issues/3435
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 7762803b029e9..093c61ba5af5c 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1713,7 +1713,7 @@ def aggregate(self, arg, *args, **kwargs):
result.insert(0, name, values)
result.index = np.arange(len(result))
- return result
+ return result.convert_objects()
def _aggregate_multiple_funcs(self, arg):
from pandas.tools.merge import concat
@@ -2054,7 +2054,7 @@ def _wrap_aggregated_output(self, output, names=None):
if self.axis == 1:
result = result.T
- return result
+ return result.convert_objects()
def _wrap_agged_blocks(self, blocks):
obj = self._obj_with_exclusions
@@ -2094,7 +2094,7 @@ def _wrap_agged_blocks(self, blocks):
if self.axis == 1:
result = result.T
- return result
+ return result.convert_objects()
from pandas.tools.plotting import boxplot_frame_groupby
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 23077655d5144..c1c4217cb6f62 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -1842,6 +1842,14 @@ def test_apply_with_mixed_dtype(self):
result = df.apply(lambda x: x, axis=1)
assert_series_equal(df.get_dtype_counts(), result.get_dtype_counts())
+
+ # GH 3610 incorrect dtype conversion with as_index=False
+ df = DataFrame({"c1" : [1,2,6,6,8]})
+ df["c2"] = df.c1/2.0
+ result1 = df.groupby("c2").mean().reset_index().c2
+ result2 = df.groupby("c2", as_index=False).mean().c2
+ assert_series_equal(result1,result2)
+
def test_groupby_list_infer_array_like(self):
result = self.df.groupby(list(self.df['A'])).mean()
expected = self.df.groupby(self.df['A']).mean()
| closes #3610
| https://api.github.com/repos/pandas-dev/pandas/pulls/3613 | 2013-05-15T18:57:28Z | 2013-05-15T19:27:59Z | 2013-05-15T19:27:59Z | 2014-06-12T13:38:01Z |
BUG/TST: fix failing html tests | diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py
index d0468026caef3..65c50dc96db97 100644
--- a/pandas/io/tests/test_html.py
+++ b/pandas/io/tests/test_html.py
@@ -13,24 +13,16 @@
from pandas.util.testing import assert_frame_equal, network
-def _skip_if_no_parser():
+def _skip_if_no(module_name):
try:
- import_module('lxml')
+ import_module(module_name)
except ImportError:
- try:
- import_module('bs4')
- except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest
DATA_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')
-def _run_read_html(*args, **kwargs):
- _skip_if_no_parser()
- return read_html(*args, **kwargs)
-
-
def isframe(x):
return isinstance(x, DataFrame)
@@ -53,8 +45,9 @@ def setUp(self):
self.banklist_data = os.path.join(DATA_PATH, 'failed_banklist.html')
def run_read_html(self, *args, **kwargs):
+ _skip_if_no('lxml')
kwargs['flavor'] = 'lxml'
- return _run_read_html(*args, **kwargs)
+ return read_html(*args, **kwargs)
@network
def test_banklist_url(self):
@@ -314,11 +307,12 @@ def test_multiple_matches(self):
def test_invalid_flavor():
url = 'google.com'
- nose.tools.assert_raises(AssertionError, _run_read_html, url, 'google',
+ nose.tools.assert_raises(AssertionError, read_html, url, 'google',
flavor='not a* valid**++ flaver')
class TestBs4ReadHtml(TestLxmlReadHtml):
def run_read_html(self, *args, **kwargs):
+ _skip_if_no('bs4')
kwargs['flavor'] = 'bs4'
- return _run_read_html(*args, **kwargs)
+ return read_html(*args, **kwargs)
| closes #3605
| https://api.github.com/repos/pandas-dev/pandas/pulls/3607 | 2013-05-14T22:51:47Z | 2013-05-15T22:18:50Z | null | 2014-06-25T21:04:16Z |
Categorical cleanup | diff --git a/RELEASE.rst b/RELEASE.rst
index 31627cec01d1e..5b4cd67f4a246 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -48,6 +48,7 @@ pandas 0.11.1
to append an index with a different name than the existing
- support datelike columns with a timezone as data_columns (GH2852_)
- table writing performance improvements.
+ - Simplified the API and added a describe method to Categorical
**API Changes**
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index f0ca08d22d7dc..bc2ff9bbe1013 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -606,8 +606,8 @@ versions of pandas, but users were generally discarding the NA group anyway
Grouping with ordered factors
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Categorical variables represented as instance of pandas's ``Factor`` class can
-be used as group keys. If so, the order of the levels will be preserved:
+Categorical variables represented as instance of pandas's ``Categorical`` class
+can be used as group keys. If so, the order of the levels will be preserved:
.. ipython:: python
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index a093a81a6516d..916bb2deb417e 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -5,6 +5,7 @@
from pandas.core.algorithms import factorize
from pandas.core.index import Index
import pandas.core.common as com
+from pandas.core.frame import DataFrame
def _cat_compare_op(op):
@@ -32,23 +33,68 @@ class Categorical(object):
Parameters
----------
labels : ndarray of integers
- levels : Index-like (unique)
-
- data : array-like
+ If levels is given, the integer at label `i` is the index of the level
+ for that label. I.e., the level at labels[i] is levels[labels[i]].
+ Otherwise, if levels is None, these are just the labels and the levels
+ are assumed to be the unique labels. See from_array.
+ levels : Index-like (unique), optional
+ The unique levels for each label. If not given, the levels are assumed
+ to be the unique values of labels.
+ name : str, optional
+ Name for the Categorical variable. If levels is None, will attempt
+ to infer from labels.
Returns
-------
**Attributes**
* labels : ndarray
* levels : ndarray
+
+ Examples
+ --------
+ >>> from pandas import Categorical
+ >>> Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3])
+ Categorical:
+ array([1, 2, 3, 1, 2, 3])
+ Levels (3): Int64Index([1, 2, 3])
+
+ >>> Categorical([0,1,2,0,1,2], ['a', 'b', 'c'])
+ Categorical:
+ array(['a', 'b', 'c', 'a', 'b', 'c'], dtype=object)
+ Levels (3): Index(['a', 'b', 'c'], dtype=object)
+
+ >>> Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
+ Categorical:
+ array(['a', 'b', 'c', 'a', 'b', 'c'], dtype=object)
+ Levels (3): Index(['a', 'b', 'c'], dtype=object)
"""
- def __init__(self, labels, levels, name=None):
+ def __init__(self, labels, levels=None, name=None):
+ if levels is None:
+ if name is None:
+ name = getattr(labels, 'name', None)
+ if isinstance(labels, Index) and hasattr(labels, 'factorize'):
+ labels, levels = labels.factorize()
+ else:
+ try:
+ labels, levels = factorize(labels, sort=True)
+ except TypeError:
+ labels, levels = factorize(labels, sort=False)
+
self.labels = labels
self.levels = levels
self.name = name
@classmethod
def from_array(cls, data):
+ """
+ Make a Categorical type from a single array-like object.
+
+ Parameters
+ ----------
+ data : array-like
+ Can be an Index or array-like. The levels are assumed to be
+ the unique values of `data`.
+ """
if isinstance(data, Index) and hasattr(data, 'factorize'):
labels, levels = data.factorize()
else:
@@ -131,4 +177,28 @@ def equals(self, other):
return (self.levels.equals(other.levels) and
np.array_equal(self.labels, other.labels))
-Factor = Categorical
+ def describe(self):
+ """
+ Returns a dataframe with frequency and counts by level.
+ """
+ #Hack?
+ grouped = DataFrame(self.labels).groupby(0)
+ counts = grouped.count().values.squeeze()
+ freqs = counts/float(counts.sum())
+ return DataFrame.from_dict(dict(
+ counts=counts,
+ freqs=freqs,
+ levels=self.levels)).set_index('levels')
+
+
+class Factor(Categorical):
+ def __init__(self, labels, levels=None, name=None):
+ from warnings import warn
+ warn("Factor is deprecated. Use Categorical instead", FutureWarning)
+ super(Factor, self).__init__(labels, levels, name)
+
+ @classmethod
+ def from_array(cls, data):
+ from warnings import warn
+ warn("Factor is deprecated. Use Categorical instead", FutureWarning)
+ return super(Factor, cls).from_array(data)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5b2dc6dd96efb..6190208432926 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1659,8 +1659,8 @@ def convert_objects(self, convert_dates=True, convert_numeric=False, copy=True):
-------
converted : DataFrame
"""
- return self._constructor(self._data.convert(convert_dates=convert_dates,
- convert_numeric=convert_numeric,
+ return self._constructor(self._data.convert(convert_dates=convert_dates,
+ convert_numeric=convert_numeric,
copy=copy))
#----------------------------------------------------------------------
@@ -3330,7 +3330,7 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
a reference to the filled object, which is self if inplace=True
limit : int, default None
Maximum size gap to forward or backward fill
- downcast : dict, default is None, a dict of item->dtype of what to
+ downcast : dict, default is None, a dict of item->dtype of what to
downcast if possible
See also
@@ -3380,7 +3380,7 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
result[k].fillna(v, inplace=True)
return result
else:
- new_data = self._data.fillna(value, inplace=inplace,
+ new_data = self._data.fillna(value, inplace=inplace,
downcast=downcast)
if inplace:
@@ -3791,8 +3791,8 @@ def combine(self, other, func, fill_value=None, overwrite=True):
result[col] = arr
# convert_objects just in case
- return self._constructor(result,
- index=new_index,
+ return self._constructor(result,
+ index=new_index,
columns=new_columns).convert_objects(
convert_dates=True,
copy=False)
@@ -3825,7 +3825,7 @@ def combiner(x, y, needs_i8_conversion=False):
y_values = y_values.view('i8')
else:
mask = isnull(x_values)
-
+
return expressions.where(mask, y_values, x_values, raise_on_error=True)
return self.combine(other, combiner, overwrite=False)
@@ -5406,11 +5406,11 @@ def group_agg(values, bounds, f):
def factor_agg(factor, vec, func):
"""
- Aggregate array based on Factor
+ Aggregate array based on Categorical
Parameters
----------
- factor : Factor
+ factor : Categorical
length n
vec : sequence
length n
@@ -5419,7 +5419,11 @@ def factor_agg(factor, vec, func):
Returns
-------
- ndarray corresponding to Factor levels
+ ndarray corresponding to factor levels
+
+ See Also
+ --------
+ pandas.Categorical
"""
indexer = np.argsort(factor.labels)
unique_labels = np.arange(len(factor.levels))
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 44b62991cf7a3..a19011d094499 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -10,7 +10,7 @@
_try_sort, _default_index,
_infer_dtype_from_scalar,
notnull)
-from pandas.core.categorical import Factor
+from pandas.core.categorical import Categorical
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_get_combined_index)
from pandas.core.indexing import _maybe_droplevels, _is_list_like
@@ -82,8 +82,8 @@ def panel_index(time, panels, names=['time', 'panel']):
(1962, 'C')], dtype=object)
"""
time, panels = _ensure_like_indices(time, panels)
- time_factor = Factor.from_array(time)
- panel_factor = Factor.from_array(panels)
+ time_factor = Categorical.from_array(time)
+ panel_factor = Categorical.from_array(panels)
labels = [time_factor.labels, panel_factor.labels]
levels = [time_factor.levels, panel_factor.levels]
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index 8595e2a91906d..9f67094cfd28a 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -749,9 +749,9 @@ def make_axis_dummies(frame, axis='minor', transform=None):
axis : {'major', 'minor'}, default 'minor'
transform : function, default None
Function to apply to axis labels first. For example, to
- get "day of week" dummies in a time series regression
+ get "day of week" dummies in a time series regression
you might call::
-
+
make_axis_dummies(panel, axis='major',
transform=lambda d: d.weekday())
Returns
@@ -810,6 +810,6 @@ def block2d_to_blocknd(values, items, shape, labels, ref_items=None):
def factor_indexer(shape, labels):
- """ given a tuple of shape and a list of Factor lables, return the expanded label indexer """
+ """ given a tuple of shape and a list of Categorical labels, return the expanded label indexer """
mult = np.array(shape)[::-1].cumprod()[::-1]
return com._ensure_platform_int(np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T)
diff --git a/pandas/tests/test_factor.py b/pandas/tests/test_factor.py
index de2fcaa94b59d..48db7afa29aaa 100644
--- a/pandas/tests/test_factor.py
+++ b/pandas/tests/test_factor.py
@@ -9,6 +9,7 @@
from pandas.core.api import value_counts
from pandas.core.categorical import Categorical
from pandas.core.index import Index, Int64Index, MultiIndex
+from pandas.core.frame import DataFrame
from pandas.util.testing import assert_almost_equal
import pandas.core.common as com
@@ -111,6 +112,29 @@ def test_na_flags_int_levels(self):
self.assert_(np.array_equal(com.isnull(cat), labels == -1))
+ def test_levels_none(self):
+ factor = Categorical(['a', 'b', 'b', 'a',
+ 'a', 'c', 'c', 'c'])
+ self.assert_(factor.equals(self.factor))
+
+ def test_describe(self):
+ # string type
+ desc = self.factor.describe()
+ expected = DataFrame.from_dict(dict(counts=[3, 2, 3],
+ freqs=[3/8., 2/8., 3/8.],
+ levels=['a', 'b', 'c'])
+ ).set_index('levels')
+ tm.assert_frame_equal(desc, expected)
+
+ # check an integer one
+ desc = Categorical([1,2,3,1,2,3,3,2,1,1,1]).describe()
+ expected = DataFrame.from_dict(dict(counts=[5, 3, 3],
+ freqs=[5/11., 3/11., 3/11.],
+ levels=[1,2,3]
+ )
+ ).set_index('levels')
+ tm.assert_frame_equal(desc, expected)
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 7f05a045e36af..a9428d472c42e 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -5,7 +5,7 @@
import itertools
import numpy as np
-from pandas.core.categorical import Factor
+from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame, _merge_doc
from pandas.core.generic import NDFrame
from pandas.core.groupby import get_group_index
@@ -1190,7 +1190,7 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None):
names = [None] * len(zipped)
if levels is None:
- levels = [Factor.from_array(zp).levels for zp in zipped]
+ levels = [Categorical.from_array(zp).levels for zp in zipped]
else:
levels = [_ensure_index(x) for x in levels]
else:
@@ -1228,7 +1228,7 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None):
levels.extend(concat_index.levels)
label_list.extend(concat_index.labels)
else:
- factor = Factor.from_array(concat_index)
+ factor = Categorical.from_array(concat_index)
levels.append(factor.levels)
label_list.append(factor.labels)
| Make categorical a little more user-friendly and add some documentation.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3603 | 2013-05-14T19:41:21Z | 2013-05-19T21:12:04Z | 2013-05-19T21:12:04Z | 2014-06-27T14:18:49Z |
BUG: Fix integer modulo and division to make integer and float dtypes work similarly for invalid values | diff --git a/RELEASE.rst b/RELEASE.rst
index 31627cec01d1e..0b6ed0b4d2853 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -48,6 +48,7 @@ pandas 0.11.1
to append an index with a different name than the existing
- support datelike columns with a timezone as data_columns (GH2852_)
- table writing performance improvements.
+ - Add modulo operator to Series, DataFrame
**API Changes**
@@ -110,6 +111,8 @@ pandas 0.11.1
is a ``list`` or ``tuple``.
- Fixed bug where a time-series was being selected in preference to an actual column name
in a frame (GH3594_)
+ - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
+ ``np.nan`` or ``np.inf`` as appropriate (GH3590_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -153,6 +156,7 @@ pandas 0.11.1
.. _GH3593: https://github.com/pydata/pandas/issues/3593
.. _GH3556: https://github.com/pydata/pandas/issues/3556
.. _GH3594: https://github.com/pydata/pandas/issues/3594
+.. _GH3590: https://github.com/pydata/pandas/issues/3590
.. _GH3435: https://github.com/pydata/pandas/issues/3435
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 74818f9542cae..3719d9eb09dee 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -9,6 +9,17 @@ enhancements along with a large number of bug fixes.
API changes
~~~~~~~~~~~
+ - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
+ ``np.nan`` or ``np.inf`` as appropriate (GH3590_). This correct a numpy bug that treats ``integer``
+ and ``float`` dtypes differently.
+
+ .. ipython:: python
+
+ p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })
+ p % 0
+ p % p
+ p / p
+ p / 0
Enhancements
~~~~~~~~~~~~
@@ -33,4 +44,5 @@ on GitHub for a complete list.
.. _GH3477: https://github.com/pydata/pandas/issues/3477
.. _GH3492: https://github.com/pydata/pandas/issues/3492
.. _GH3499: https://github.com/pydata/pandas/issues/3499
+.. _GH3590: https://github.com/pydata/pandas/issues/3590
.. _GH3435: https://github.com/pydata/pandas/issues/3435
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 2da2db052cb93..6bb4b36862956 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -793,13 +793,16 @@ def changeit():
# try to directly set by expanding our array to full
# length of the boolean
- om = other[mask]
- om_at = om.astype(result.dtype)
- if (om == om_at).all():
- new_other = result.values.copy()
- new_other[mask] = om_at
- result[:] = new_other
- return result, False
+ try:
+ om = other[mask]
+ om_at = om.astype(result.dtype)
+ if (om == om_at).all():
+ new_other = result.values.copy()
+ new_other[mask] = om_at
+ result[:] = new_other
+ return result, False
+ except:
+ pass
# we are forced to change the dtype of the result as the input isn't compatible
r, fill_value = _maybe_upcast(result, fill_value=other, dtype=dtype, copy=True)
@@ -948,6 +951,27 @@ def _lcd_dtypes(a_dtype, b_dtype):
return np.float64
return np.object
+def _fill_zeros(result, y, fill):
+ """ if we have an integer value (or array in y)
+ and we have 0's, fill them with the fill,
+ return the result """
+
+ if fill is not None:
+ if not isinstance(y, np.ndarray):
+ dtype, value = _infer_dtype_from_scalar(y)
+ y = pa.empty(result.shape,dtype=dtype)
+ y.fill(value)
+
+ if is_integer_dtype(y):
+
+ mask = y.ravel() == 0
+ if mask.any():
+ shape = result.shape
+ result, changed = _maybe_upcast_putmask(result.ravel(),mask,fill)
+ result = result.reshape(shape)
+
+ return result
+
def _interp_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, mask, limit=None):
view = arr.view(wrap_dtype)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5b2dc6dd96efb..c1f2f38dabd8b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -189,10 +189,12 @@ class DataConflictError(Exception):
# Factory helper methods
-def _arith_method(op, name, str_rep = None, default_axis='columns'):
+def _arith_method(op, name, str_rep = None, default_axis='columns', fill_zeros=None):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y, raise_on_error=True)
+ result = com._fill_zeros(result,y,fill_zeros)
+
except TypeError:
xrav = x.ravel()
result = np.empty(x.size, dtype=x.dtype)
@@ -841,20 +843,23 @@ def __contains__(self, key):
__sub__ = _arith_method(operator.sub, '__sub__', '-', default_axis=None)
__mul__ = _arith_method(operator.mul, '__mul__', '*', default_axis=None)
__truediv__ = _arith_method(operator.truediv, '__truediv__', '/',
- default_axis=None)
+ default_axis=None, fill_zeros=np.inf)
__floordiv__ = _arith_method(operator.floordiv, '__floordiv__',
- default_axis=None)
+ default_axis=None, fill_zeros=np.inf)
__pow__ = _arith_method(operator.pow, '__pow__', '**', default_axis=None)
+ __mod__ = _arith_method(operator.mod, '__mod__', '*', default_axis=None, fill_zeros=np.nan)
+
__radd__ = _arith_method(_radd_compat, '__radd__', default_axis=None)
__rmul__ = _arith_method(operator.mul, '__rmul__', default_axis=None)
__rsub__ = _arith_method(lambda x, y: y - x, '__rsub__', default_axis=None)
__rtruediv__ = _arith_method(lambda x, y: y / x, '__rtruediv__',
- default_axis=None)
+ default_axis=None, fill_zeros=np.inf)
__rfloordiv__ = _arith_method(lambda x, y: y // x, '__rfloordiv__',
- default_axis=None)
+ default_axis=None, fill_zeros=np.inf)
__rpow__ = _arith_method(lambda x, y: y ** x, '__rpow__',
default_axis=None)
+ __rmod__ = _arith_method(operator.mod, '__rmod__', default_axis=None, fill_zeros=np.nan)
# boolean operators
__and__ = _arith_method(operator.and_, '__and__', '&')
@@ -863,9 +868,10 @@ def __contains__(self, key):
# Python 2 division methods
if not py3compat.PY3:
- __div__ = _arith_method(operator.div, '__div__', '/', default_axis=None)
+ __div__ = _arith_method(operator.div, '__div__', '/',
+ default_axis=None, fill_zeros=np.inf)
__rdiv__ = _arith_method(lambda x, y: y / x, '__rdiv__',
- default_axis=None)
+ default_axis=None, fill_zeros=np.inf)
def __neg__(self):
arr = operator.neg(self.values)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index a2816d93d6f1e..e807cf3f1dfd4 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -55,14 +55,17 @@
# Wrapper function for Series arithmetic methods
-def _arith_method(op, name):
+def _arith_method(op, name, fill_zeros=None):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
try:
+
result = op(x, y)
+ result = com._fill_zeros(result,y,fill_zeros)
+
except TypeError:
result = pa.empty(len(x), dtype=x.dtype)
if isinstance(y, pa.Array):
@@ -1258,16 +1261,18 @@ def iteritems(self):
__add__ = _arith_method(operator.add, '__add__')
__sub__ = _arith_method(operator.sub, '__sub__')
__mul__ = _arith_method(operator.mul, '__mul__')
- __truediv__ = _arith_method(operator.truediv, '__truediv__')
- __floordiv__ = _arith_method(operator.floordiv, '__floordiv__')
+ __truediv__ = _arith_method(operator.truediv, '__truediv__', fill_zeros=np.inf)
+ __floordiv__ = _arith_method(operator.floordiv, '__floordiv__', fill_zeros=np.inf)
__pow__ = _arith_method(operator.pow, '__pow__')
+ __mod__ = _arith_method(operator.mod, '__mod__', fill_zeros=np.nan)
__radd__ = _arith_method(_radd_compat, '__add__')
__rmul__ = _arith_method(operator.mul, '__mul__')
__rsub__ = _arith_method(lambda x, y: y - x, '__sub__')
- __rtruediv__ = _arith_method(lambda x, y: y / x, '__truediv__')
- __rfloordiv__ = _arith_method(lambda x, y: y // x, '__floordiv__')
+ __rtruediv__ = _arith_method(lambda x, y: y / x, '__truediv__', fill_zeros=np.inf)
+ __rfloordiv__ = _arith_method(lambda x, y: y // x, '__floordiv__', fill_zeros=np.inf)
__rpow__ = _arith_method(lambda x, y: y ** x, '__pow__')
+ __rmod__ = _arith_method(operator.mod, '__mod__', fill_zeros=np.nan)
# comparisons
__gt__ = _comp_method(operator.gt, '__gt__')
@@ -1301,8 +1306,8 @@ def __invert__(self):
# Python 2 division operators
if not py3compat.PY3:
- __div__ = _arith_method(operator.div, '__div__')
- __rdiv__ = _arith_method(lambda x, y: y / x, '__div__')
+ __div__ = _arith_method(operator.div, '__div__', fill_zeros=np.inf)
+ __rdiv__ = _arith_method(lambda x, y: y / x, '__div__', fill_zeros=np.inf)
__idiv__ = __div__
#----------------------------------------------------------------------
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index ce89dda63597f..f77503bd1487d 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4011,6 +4011,50 @@ def test_operators_none_as_na(self):
result = op(df.fillna(7), df)
assert_frame_equal(result, expected)
+ def test_modulo(self):
+
+ # GH3590, modulo as ints
+ p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })
+
+ ### this is technically wrong as the integer portion is coerced to float ###
+ expected = DataFrame({ 'first' : Series([0,0,0,0],dtype='float64'), 'second' : Series([np.nan,np.nan,np.nan,0]) })
+ result = p % p
+ assert_frame_equal(result,expected)
+
+ # numpy has a slightly different (wrong) treatement
+ result2 = DataFrame(p.values % p.values,index=p.index,columns=p.columns,dtype='float64')
+ result2.iloc[0:3,1] = np.nan
+ assert_frame_equal(result2,expected)
+
+ result = p % 0
+ expected = DataFrame(np.nan,index=p.index,columns=p.columns)
+ assert_frame_equal(result,expected)
+
+ # numpy has a slightly different (wrong) treatement
+ result2 = DataFrame(p.values.astype('float64') % 0,index=p.index,columns=p.columns)
+ assert_frame_equal(result2,expected)
+
+ def test_div(self):
+
+ # integer div, but deal with the 0's
+ p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })
+ result = p / p
+
+ ### this is technically wrong as the integer portion is coerced to float ###
+ expected = DataFrame({ 'first' : Series([1,1,1,1],dtype='float64'), 'second' : Series([np.inf,np.inf,np.inf,1]) })
+ assert_frame_equal(result,expected)
+
+ result2 = DataFrame(p.values.astype('float64')/p.values,index=p.index,columns=p.columns).fillna(np.inf)
+ assert_frame_equal(result2,expected)
+
+ result = p / 0
+ expected = DataFrame(np.inf,index=p.index,columns=p.columns)
+ assert_frame_equal(result,expected)
+
+ # numpy has a slightly different (wrong) treatement
+ result2 = DataFrame(p.values.astype('float64')/0,index=p.index,columns=p.columns).fillna(np.inf)
+ assert_frame_equal(result2,expected)
+
def test_logical_operators(self):
import operator
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index d98cfe3e385cb..11ede8d759b38 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1766,6 +1766,49 @@ def test_neg(self):
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
+ def test_modulo(self):
+
+ # GH3590, modulo as ints
+ p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })
+ result = p['first'] % p['second']
+ expected = Series(p['first'].values % p['second'].values,dtype='float64')
+ expected.iloc[0:3] = np.nan
+ assert_series_equal(result,expected)
+
+ result = p['first'] % 0
+ expected = Series(np.nan,index=p.index)
+ assert_series_equal(result,expected)
+
+ p = p.astype('float64')
+ result = p['first'] % p['second']
+ expected = Series(p['first'].values % p['second'].values)
+ assert_series_equal(result,expected)
+
+ def test_div(self):
+
+ # integer div, but deal with the 0's
+ p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })
+ result = p['first'] / p['second']
+ expected = Series(p['first'].values / p['second'].values,dtype='float64')
+ expected.iloc[0:3] = np.inf
+ assert_series_equal(result,expected)
+
+ result = p['first'] / 0
+ expected = Series(np.inf,index=p.index)
+ assert_series_equal(result,expected)
+
+ p = p.astype('float64')
+ result = p['first'] / p['second']
+ expected = Series(p['first'].values / p['second'].values)
+ assert_series_equal(result,expected)
+
+ p = DataFrame({ 'first' : [3,4,5,8], 'second' : [1,1,1,1] })
+ result = p['first'] / p['second']
+ if py3compat.PY3:
+ assert_series_equal(result,p['first'].astype('float64'))
+ else:
+ assert_series_equal(result,p['first'])
+
def test_operators(self):
def _check_op(series, other, op, pos_only=False):
| closes #3590
This is a numpy oddity that treats them differently.
```
In [131]: p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })
In [132]: p % 0
Out[132]:
first second
0 NaN NaN
1 NaN NaN
2 NaN NaN
3 NaN NaN
In [133]: p % p
Out[133]:
first second
0 0 NaN
1 0 NaN
2 0 NaN
3 0 0
In [134]: p / p
Out[134]:
first second
0 1 inf
1 1 inf
2 1 inf
3 1 1.000000
In [135]: p / 0
Out[135]:
first second
0 inf inf
1 inf inf
2 inf inf
3 inf inf
```
Numpy does this (on integers), floats are as like above
```
In [3]: x
Out[3]:
array([[3, 0],
[4, 0],
[5, 0],
[8, 3]])
In [4]: x % 0
Out[4]:
array([[0, 0],
[0, 0],
[0, 0],
[0, 0]])
In [5]: x % x
Out[5]:
array([[0, 0],
[0, 0],
[0, 0],
[0, 0]])
In [6]: x / x
Out[6]:
array([[1, 0],
[1, 0],
[1, 0],
[1, 1]])
In [7]: x / 0
Out[7]:
array([[0, 0],
[0, 0],
[0, 0],
[0, 0]])
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3600 | 2013-05-14T14:28:08Z | 2013-05-14T21:48:10Z | 2013-05-14T21:48:10Z | 2014-07-16T08:09:04Z |
BUG: Add squeeze keyword to groupby to allow reduction in returned type | diff --git a/RELEASE.rst b/RELEASE.rst
index 006da5f8e76af..1f5bd2591470b 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -65,6 +65,9 @@ pandas 0.11.1
``timedelta64[ns]`` to ``object/int`` (GH3425_)
- Do not allow datetimelike/timedeltalike creation except with valid types
(e.g. cannot pass ``datetime64[ms]``) (GH3423_)
+ - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
+ DataFrame -> Series if groups are unique. Regression from 0.10.1,
+ partial revert on (GH2893_) with (GH3596_)
**Bug Fixes**
@@ -161,6 +164,7 @@ pandas 0.11.1
.. _GH3594: https://github.com/pydata/pandas/issues/3594
.. _GH3590: https://github.com/pydata/pandas/issues/3590
.. _GH3610: https://github.com/pydata/pandas/issues/3610
+.. _GH3596: https://github.com/pydata/pandas/issues/3596
.. _GH3435: https://github.com/pydata/pandas/issues/3435
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 3719d9eb09dee..c89118298a675 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -21,6 +21,26 @@ API changes
p / p
p / 0
+ - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
+ DataFrame -> Series if groups are unique. This is a Regression from 0.10.1.
+ We are reverting back to the prior behavior. This means groupby will return the
+ same shaped objects whether the groups are unique or not. revert on (GH2893_)
+ with (GH3596_).
+
+ .. ipython:: python
+
+ df2 = DataFrame([{"val1": 1, "val2" : 20}, {"val1":1, "val2": 19},
+ {"val1":1, "val2": 27}, {"val1":1, "val2": 12}])
+ def func(dataf):
+ return dataf["val2"] - dataf["val2"].mean()
+
+ # squeezing the result frame to a series (because we have unique groups)
+ df2.groupby("val1", squeeze=True).apply(func)
+
+ # no squeezing (the default, and behavior in 0.10.1)
+ df2.groupby("val1").apply(func)
+
+
Enhancements
~~~~~~~~~~~~
- ``pd.read_html()`` can now parse HTML string, files or urls and return dataframes
@@ -44,5 +64,7 @@ on GitHub for a complete list.
.. _GH3477: https://github.com/pydata/pandas/issues/3477
.. _GH3492: https://github.com/pydata/pandas/issues/3492
.. _GH3499: https://github.com/pydata/pandas/issues/3499
+.. _GH2893: https://github.com/pydata/pandas/issues/2893
+.. _GH3596: https://github.com/pydata/pandas/issues/3596
.. _GH3590: https://github.com/pydata/pandas/issues/3590
.. _GH3435: https://github.com/pydata/pandas/issues/3435
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ed90aab715cfd..4a80e2f65fd71 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -107,7 +107,7 @@ def get(self, key, default=None):
return default
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
- group_keys=True):
+ group_keys=True, squeeze=False):
"""
Group series using mapper (dict or key function, apply given function
to group, return result as series) or by a series of columns
@@ -131,6 +131,9 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
Sort group keys. Get better performance by turning this off
group_keys : boolean, default True
When calling apply, add group keys to index to identify pieces
+ squeeze : boolean, default False
+ reduce the dimensionaility of the return type if possible, otherwise
+ return a consistent type
Examples
--------
@@ -150,7 +153,8 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
from pandas.core.groupby import groupby
axis = self._get_axis_number(axis)
return groupby(self, by, axis=axis, level=level, as_index=as_index,
- sort=sort, group_keys=group_keys)
+ sort=sort, group_keys=group_keys,
+ squeeze=squeeze)
def asfreq(self, freq, method=None, how=None, normalize=False):
"""
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 093c61ba5af5c..122355581956d 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -169,7 +169,7 @@ class GroupBy(object):
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
- sort=True, group_keys=True):
+ sort=True, group_keys=True, squeeze=False):
self._selection = selection
if isinstance(obj, NDFrame):
@@ -189,6 +189,7 @@ def __init__(self, obj, keys=None, axis=0, level=None,
self.keys = keys
self.sort = sort
self.group_keys = group_keys
+ self.squeeze = squeeze
if grouper is None:
grouper, exclusions = _get_grouper(obj, keys, axis=axis,
@@ -1841,15 +1842,22 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
all_indexed_same = _all_indexes_same([x.index for x in values])
singular_series = len(values) == 1 and applied_index.nlevels == 1
- # assign the name to this series
- if singular_series:
- values[0].name = keys[0]
+ # GH3596
+ # provide a reduction (Frame -> Series) if groups are unique
+ if self.squeeze:
- # GH2893
- # we have series in the values array, we want to produce a series:
- # if any of the sub-series are not indexed the same
- # OR we don't have a multi-index and we have only a single values
- if singular_series or not all_indexed_same:
+ # assign the name to this series
+ if singular_series:
+ values[0].name = keys[0]
+
+ # GH2893
+ # we have series in the values array, we want to produce a series:
+ # if any of the sub-series are not indexed the same
+ # OR we don't have a multi-index and we have only a single values
+ return self._concat_objects(keys, values,
+ not_indexed_same=not_indexed_same)
+
+ if not all_indexed_same:
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index c1c4217cb6f62..c56fca49cce48 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -263,14 +263,14 @@ def test_groupby_nonobject_dtype(self):
def test_groupby_return_type(self):
- # GH2893
+ # GH2893, return a reduced type
df1 = DataFrame([{"val1": 1, "val2" : 20}, {"val1":1, "val2": 19},
{"val1":2, "val2": 27}, {"val1":2, "val2": 12}])
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
- result = df1.groupby("val1").apply(func)
+ result = df1.groupby("val1", squeeze=True).apply(func)
self.assert_(isinstance(result,Series))
df2 = DataFrame([{"val1": 1, "val2" : 20}, {"val1":1, "val2": 19},
@@ -278,9 +278,14 @@ def func(dataf):
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
- result = df2.groupby("val1").apply(func)
+ result = df2.groupby("val1", squeeze=True).apply(func)
self.assert_(isinstance(result,Series))
+ # GH3596, return a consistent type (regression in 0.11 from 0.10.1)
+ df = DataFrame([[1,1],[1,1]],columns=['X','Y'])
+ result = df.groupby('X',squeeze=False).count()
+ self.assert_(isinstance(result,DataFrame))
+
def test_agg_regression1(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.mean)
| This will allow a reduction in the returned type from DataFrame -> Series
if the groups are unique.
This is a fix for a regression from 0.10.1.
Allows functionaility in #2893, by specifying `squeeze=True`
in groupby call. #3596 functionaility is back as the default
This returns a Series because we are passing `squeeze=True`
```
In [9]: df2 = DataFrame([{"val1": 1, "val2" : 20},
{"val1":1, "val2": 19},{"val1":1, "val2": 27}, {"val1":1, "val2": 12}])
In [10]: df2
Out[10]:
val1 val2
0 1 20
1 1 19
2 1 27
3 1 12
In [11]: def func(dataf):
....: return dataf["val2"] - dataf["val2"].mean()
....:
In [12]: df2.groupby("val1", squeeze=True).apply(func)
Out[12]:
0 0.5
1 -0.5
2 7.5
3 -7.5
Name: 1, dtype: float64
```
Traditionally returns a DataFrame (even though have unique groups)
Implicity (`squeeze=False`)
```
In [13]: df = DataFrame([[1,1],[1,1]],columns=['X','Y'])
In [14]: df
Out[14]:
X Y
0 1 1
1 1 1
In [15]: df.groupby('X').count()
Out[15]:
X Y
X
1 2 2
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3599 | 2013-05-14T11:54:01Z | 2013-05-15T20:19:14Z | 2013-05-15T20:19:14Z | 2014-07-06T08:10:53Z |
BUG: Fixed bug where a time-series was being selected in preference to an actual column name | diff --git a/RELEASE.rst b/RELEASE.rst
index 70060ee1b3497..31627cec01d1e 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -108,6 +108,8 @@ pandas 0.11.1
- Fixed bug in reset_index with ``NaN`` in a multi-index (GH3586_)
- ``fillna`` methods now raise a ``TypeError`` when the ``value`` parameter
is a ``list`` or ``tuple``.
+ - Fixed bug where a time-series was being selected in preference to an actual column name
+ in a frame (GH3594_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -150,6 +152,7 @@ pandas 0.11.1
.. _GH3579: https://github.com/pydata/pandas/issues/3579
.. _GH3593: https://github.com/pydata/pandas/issues/3593
.. _GH3556: https://github.com/pydata/pandas/issues/3556
+.. _GH3594: https://github.com/pydata/pandas/issues/3594
.. _GH3435: https://github.com/pydata/pandas/issues/3435
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 8b6acd8c7c53e..bc8b7a3646a33 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -883,6 +883,10 @@ def _convert_to_index_sliceable(obj, key):
elif isinstance(key, basestring):
+ # we are an actual column
+ if key in obj._data.items:
+ return None
+
# we need a timelike key here
if idx.is_all_dates:
try:
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 5ff832431c917..2e4689d7aa620 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -1730,6 +1730,33 @@ def test_fwf(self):
self.assertRaises(ValueError, read_fwf, StringIO(data3),
colspecs=colspecs, widths=[6, 10, 10, 7])
+ def test_fwf_regression(self):
+ # GH 3594
+ #### turns out 'T060' is parsable as a datetime slice!
+
+ tzlist = [1,10,20,30,60,80,100]
+ ntz = len(tzlist)
+ tcolspecs = [16]+[8]*ntz
+ tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
+ data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
+ 2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
+ 2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
+ 2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
+ 2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
+"""
+
+ df = read_fwf(StringIO(data),
+ index_col=0,
+ header=None,
+ names=tcolnames,
+ widths=tcolspecs,
+ parse_dates=True,
+ date_parser=lambda s: datetime.strptime(s,'%Y%j%H%M%S'))
+
+ for c in df.columns:
+ res = df.loc[:,c]
+ self.assert_(len(res))
+
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index e893f83f6d640..9694cc005d178 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -43,6 +43,10 @@ def shape(self):
def axes(self):
return [self.sp_frame.columns, self.sp_frame.index]
+ @property
+ def items(self):
+ return self.sp_frame.columns
+
@property
def blocks(self):
""" return our series in the column order """
| close #3594
| https://api.github.com/repos/pandas-dev/pandas/pulls/3597 | 2013-05-14T00:36:12Z | 2013-05-14T10:51:08Z | 2013-05-14T10:51:08Z | 2014-07-04T18:54:47Z |
BUG: (GH3593) fixed a bug in the incorrect conversion of datetime64[ns] in combine_first | diff --git a/RELEASE.rst b/RELEASE.rst
index 4085d350f3766..862d458f34e22 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -104,6 +104,7 @@ pandas 0.11.1
- ``combine_first`` not returning the same dtype in cases where it can (GH3552_)
- Fixed bug with ``Panel.transpose`` argument aliases (GH3556_)
- Fixed platform bug in ``PeriodIndex.take`` (GH3579_)
+ - Fixed bud in incorrect conversion of datetime64[ns] in ``combine_first`` (GH3593_)
- Fixed bug in reset_index with ``NaN`` in a multi-index (GH3586_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
@@ -145,6 +146,7 @@ pandas 0.11.1
.. _GH3586: https://github.com/pydata/pandas/issues/3586
.. _GH3493: https://github.com/pydata/pandas/issues/3493
.. _GH3579: https://github.com/pydata/pandas/issues/3579
+.. _GH3593: https://github.com/pydata/pandas/issues/3593
.. _GH3556: https://github.com/pydata/pandas/issues/3556
diff --git a/pandas/core/common.py b/pandas/core/common.py
index f71627be1296d..2da2db052cb93 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -921,6 +921,33 @@ def _possibly_downcast_to_dtype(result, dtype):
return result
+def _lcd_dtypes(a_dtype, b_dtype):
+ """ return the lcd dtype to hold these types """
+
+ if is_datetime64_dtype(a_dtype) or is_datetime64_dtype(b_dtype):
+ return _NS_DTYPE
+ elif is_timedelta64_dtype(a_dtype) or is_timedelta64_dtype(b_dtype):
+ return _TD_DTYPE
+ elif is_complex_dtype(a_dtype):
+ if is_complex_dtype(b_dtype):
+ return a_dtype
+ return np.float64
+ elif is_integer_dtype(a_dtype):
+ if is_integer_dtype(b_dtype):
+ if a_dtype.itemsize == b_dtype.itemsize:
+ return a_dtype
+ return np.int64
+ return np.float64
+ elif is_float_dtype(a_dtype):
+ if is_float_dtype(b_dtype):
+ if a_dtype.itemsize == b_dtype.itemsize:
+ return a_dtype
+ else:
+ return np.float64
+ elif is_integer(b_dtype):
+ return np.float64
+ return np.object
+
def _interp_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, mask, limit=None):
view = arr.view(wrap_dtype)
@@ -1524,6 +1551,13 @@ def is_float_dtype(arr_or_dtype):
tipo = arr_or_dtype.dtype.type
return issubclass(tipo, np.floating)
+def is_complex_dtype(arr_or_dtype):
+ if isinstance(arr_or_dtype, np.dtype):
+ tipo = arr_or_dtype.type
+ else:
+ tipo = arr_or_dtype.dtype.type
+ return issubclass(tipo, np.complexfloating)
+
def is_list_like(arg):
return hasattr(arg, '__iter__') and not isinstance(arg, basestring) or hasattr(arg,'len')
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3df95b27f8736..1b01c92f03a32 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3738,8 +3738,11 @@ def combine(self, other, func, fill_value=None, overwrite=True):
result = {}
for col in new_columns:
- series = this[col].values
- otherSeries = other[col].values
+ series = this[col]
+ otherSeries = other[col]
+
+ this_dtype = series.dtype
+ other_dtype = otherSeries.dtype
this_mask = isnull(series)
other_mask = isnull(otherSeries)
@@ -3756,18 +3759,40 @@ def combine(self, other, func, fill_value=None, overwrite=True):
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
- arr = func(series, otherSeries)
+ # if we have different dtypes, possibily promote
+ new_dtype = this_dtype
+ if this_dtype != other_dtype:
+ new_dtype = com._lcd_dtypes(this_dtype,other_dtype)
+ series = series.astype(new_dtype)
+ otherSeries = otherSeries.astype(new_dtype)
+
+ # see if we need to be represented as i8 (datetimelike)
+ # try to keep us at this dtype
+ needs_i8_conversion = com.needs_i8_conversion(new_dtype)
+ if needs_i8_conversion:
+ this_dtype = new_dtype
+ arr = func(series, otherSeries, True)
+ else:
+ arr = func(series, otherSeries)
if do_fill:
arr = com.ensure_float(arr)
arr[this_mask & other_mask] = NA
+ # try to downcast back to the original dtype
+ if needs_i8_conversion:
+ arr = com._possibly_cast_to_datetime(arr, this_dtype)
+ else:
+ arr = com._possibly_downcast_to_dtype(arr, this_dtype)
+
result[col] = arr
# convert_objects just in case
return self._constructor(result,
index=new_index,
- columns=new_columns).convert_objects(copy=False)
+ columns=new_columns).convert_objects(
+ convert_dates=True,
+ copy=False)
def combine_first(self, other):
"""
@@ -3788,8 +3813,18 @@ def combine_first(self, other):
-------
combined : DataFrame
"""
- def combiner(x, y):
- return expressions.where(isnull(x), y, x, raise_on_error=True)
+ def combiner(x, y, needs_i8_conversion=False):
+ x_values = x.values if hasattr(x,'values') else x
+ y_values = y.values if hasattr(y,'values') else y
+ if needs_i8_conversion:
+ mask = isnull(x)
+ x_values = x_values.view('i8')
+ y_values = y_values.view('i8')
+ else:
+ mask = isnull(x_values)
+
+ return expressions.where(mask, y_values, x_values, raise_on_error=True)
+
return self.combine(other, combiner, overwrite=False)
def update(self, other, join='left', overwrite=True, filter_func=None,
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index b6459b0e461b4..d058d20427ad7 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -258,14 +258,15 @@ def downcast(self, dtypes = None):
return blocks
- def astype(self, dtype, copy = True, raise_on_error = True):
+ def astype(self, dtype, copy = True, raise_on_error = True, values = None):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
try:
- newb = make_block(com._astype_nansafe(self.values, dtype, copy = copy),
- self.items, self.ref_items, fastpath=True)
+ if values is None:
+ values = com._astype_nansafe(self.values, dtype, copy = copy)
+ newb = make_block(values, self.items, self.ref_items, fastpath=True)
except:
if raise_on_error is True:
raise
@@ -708,6 +709,15 @@ def is_bool(self):
""" we can be a bool if we have only bool values but are of type object """
return lib.is_bool_array(self.values.ravel())
+ def astype(self, dtype, copy=True, raise_on_error=True, values=None):
+ """ allow astypes to datetime64[ns],timedelta64[ns] with coercion """
+ dtype = np.dtype(dtype)
+ if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
+ values = com._possibly_convert_datetime(self.values,dtype)
+ else:
+ values = None
+ return super(ObjectBlock, self).astype(dtype=dtype,copy=copy,raise_on_error=raise_on_error,values=values)
+
def convert(self, convert_dates = True, convert_numeric = True, copy = True):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index cebf2f4ef9d1f..8a3f353aa7c4a 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -17,7 +17,8 @@
from pandas.core.common import (isnull, notnull, _is_bool_indexer,
_default_index, _maybe_promote, _maybe_upcast,
_asarray_tuplesafe, is_integer_dtype,
- _infer_dtype_from_scalar, is_list_like)
+ _infer_dtype_from_scalar, is_list_like,
+ _NS_DTYPE, _TD_DTYPE)
from pandas.core.index import (Index, MultiIndex, InvalidIndexError,
_ensure_index, _handle_legacy_indexes)
from pandas.core.indexing import _SeriesIndexer, _check_bool_indexer, _check_slice_bounds
@@ -929,9 +930,13 @@ def astype(self, dtype):
"""
See numpy.ndarray.astype
"""
- casted = com._astype_nansafe(self.values, dtype)
- return self._constructor(casted, index=self.index, name=self.name,
- dtype=casted.dtype)
+ dtype = np.dtype(dtype)
+ if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
+ values = com._possibly_cast_to_datetime(self.values,dtype)
+ else:
+ values = com._astype_nansafe(self.values, dtype)
+ return self._constructor(values, index=self.index, name=self.name,
+ dtype=values.dtype)
def convert_objects(self, convert_dates=True, convert_numeric=True, copy=True):
"""
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 7e7813e048bd1..ce24c72f75882 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -7907,6 +7907,25 @@ def test_combine_first_mixed_bug(self):
expected = Series([True,True,False])
assert_series_equal(result,expected)
+ # GH 3593, converting datetime64[ns] incorrecly
+ df0 = DataFrame({"a":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})
+ df1 = DataFrame({"a":[None, None, None]})
+ df2 = df1.combine_first(df0)
+ assert_frame_equal(df2,df0)
+
+ df2 = df0.combine_first(df1)
+ assert_frame_equal(df2,df0)
+
+ df0 = DataFrame({"a":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})
+ df1 = DataFrame({"a":[datetime(2000, 1, 2), None, None]})
+ df2 = df1.combine_first(df0)
+ result = df0.copy()
+ result.iloc[0,:] = df1.iloc[0,:]
+ assert_frame_equal(df2,result)
+
+ df2 = df0.combine_first(df1)
+ assert_frame_equal(df2,df0)
+
def test_update(self):
df = DataFrame([[1.5, nan, 3.],
[1.5, nan, 3.],
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 6fbce9df753d8..94d29e9233fb6 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1856,7 +1856,7 @@ def test_operators_timedelta64(self):
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
- xp = Series(1e9 * 3600 * 24, rs.index).astype('timedelta64[ns]')
+ xp = Series(1e9 * 3600 * 24, rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assert_(rs.dtype=='timedelta64[ns]')
| closes #3593
| https://api.github.com/repos/pandas-dev/pandas/pulls/3595 | 2013-05-13T22:07:42Z | 2013-05-13T22:49:40Z | 2013-05-13T22:49:40Z | 2014-06-19T14:13:13Z |
BUG: fix take platorm issue with PeriodIndex (GH3579) | diff --git a/RELEASE.rst b/RELEASE.rst
index eaff573a7510a..4085d350f3766 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -103,6 +103,7 @@ pandas 0.11.1
- Fix ``.diff`` on datelike and timedelta operations (GH3100_)
- ``combine_first`` not returning the same dtype in cases where it can (GH3552_)
- Fixed bug with ``Panel.transpose`` argument aliases (GH3556_)
+ - Fixed platform bug in ``PeriodIndex.take`` (GH3579_)
- Fixed bug in reset_index with ``NaN`` in a multi-index (GH3586_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
@@ -143,6 +144,7 @@ pandas 0.11.1
.. _GH3562: https://github.com/pydata/pandas/issues/3562
.. _GH3586: https://github.com/pydata/pandas/issues/3586
.. _GH3493: https://github.com/pydata/pandas/issues/3493
+.. _GH3579: https://github.com/pydata/pandas/issues/3579
.. _GH3556: https://github.com/pydata/pandas/issues/3556
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 051d8c43a48a8..23077655d5144 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -318,6 +318,15 @@ def test_agg_period_index(self):
rs = df.groupby(level=0).sum()
self.assert_(isinstance(rs.index, PeriodIndex))
+ # GH 3579
+ index = period_range(start='1999-01', periods=5, freq='M')
+ s1 = Series(np.random.rand(len(index)), index=index)
+ s2 = Series(np.random.rand(len(index)), index=index)
+ series = [('s1', s1), ('s2',s2)]
+ df = DataFrame.from_items(series)
+ grouped = df.groupby(df.index.month)
+ list(grouped)
+
def test_agg_must_agg(self):
grouped = self.df.groupby('A')['C']
self.assertRaises(Exception, grouped.agg, lambda x: x.describe())
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index abb7486de9351..34c640392bda9 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -1125,6 +1125,7 @@ def take(self, indices, axis=None):
"""
Analogous to ndarray.take
"""
+ indices = com._ensure_platform_int(indices)
taken = self.values.take(indices, axis=axis)
taken = taken.view(PeriodIndex)
taken.freq = self.freq
| closes #3579
| https://api.github.com/repos/pandas-dev/pandas/pulls/3591 | 2013-05-13T18:02:38Z | 2013-05-13T18:55:40Z | 2013-05-13T18:55:40Z | 2014-06-24T14:21:10Z |
BUG: Fixed bug in reset_index with ``NaN`` in a multi-index (GH3586_) | diff --git a/RELEASE.rst b/RELEASE.rst
index efd6b87e59c62..eaff573a7510a 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -103,6 +103,7 @@ pandas 0.11.1
- Fix ``.diff`` on datelike and timedelta operations (GH3100_)
- ``combine_first`` not returning the same dtype in cases where it can (GH3552_)
- Fixed bug with ``Panel.transpose`` argument aliases (GH3556_)
+ - Fixed bug in reset_index with ``NaN`` in a multi-index (GH3586_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -140,6 +141,7 @@ pandas 0.11.1
.. _GH3492: https://github.com/pydata/pandas/issues/3492
.. _GH3552: https://github.com/pydata/pandas/issues/3552
.. _GH3562: https://github.com/pydata/pandas/issues/3562
+.. _GH3586: https://github.com/pydata/pandas/issues/3586
.. _GH3493: https://github.com/pydata/pandas/issues/3493
.. _GH3556: https://github.com/pydata/pandas/issues/3556
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ad1429fcea1ca..725d10c2270d3 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2808,9 +2808,18 @@ def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
else:
new_obj = self.copy()
- def _maybe_cast(values):
+ def _maybe_cast(values, labels=None):
+
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
+
+ # if we have the labels, extract the values with a mask
+ if labels is not None:
+ mask = labels == -1
+ values = values.take(labels)
+ if mask.any():
+ values, changed = com._maybe_upcast_putmask(values,mask,np.nan)
+
return values
new_index = np.arange(len(new_obj))
@@ -2843,9 +2852,9 @@ def _maybe_cast(values):
col_name = tuple(name_lst)
# to ndarray and maybe infer different dtype
- level_values = _maybe_cast(lev.values)
+ level_values = _maybe_cast(lev.values, lab)
if level is None or i in level:
- new_obj.insert(0, col_name, level_values.take(lab))
+ new_obj.insert(0, col_name, level_values)
elif not drop:
name = self.index.name
@@ -2865,8 +2874,8 @@ def _maybe_cast(values):
self.index.tz is not None):
values = self.index.asobject
else:
- values = self.index.values
- new_obj.insert(0, name, _maybe_cast(values))
+ values = _maybe_cast(self.index.values)
+ new_obj.insert(0, name, values)
new_obj.index = new_index
if not inplace:
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index f70c781847cc7..01651f2674a90 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -799,6 +799,25 @@ def test_indexing_mixed_frame_bug(self):
self.assert_(df.iloc[0,2] == '-----')
#if I look at df, then element [0,2] equals '_'. If instead I type df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I get '_'.
+
+
+ def test_set_index_nan(self):
+
+ # GH 3586
+ df = DataFrame({'PRuid': {17: 'nonQC', 18: 'nonQC', 19: 'nonQC', 20: '10', 21: '11', 22: '12', 23: '13',
+ 24: '24', 25: '35', 26: '46', 27: '47', 28: '48', 29: '59', 30: '10'},
+ 'QC': {17: 0.0, 18: 0.0, 19: 0.0, 20: nan, 21: nan, 22: nan, 23: nan, 24: 1.0, 25: nan,
+ 26: nan, 27: nan, 28: nan, 29: nan, 30: nan},
+ 'data': {17: 7.9544899999999998, 18: 8.0142609999999994, 19: 7.8591520000000008, 20: 0.86140349999999999,
+ 21: 0.87853110000000001, 22: 0.8427041999999999, 23: 0.78587700000000005, 24: 0.73062459999999996,
+ 25: 0.81668560000000001, 26: 0.81927080000000008, 27: 0.80705009999999999, 28: 0.81440240000000008,
+ 29: 0.80140849999999997, 30: 0.81307740000000006},
+ 'year': {17: 2006, 18: 2007, 19: 2008, 20: 1985, 21: 1985, 22: 1985, 23: 1985,
+ 24: 1985, 25: 1985, 26: 1985, 27: 1985, 28: 1985, 29: 1985, 30: 1986}}).reset_index()
+
+ result = df.set_index(['year','PRuid','QC']).reset_index().reindex(columns=df.columns)
+ assert_frame_equal(result,df)
+
if __name__ == '__main__':
import nose
| closes #3586
| https://api.github.com/repos/pandas-dev/pandas/pulls/3587 | 2013-05-13T13:29:29Z | 2013-05-13T17:35:20Z | 2013-05-13T17:35:20Z | 2014-06-29T12:28:29Z |
raise on fillna passed a list or tuple | diff --git a/RELEASE.rst b/RELEASE.rst
index 4085d350f3766..042b827617327 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -105,6 +105,8 @@ pandas 0.11.1
- Fixed bug with ``Panel.transpose`` argument aliases (GH3556_)
- Fixed platform bug in ``PeriodIndex.take`` (GH3579_)
- Fixed bug in reset_index with ``NaN`` in a multi-index (GH3586_)
+ - ``fillna`` methods now raise a ``TypeError`` when the ``value`` parameter
+ is a ``list`` or ``tuple``.
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -146,6 +148,7 @@ pandas 0.11.1
.. _GH3493: https://github.com/pydata/pandas/issues/3493
.. _GH3579: https://github.com/pydata/pandas/issues/3579
.. _GH3556: https://github.com/pydata/pandas/issues/3556
+.. _GH3435: https://github.com/pydata/pandas/issues/3435
pandas 0.11.0
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 76565df8f593c..74818f9542cae 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -21,6 +21,8 @@ Enhancements
an index with a different frequency than the existing, or attempting
to append an index with a different name than the existing
- support datelike columns with a timezone as data_columns (GH2852_)
+ - ``fillna`` methods now raise a ``TypeError`` if the ``value`` parameter is
+ a list or tuple.
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
@@ -31,3 +33,4 @@ on GitHub for a complete list.
.. _GH3477: https://github.com/pydata/pandas/issues/3477
.. _GH3492: https://github.com/pydata/pandas/issues/3492
.. _GH3499: https://github.com/pydata/pandas/issues/3499
+.. _GH3435: https://github.com/pydata/pandas/issues/3435
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3df95b27f8736..777749c6b35dc 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3319,7 +3319,7 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
value : scalar or dict
Value to use to fill holes (e.g. 0), alternately a dict of values
specifying which value to use for each column (columns not in the
- dict will not be filled)
+ dict will not be filled). This value cannot be a list.
axis : {0, 1}, default 0
0: fill column-by-column
1: fill row-by-row
@@ -3341,6 +3341,9 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
-------
filled : DataFrame
"""
+ if isinstance(value, (list, tuple)):
+ raise TypeError('"value" parameter must be a scalar or dict, but '
+ 'you passed a "{0}"'.format(type(value).__name__))
self._consolidate_inplace()
axis = self._get_axis_number(axis)
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 869bb31acad6b..44b62991cf7a3 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -1007,6 +1007,9 @@ def fillna(self, value=None, method=None):
--------
DataFrame.reindex, DataFrame.asfreq
"""
+ if isinstance(value, (list, tuple)):
+ raise TypeError('"value" parameter must be a scalar or dict, but '
+ 'you passed a "{0}"'.format(type(value).__name__))
if value is None:
if method is None:
raise ValueError('must specify a fill method or value')
diff --git a/pandas/core/series.py b/pandas/core/series.py
index cebf2f4ef9d1f..d9aacf1b0b080 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2729,6 +2729,9 @@ def fillna(self, value=None, method=None, inplace=False,
-------
filled : Series
"""
+ if isinstance(value, (list, tuple)):
+ raise TypeError('"value" parameter must be a scalar or dict, but '
+ 'you passed a "{0}"'.format(type(value).__name__))
if not self._can_hold_na:
return self.copy() if not inplace else None
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 7e7813e048bd1..ce284b6b72f24 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -6039,6 +6039,12 @@ def test_fillna_invalid_method(self):
except ValueError, inst:
self.assert_('ffil' in str(inst))
+ def test_fillna_invalid_value(self):
+ # list
+ self.assertRaises(TypeError, self.frame.fillna, [1, 2])
+ # tuple
+ self.assertRaises(TypeError, self.frame.fillna, (1, 2))
+
def test_replace_inplace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 081af101b643b..3640025bbf95c 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1099,6 +1099,9 @@ def test_fillna(self):
self.assertRaises(ValueError, self.panel.fillna)
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
+ self.assertRaises(TypeError, self.panel.fillna, [1, 2])
+ self.assertRaises(TypeError, self.panel.fillna, (1, 2))
+
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 6fbce9df753d8..915becec8d7ff 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -3822,6 +3822,11 @@ def test_fillna_int(self):
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
+ def test_fillna_raise(self):
+ s = Series(np.random.randint(-100, 100, 50))
+ self.assertRaises(TypeError, s.fillna, [1, 2])
+ self.assertRaises(TypeError, s.fillna, (1, 2))
+
#------------------------------------------------------------------------------
# TimeSeries-specific
| fixes #3435.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3585 | 2013-05-13T05:40:00Z | 2013-05-13T22:51:06Z | 2013-05-13T22:51:05Z | 2014-06-18T07:35:10Z |
ENH: add regex functionality to DataFrame.replace | diff --git a/doc/source/api.rst b/doc/source/api.rst
index ca95a739ed661..c5b83e4af6999 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -465,6 +465,7 @@ Missing data handling
DataFrame.dropna
DataFrame.fillna
+ DataFrame.replace
Reshaping, sorting, transposing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -492,7 +493,6 @@ Combining / joining / merging
DataFrame.append
DataFrame.join
DataFrame.merge
- DataFrame.replace
DataFrame.update
Time series-related
diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index 133d83513041e..70db8abf3c503 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -334,6 +334,133 @@ missing and interpolate over them:
ser.replace([1, 2, 3], method='pad')
+String/Regular Expression Replacement
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. note::
+
+ Python strings prefixed with the ``r`` character such as ``r'hello world'``
+ are so-called "raw" strings. They have different semantics regarding
+ backslashes than strings without this prefix. Backslashes in raw strings
+ will be interpreted as an escaped backslash, e.g., ``r'\' == '\\'``. You
+ should `read about them
+ <http://docs.python.org/2/reference/lexical_analysis.html#string-literals>`_
+ if this is unclear.
+
+Replace the '.' with ``nan`` (str -> str)
+
+.. ipython:: python
+
+ from numpy.random import rand, randn
+ from numpy import nan
+ from pandas import DataFrame
+ d = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
+ df = DataFrame(d)
+ df.replace('.', nan)
+
+Now do it with a regular expression that removes surrounding whitespace
+(regex -> regex)
+
+.. ipython:: python
+
+ df.replace(r'\s*\.\s*', nan, regex=True)
+
+Replace a few different values (list -> list)
+
+.. ipython:: python
+
+ df.replace(['a', '.'], ['b', nan])
+
+list of regex -> list of regex
+
+.. ipython:: python
+
+ df.replace([r'\.', r'(a)'], ['dot', '\1stuff'], regex=True)
+
+Only search in column ``'b'`` (dict -> dict)
+
+.. ipython:: python
+
+ df.replace({'b': '.'}, {'b': nan})
+
+Same as the previous example, but use a regular expression for
+searching instead (dict of regex -> dict)
+
+.. ipython:: python
+
+ df.replace({'b': r'\s*\.\s*'}, {'b': nan}, regex=True)
+
+You can pass nested dictionaries of regular expressions that use ``regex=True``
+
+.. ipython:: python
+
+ df.replace({'b': {'b': r''}}, regex=True)
+
+or you can pass the nested dictionary like so
+
+.. ipython:: python
+
+ df.replace(regex={'b': {'b': r'\s*\.\s*'}})
+
+You can also use the group of a regular expression match when replacing (dict
+of regex -> dict of regex), this works for lists as well
+
+.. ipython:: python
+
+ df.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True)
+
+You can pass a list of regular expressions, of which those that match
+will be replaced with a scalar (list of regex -> regex)
+
+.. ipython:: python
+
+ df.replace([r'\s*\.\*', r'a|b'], nan, regex=True)
+
+All of the regular expression examples can also be passed with the
+``to_replace`` argument as the ``regex`` argument. In this case the ``value``
+argument must be passed explicity by name or ``regex`` must be a nested
+dictionary. The previous example, in this case, would then be
+
+.. ipython:: python
+
+ df.replace(regex=[r'\s*\.\*', r'a|b'], value=nan)
+
+This can be convenient if you do not want to pass ``regex=True`` every time you
+want to use a regular expression.
+
+.. note::
+
+ Anywhere in the above ``replace`` examples that you see a regular expression
+ a compiled regular expression is valid as well.
+
+Numeric Replacement
+^^^^^^^^^^^^^^^^^^^
+
+Similiar to ``DataFrame.fillna``
+
+.. ipython:: python
+
+ from numpy.random import rand, randn
+ from numpy import nan
+ from pandas import DataFrame
+ from pandas.util.testing import assert_frame_equal
+ df = DataFrame(randn(10, 2))
+ df[rand(df.shape[0]) > 0.5] = 1.5
+ df.replace(1.5, nan)
+
+Replacing more than one value via lists works as well
+
+.. ipython:: python
+
+ df00 = df.values[0, 0]
+ df.replace([1.5, df00], [nan, 'a'])
+ df[1].dtype
+
+You can also operate on the DataFrame in place
+
+.. ipython:: python
+
+ df.replace(1.5, nan, inplace=True)
Missing data casting rules and indexing
---------------------------------------
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 4d983905f9aaa..c16eb64631198 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -55,6 +55,9 @@ Enhancements
- ``fillna`` methods now raise a ``TypeError`` if the ``value`` parameter is
a list or tuple.
- Added module for reading and writing Stata files: pandas.io.stata (GH1512_)
+ - ``DataFrame.replace()`` now allows regular expressions on contained
+ ``Series`` with object dtype. See the examples section in the regular docs
+ and the generated documentation for the method for more details.
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
@@ -70,3 +73,4 @@ on GitHub for a complete list.
.. _GH3590: https://github.com/pydata/pandas/issues/3590
.. _GH3435: https://github.com/pydata/pandas/issues/3435
.. _GH1512: https://github.com/pydata/pandas/issues/1512
+.. _GH2285: https://github.com/pydata/pandas/issues/2285
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 73f789a9425c6..39742557ccc56 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -17,6 +17,7 @@
import operator
import sys
import collections
+import itertools
from numpy import nan as NA
import numpy as np
@@ -32,7 +33,8 @@
_maybe_convert_indices)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
- create_block_manager_from_blocks)
+ create_block_manager_from_blocks,
+ _re_compilable)
from pandas.core.series import Series, _radd_compat
import pandas.core.expressions as expressions
from pandas.compat.scipy import scoreatpercentile as _quantile
@@ -3431,17 +3433,46 @@ def bfill(self, axis=0, inplace=False, limit=None):
return self.fillna(method='bfill', axis=axis, inplace=inplace,
limit=limit)
- def replace(self, to_replace, value=None, method='pad', axis=0,
- inplace=False, limit=None):
- """
- Replace values given in 'to_replace' with 'value' or using 'method'
+ def replace(self, to_replace=None, value=None, method='pad', axis=0,
+ inplace=False, limit=None, regex=False, infer_types=False):
+ """Replace values given in 'to_replace' with 'value' or using 'method'.
Parameters
----------
- value : scalar or dict, default None
+ to_replace : str, regex, list, dict, Series, numeric, or None
+ * str or regex:
+ - str: string exactly matching `to_replace` will be replaced
+ with `value`
+ - regex: regexs matching `to_replace` will be replaced with
+ `value`
+ * list of str, regex, or numeric:
+ - First, if `to_replace` and `value` are both lists, they
+ **must** be the same length.
+ - Second, if ``regex=True`` then all of the strings in **both**
+ lists will be interpreted as regexs otherwise they will match
+ directly. This doesn't matter much for `value` since there
+ are only a few possible substitution regexes you can use.
+ - str and regex rules apply as above.
+ * dict:
+ - Nested dictionaries, e.g., {'a': {'b': nan}}, are read as
+ follows: look in column 'a' for the value 'b' and replace it
+ with nan. You can nest regular expressions as well. Note that
+ column names (the top-level dictionary keys in a nested
+ dictionary) **cannot** be regular expressions.
+ - Keys map to column names and values map to substitution
+ values. You can treat this as a special case of passing two
+ lists except that you are specifying the column to search in.
+ * None:
+ - This means that the ``regex`` argument must be a string,
+ compiled regular expression, or list, dict, ndarray or Series
+ of such elements. If `value` is also ``None`` then this
+ **must** be a nested dictionary or ``Series``.
+ See the examples section for examples of each of these.
+ value : scalar, dict, list, str, regex, default None
Value to use to fill holes (e.g. 0), alternately a dict of values
specifying which value to use for each column (columns not in the
- dict will not be filled)
+ dict will not be filled). Regular expressions, strings and lists or
+ dicts of such objects are also allowed.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad'
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
@@ -3456,23 +3487,91 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
a reference to the filled object, which is self if inplace=True
limit : int, default None
Maximum size gap to forward or backward fill
+ regex : bool or same types as `to_replace`, default False
+ Whether to interpret `to_replace` and/or `value` as regular
+ expressions. If this is ``True`` then `to_replace` *must* be a
+ string. Otherwise, `to_replace` must be ``None`` because this
+ parameter will be interpreted as a regular expression or a list,
+ dict, or array of regular expressions.
+ infer_types : bool, default True
+ If ``True`` attempt to convert object blocks to a better dtype.
See also
--------
- reindex, asfreq
+ reindex, asfreq, fillna, interpolate
Returns
-------
filled : DataFrame
- """
+
+ Raises
+ ------
+ AssertionError
+ * If `regex` is not a ``bool`` and `to_replace` is not ``None``.
+ TypeError
+ * If `to_replace` is a ``dict`` and `value` is not a ``list``,
+ ``dict``, ``ndarray``, or ``Series``
+ * If `to_replace` is ``None`` and `regex` is not compilable into a
+ regular expression or is a list, dict, ndarray, or Series.
+ ValueError
+ * If `to_replace` and `value` are ``list`` s or ``ndarray`` s, but
+ they are not the same length.
+
+ Notes
+ -----
+ * Regex substitution is performed under the hood with ``re.sub``. The
+ rules for substitution for ``re.sub`` are the same.
+ * Regular expressions will only substitute on strings, meaning you
+ cannot provide, for example, a regular expression matching floating
+ point numbers and expect the columns in your frame that have a
+ numeric dtype to be matched. However, if those floating point numbers
+ *are* strings, then you can do this.
+ * This method has *a lot* of options. You are encouraged to experiment
+ and play with this method to gain intuition about how it works.
+ """
+ if not isinstance(regex, bool) and to_replace is not None:
+ raise AssertionError("'to_replace' must be 'None' if 'regex' is "
+ "not a bool")
self._consolidate_inplace()
axis = self._get_axis_number(axis)
+ method = com._clean_fill_method(method)
if value is None:
- return self._interpolate(to_replace, method, axis, inplace, limit)
+ if not isinstance(to_replace, (dict, Series)):
+ if not isinstance(regex, (dict, Series)):
+ raise TypeError('If "to_replace" and "value" are both None'
+ ' then regex must be a mapping')
+ to_replace = regex
+ regex = True
+
+ items = to_replace.items()
+ keys, values = itertools.izip(*items)
+
+ are_mappings = [isinstance(v, (dict, Series)) for v in values]
+
+ if any(are_mappings):
+ if not all(are_mappings):
+ raise TypeError("If a nested mapping is passed, all values"
+ " of the top level mapping must be "
+ "mappings")
+ # passed a nested dict/Series
+ to_rep_dict = {}
+ value_dict = {}
+
+ for k, v in items:
+ to_rep_dict[k] = v.keys()
+ value_dict[k] = v.values()
+
+ to_replace, value = to_rep_dict, value_dict
+ else:
+ to_replace, value = keys, values
+
+ return self.replace(to_replace, value, method=method, axis=axis,
+ inplace=inplace, limit=limit, regex=regex,
+ infer_types=infer_types)
else:
- if len(self.columns) == 0:
+ if not len(self.columns):
return self
new_data = self._data
@@ -3483,17 +3582,20 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
if c in value and c in self:
new_data = new_data.replace(src, value[c],
filter=[ c ],
- inplace=inplace)
+ inplace=inplace,
+ regex=regex)
- elif not isinstance(value, (list, np.ndarray)):
+ elif not isinstance(value, (list, np.ndarray)): # {'A': NA} -> 0
new_data = self._data
for k, src in to_replace.iteritems():
if k in self:
new_data = new_data.replace(src, value,
filter = [ k ],
- inplace=inplace)
+ inplace=inplace,
+ regex=regex)
else:
- raise ValueError('Fill value must be scalar or dict or Series')
+ raise TypeError('Fill value must be scalar, dict, or '
+ 'Series')
elif isinstance(to_replace, (list, np.ndarray)):
# [NA, ''] -> [0, 'missing']
@@ -3504,63 +3606,93 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
(len(to_replace), len(value)))
new_data = self._data.replace_list(to_replace, value,
- inplace=inplace)
+ inplace=inplace,
+ regex=regex)
else: # [NA, ''] -> 0
new_data = self._data.replace(to_replace, value,
- inplace=inplace)
-
+ inplace=inplace, regex=regex)
+ elif to_replace is None:
+ if not (_re_compilable(regex) or
+ isinstance(regex, (list, dict, np.ndarray, Series))):
+ raise TypeError("'regex' must be a string or a compiled "
+ "regular expression or a list or dict of "
+ "strings or regular expressions, you "
+ "passed a {0}".format(type(regex)))
+ return self.replace(regex, value, method=method, axis=axis,
+ inplace=inplace, limit=limit, regex=True,
+ infer_types=infer_types)
else:
# dest iterable dict-like
if isinstance(value, (dict, Series)): # NA -> {'A' : 0, 'B' : -1}
-
new_data = self._data
+
for k, v in value.iteritems():
if k in self:
new_data = new_data.replace(to_replace, v,
filter=[ k ],
- inplace=inplace)
+ inplace=inplace,
+ regex=regex)
elif not isinstance(value, (list, np.ndarray)): # NA -> 0
new_data = self._data.replace(to_replace, value,
- inplace=inplace)
+ inplace=inplace, regex=regex)
else:
- raise ValueError('Invalid to_replace type: %s' %
- type(to_replace)) # pragma: no cover
+ raise TypeError('Invalid "to_replace" type: '
+ '{0}'.format(type(to_replace))) # pragma: no cover
+ if infer_types:
+ new_data = new_data.convert()
if inplace:
self._data = new_data
else:
return self._constructor(new_data)
- def _interpolate(self, to_replace, method, axis, inplace, limit):
+ def interpolate(self, to_replace, method='pad', axis=0, inplace=False,
+ limit=None):
+ """Interpolate values according to different methods.
+
+ Parameters
+ ----------
+ to_replace : dict, Series
+ method : str
+ axis : int
+ inplace : bool
+ limit : int, default None
+
+ Returns
+ -------
+ frame : interpolated
+
+ See Also
+ --------
+ reindex, replace, fillna
+ """
if self._is_mixed_type and axis == 1:
return self.T.replace(to_replace, method=method, limit=limit).T
method = com._clean_fill_method(method)
if isinstance(to_replace, (dict, Series)):
- if axis == 1:
- return self.T.replace(to_replace, method=method,
- limit=limit).T
-
- rs = self if inplace else self.copy()
- for k, v in to_replace.iteritems():
- if k in rs:
- rs[k].replace(v, method=method, limit=limit,
- inplace=True)
- return rs if not inplace else None
-
+ if axis == 0:
+ return self.replace(to_replace, method=method, inplace=inplace,
+ limit=limit, axis=axis)
+ elif axis == 1:
+ obj = self.T
+ if inplace:
+ obj.replace(to_replace, method=method, limit=limit,
+ inplace=inplace, axis=0)
+ return obj.T
+ return obj.replace(to_replace, method=method, limit=limit,
+ inplace=inplace, axis=0).T
+ else:
+ raise ValueError('Invalid value for axis')
else:
-
- new_data = self._data.interpolate(method = method,
- axis = axis,
- limit = limit,
- inplace = inplace,
- missing = to_replace,
- coerce = False)
+ new_data = self._data.interpolate(method=method, axis=axis,
+ limit=limit, inplace=inplace,
+ missing=to_replace, coerce=False)
if inplace:
self._data = new_data
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index d058d20427ad7..849776940512e 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1,5 +1,7 @@
import itertools
+import re
from datetime import datetime
+import collections
from numpy import nan
import numpy as np
@@ -16,6 +18,10 @@
from pandas.util import py3compat
+def _re_compilable(ex):
+ return isinstance(ex, (basestring, re._pattern_type))
+
+
class Block(object):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
@@ -318,9 +324,12 @@ def to_native_types(self, slicer=None, na_rep='', **kwargs):
values[mask] = na_rep
return values.tolist()
- def replace(self, to_replace, value, inplace=False, filter=None):
- """ replace the to_replace value with value, possible to create new blocks here
- this is just a call to putmask """
+ def replace(self, to_replace, value, inplace=False, filter=None,
+ regex=False):
+ """ replace the to_replace value with value, possible to create new
+ blocks here this is just a call to putmask. regex is not used here.
+ It is used in ObjectBlocks. It is here for API
+ compatibility."""
mask = com.mask_missing(self.values, to_replace)
if filter is not None:
for i, item in enumerate(self.items):
@@ -750,6 +759,101 @@ def should_store(self, value):
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_))
+ def replace(self, to_replace, value, inplace=False, filter=None,
+ regex=False):
+ blk = [self]
+ to_rep_is_list = (isinstance(to_replace, collections.Iterable) and not
+ isinstance(to_replace, basestring))
+ value_is_list = (isinstance(value, collections.Iterable) and not
+ isinstance(to_replace, basestring))
+ both_lists = to_rep_is_list and value_is_list
+ either_list = to_rep_is_list or value_is_list
+
+ if not either_list and not regex:
+ blk = super(ObjectBlock, self).replace(to_replace, value,
+ inplace=inplace,
+ filter=filter, regex=regex)
+ elif both_lists and regex:
+ for to_rep, v in itertools.izip(to_replace, value):
+ blk[0], = blk[0]._replace_single(to_rep, v, inplace=inplace,
+ filter=filter, regex=regex)
+ elif to_rep_is_list and regex:
+ for to_rep in to_replace:
+ blk[0], = blk[0]._replace_single(to_rep, value,
+ inplace=inplace,
+ filter=filter, regex=regex)
+ else:
+ blk[0], = blk[0]._replace_single(to_replace, value,
+ inplace=inplace, filter=filter,
+ regex=regex)
+ return blk
+
+ def _replace_single(self, to_replace, value, inplace=False, filter=None,
+ regex=False):
+ # to_replace is regex compilable
+ to_rep_re = _re_compilable(to_replace)
+
+ # regex is regex compilable
+ regex_re = _re_compilable(regex)
+
+ if to_rep_re and regex_re:
+ raise AssertionError('only one of to_replace and regex can be '
+ 'regex compilable')
+
+ if regex_re:
+ to_replace = regex
+
+ regex = regex_re or to_rep_re
+
+ # try to get the pattern attribute (compiled re) or it's a string
+ try:
+ pattern = to_replace.pattern
+ except AttributeError:
+ pattern = to_replace
+
+ # if the pattern is not empty and to_replace is either a string or a
+ # regex
+ if regex and pattern:
+ rx = re.compile(to_replace)
+ else:
+ # if the thing to replace is not a string or compiled regex call
+ # the superclass method -> to_replace is some kind of object
+ return super(ObjectBlock, self).replace(to_replace, value,
+ inplace=inplace,
+ filter=filter, regex=regex)
+
+ new_values = self.values if inplace else self.values.copy()
+
+ # deal with replacing values with objects (strings) that match but
+ # whose replacement is not a string (numeric, nan, object)
+ if isnull(value) or not isinstance(value, basestring):
+ def re_replacer(s):
+ try:
+ return value if rx.search(s) is not None else s
+ except TypeError:
+ return s
+ else:
+ # value is guaranteed to be a string here, s can be either a string
+ # or null if it's null it gets returned
+ def re_replacer(s):
+ try:
+ return rx.sub(value, s)
+ except TypeError:
+ return s
+
+ f = np.vectorize(re_replacer, otypes=[self.dtype])
+
+ try:
+ filt = map(self.items.get_loc, filter)
+ except TypeError:
+ filt = slice(None)
+
+ new_values[filt] = f(new_values[filt])
+
+ return [self if inplace else make_block(new_values, self.items,
+ self.ref_items, fastpath=True)]
+
+
class DatetimeBlock(Block):
_can_hold_na = True
@@ -1136,7 +1240,9 @@ def _verify_integrity(self):
if len(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
- 'block items')
+ 'block items\n# manager items: {0}, # '
+ 'tot_items: {1}'.format(len(self.items),
+ tot_items))
def apply(self, f, *args, **kwargs):
""" iterate over the blocks, collect and create a new block manager
@@ -1203,7 +1309,7 @@ def convert(self, *args, **kwargs):
def replace(self, *args, **kwargs):
return self.apply('replace', *args, **kwargs)
- def replace_list(self, src_lst, dest_lst, inplace=False):
+ def replace_list(self, src_lst, dest_lst, inplace=False, regex=False):
""" do a list replace """
# figure out our mask a-priori to avoid repeated replacements
@@ -1220,16 +1326,20 @@ def comp(s):
# its possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [ blk if inplace else blk.copy() ]
- for i, d in enumerate(dest_lst):
+ for i, (s, d) in enumerate(zip(src_lst, dest_lst)):
new_rb = []
for b in rb:
- # get our mask for this element, sized to this
- # particular block
- m = masks[i][b.ref_locs]
- if m.any():
- new_rb.extend(b.putmask(m, d, inplace=True))
+ if b.dtype == np.object_:
+ new_rb.extend(b.replace(s, d, inplace=inplace,
+ regex=regex))
else:
- new_rb.append(b)
+ # get our mask for this element, sized to this
+ # particular block
+ m = masks[i][b.ref_locs]
+ if m.any():
+ new_rb.extend(b.putmask(m, d, inplace=True))
+ else:
+ new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
@@ -2165,7 +2275,6 @@ def _lcd_dtype(l):
else:
return _lcd_dtype(counts[FloatBlock])
-
def _consolidate(blocks, items):
"""
Merge blocks having same dtype
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index de49eca7dab1c..8e48ef094c419 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4,7 +4,7 @@
from StringIO import StringIO
import cPickle as pickle
import operator
-import os
+import re
import unittest
import nose
@@ -6131,9 +6131,8 @@ def test_replace_inplace(self):
res = tsframe.replace(nan, 0, inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
- tsframe = self.tsframe.copy()
- res = tsframe.replace(nan, method='pad', inplace=True)
- assert_frame_equal(tsframe, self.tsframe.fillna(method='pad'))
+ self.assertRaises(TypeError, self.tsframe.replace, nan, method='pad',
+ inplace=True)
# mixed type
self.mixed_frame['foo'][5:20] = nan
@@ -6144,9 +6143,499 @@ def test_replace_inplace(self):
assert_frame_equal(result, expected)
tsframe = self.tsframe.copy()
- res = tsframe.replace([nan], [0], inplace=True)
+ tsframe.replace([nan], [0], inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
+ def test_regex_replace_scalar(self):
+ obj = {'a': list('ab..'), 'b': list('efgh')}
+ dfobj = DataFrame(obj)
+ mix = {'a': range(4), 'b': list('ab..')}
+ dfmix = DataFrame(mix)
+
+ ### simplest cases
+ ## regex -> value
+ # obj frame
+ res = dfobj.replace(r'\s*\.\s*', nan, regex=True)
+ assert_frame_equal(dfobj, res.fillna('.'))
+
+ # mixed
+ res = dfmix.replace(r'\s*\.\s*', nan, regex=True)
+ assert_frame_equal(dfmix, res.fillna('.'))
+
+ ## regex -> regex
+ # obj frame
+ res = dfobj.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
+ objc = obj.copy()
+ objc['a'] = ['a', 'b', '...', '...']
+ expec = DataFrame(objc)
+ assert_frame_equal(res, expec)
+
+ # with mixed
+ res = dfmix.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
+ mixc = mix.copy()
+ mixc['b'] = ['a', 'b', '...', '...']
+ expec = DataFrame(mixc)
+ assert_frame_equal(res, expec)
+
+ # everything with compiled regexs as well
+ res = dfobj.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
+ assert_frame_equal(dfobj, res.fillna('.'))
+
+ # mixed
+ res = dfmix.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
+ assert_frame_equal(dfmix, res.fillna('.'))
+
+ ## regex -> regex
+ # obj frame
+ res = dfobj.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
+ objc = obj.copy()
+ objc['a'] = ['a', 'b', '...', '...']
+ expec = DataFrame(objc)
+ assert_frame_equal(res, expec)
+
+ # with mixed
+ res = dfmix.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
+ mixc = mix.copy()
+ mixc['b'] = ['a', 'b', '...', '...']
+ expec = DataFrame(mixc)
+ assert_frame_equal(res, expec)
+
+ res = dfmix.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1')
+ mixc = mix.copy()
+ mixc['b'] = ['a', 'b', '...', '...']
+ expec = DataFrame(mixc)
+ assert_frame_equal(res, expec)
+
+ res = dfmix.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1')
+ mixc = mix.copy()
+ mixc['b'] = ['a', 'b', '...', '...']
+ expec = DataFrame(mixc)
+ assert_frame_equal(res, expec)
+
+ def test_regex_replace_scalar_inplace(self):
+ obj = {'a': list('ab..'), 'b': list('efgh')}
+ dfobj = DataFrame(obj)
+ mix = {'a': range(4), 'b': list('ab..')}
+ dfmix = DataFrame(mix)
+
+ ### simplest cases
+ ## regex -> value
+ # obj frame
+ res = dfobj.copy()
+ res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
+ assert_frame_equal(dfobj, res.fillna('.'))
+
+ # mixed
+ res = dfmix.copy()
+ res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
+ assert_frame_equal(dfmix, res.fillna('.'))
+
+ ## regex -> regex
+ # obj frame
+ res = dfobj.copy()
+ res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
+ objc = obj.copy()
+ objc['a'] = ['a', 'b', '...', '...']
+ expec = DataFrame(objc)
+ assert_frame_equal(res, expec)
+
+ # with mixed
+ res = dfmix.copy()
+ res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
+ mixc = mix.copy()
+ mixc['b'] = ['a', 'b', '...', '...']
+ expec = DataFrame(mixc)
+ assert_frame_equal(res, expec)
+
+ # everything with compiled regexs as well
+ res = dfobj.copy()
+ res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
+ assert_frame_equal(dfobj, res.fillna('.'))
+
+ # mixed
+ res = dfmix.copy()
+ res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
+ assert_frame_equal(dfmix, res.fillna('.'))
+
+ ## regex -> regex
+ # obj frame
+ res = dfobj.copy()
+ res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
+ inplace=True)
+ objc = obj.copy()
+ objc['a'] = ['a', 'b', '...', '...']
+ expec = DataFrame(objc)
+ assert_frame_equal(res, expec)
+
+ # with mixed
+ res = dfmix.copy()
+ res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
+ inplace=True)
+ mixc = mix.copy()
+ mixc['b'] = ['a', 'b', '...', '...']
+ expec = DataFrame(mixc)
+ assert_frame_equal(res, expec)
+
+ res = dfobj.copy()
+ res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
+ assert_frame_equal(dfobj, res.fillna('.'))
+
+ # mixed
+ res = dfmix.copy()
+ res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
+ assert_frame_equal(dfmix, res.fillna('.'))
+
+ ## regex -> regex
+ # obj frame
+ res = dfobj.copy()
+ res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
+ objc = obj.copy()
+ objc['a'] = ['a', 'b', '...', '...']
+ expec = DataFrame(objc)
+ assert_frame_equal(res, expec)
+
+ # with mixed
+ res = dfmix.copy()
+ res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
+ mixc = mix.copy()
+ mixc['b'] = ['a', 'b', '...', '...']
+ expec = DataFrame(mixc)
+ assert_frame_equal(res, expec)
+
+ # everything with compiled regexs as well
+ res = dfobj.copy()
+ res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
+ assert_frame_equal(dfobj, res.fillna('.'))
+
+ # mixed
+ res = dfmix.copy()
+ res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
+ assert_frame_equal(dfmix, res.fillna('.'))
+
+ ## regex -> regex
+ # obj frame
+ res = dfobj.copy()
+ res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
+ inplace=True)
+ objc = obj.copy()
+ objc['a'] = ['a', 'b', '...', '...']
+ expec = DataFrame(objc)
+ assert_frame_equal(res, expec)
+
+ # with mixed
+ res = dfmix.copy()
+ res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
+ inplace=True)
+ mixc = mix.copy()
+ mixc['b'] = ['a', 'b', '...', '...']
+ expec = DataFrame(mixc)
+ assert_frame_equal(res, expec)
+
+ def test_regex_replace_list_obj(self):
+ obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
+ dfobj = DataFrame(obj)
+
+ ## lists of regexes and values
+ # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
+ to_replace_res = [r'\s*\.\s*', r'e|f|g']
+ values = [nan, 'crap']
+ res = dfobj.replace(to_replace_res, values, regex=True)
+ expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
+ ['h'], 'c': ['h', 'crap', 'l', 'o']})
+ assert_frame_equal(res, expec)
+
+ # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
+ to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
+ values = [r'\1\1', r'\1_crap']
+ res = dfobj.replace(to_replace_res, values, regex=True)
+ expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
+ 'f_crap',
+ 'g_crap', 'h'],
+ 'c': ['h', 'e_crap', 'l', 'o']})
+
+ assert_frame_equal(res, expec)
+
+ # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
+ # or vN)]
+ to_replace_res = [r'\s*(\.)\s*', r'e']
+ values = [r'\1\1', r'crap']
+ res = dfobj.replace(to_replace_res, values, regex=True)
+ expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
+ 'h'],
+ 'c': ['h', 'crap', 'l', 'o']})
+ assert_frame_equal(res, expec)
+
+ to_replace_res = [r'\s*(\.)\s*', r'e']
+ values = [r'\1\1', r'crap']
+ res = dfobj.replace(value=values, regex=to_replace_res)
+ expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
+ 'h'],
+ 'c': ['h', 'crap', 'l', 'o']})
+ assert_frame_equal(res, expec)
+
+ def test_regex_replace_list_obj_inplace(self):
+ ### same as above with inplace=True
+ ## lists of regexes and values
+ obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
+ dfobj = DataFrame(obj)
+
+ ## lists of regexes and values
+ # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
+ to_replace_res = [r'\s*\.\s*', r'e|f|g']
+ values = [nan, 'crap']
+ res = dfobj.copy()
+ res.replace(to_replace_res, values, inplace=True, regex=True)
+ expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
+ ['h'], 'c': ['h', 'crap', 'l', 'o']})
+ assert_frame_equal(res, expec)
+
+ # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
+ to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
+ values = [r'\1\1', r'\1_crap']
+ res = dfobj.copy()
+ res.replace(to_replace_res, values, inplace=True, regex=True)
+ expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
+ 'f_crap',
+ 'g_crap', 'h'],
+ 'c': ['h', 'e_crap', 'l', 'o']})
+
+ assert_frame_equal(res, expec)
+
+ # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
+ # or vN)]
+ to_replace_res = [r'\s*(\.)\s*', r'e']
+ values = [r'\1\1', r'crap']
+ res = dfobj.copy()
+ res.replace(to_replace_res, values, inplace=True, regex=True)
+ expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
+ 'h'],
+ 'c': ['h', 'crap', 'l', 'o']})
+ assert_frame_equal(res, expec)
+
+ to_replace_res = [r'\s*(\.)\s*', r'e']
+ values = [r'\1\1', r'crap']
+ res = dfobj.copy()
+ res.replace(value=values, regex=to_replace_res, inplace=True)
+ expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
+ 'h'],
+ 'c': ['h', 'crap', 'l', 'o']})
+ assert_frame_equal(res, expec)
+
+ def test_regex_replace_list_mixed(self):
+ ## mixed frame to make sure this doesn't break things
+ mix = {'a': range(4), 'b': list('ab..')}
+ dfmix = DataFrame(mix)
+
+ ## lists of regexes and values
+ # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
+ to_replace_res = [r'\s*\.\s*', r'a']
+ values = [nan, 'crap']
+ mix2 = {'a': range(4), 'b': list('ab..'), 'c': list('halo')}
+ dfmix2 = DataFrame(mix2)
+ res = dfmix2.replace(to_replace_res, values, regex=True)
+ expec = DataFrame({'a': mix2['a'], 'b': ['crap', 'b', nan, nan],
+ 'c': ['h', 'crap', 'l', 'o']})
+ assert_frame_equal(res, expec)
+
+ # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
+ to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
+ values = [r'\1\1', r'\1_crap']
+ res = dfmix.replace(to_replace_res, values, regex=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
+ '..']})
+
+ assert_frame_equal(res, expec)
+
+ # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
+ # or vN)]
+ to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
+ values = [r'\1\1', r'crap', r'\1_crap']
+ res = dfmix.replace(to_replace_res, values, regex=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
+ assert_frame_equal(res, expec)
+
+ to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
+ values = [r'\1\1', r'crap', r'\1_crap']
+ res = dfmix.replace(regex=to_replace_res, value=values)
+ expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
+ assert_frame_equal(res, expec)
+
+ def test_regex_replace_list_mixed_inplace(self):
+ mix = {'a': range(4), 'b': list('ab..')}
+ dfmix = DataFrame(mix)
+ # the same inplace
+ ## lists of regexes and values
+ # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
+ to_replace_res = [r'\s*\.\s*', r'a']
+ values = [nan, 'crap']
+ res = dfmix.copy()
+ res.replace(to_replace_res, values, inplace=True, regex=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b', nan, nan]})
+ assert_frame_equal(res, expec)
+
+ # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
+ to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
+ values = [r'\1\1', r'\1_crap']
+ res = dfmix.copy()
+ res.replace(to_replace_res, values, inplace=True, regex=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
+ '..']})
+
+ assert_frame_equal(res, expec)
+
+ # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
+ # or vN)]
+ to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
+ values = [r'\1\1', r'crap', r'\1_crap']
+ res = dfmix.copy()
+ res.replace(to_replace_res, values, inplace=True, regex=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
+ assert_frame_equal(res, expec)
+
+ to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
+ values = [r'\1\1', r'crap', r'\1_crap']
+ res = dfmix.copy()
+ res.replace(regex=to_replace_res, value=values, inplace=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
+ assert_frame_equal(res, expec)
+
+ def test_regex_replace_dict_mixed(self):
+ mix = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
+ dfmix = DataFrame(mix)
+
+ ## dicts
+ # single dict {re1: v1}, search the whole frame
+ # need test for this...
+
+ # list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole
+ # frame
+ res = dfmix.replace({'b': r'\s*\.\s*'}, {'b': nan}, regex=True)
+ res2 = dfmix.copy()
+ res2.replace({'b': r'\s*\.\s*'}, {'b': nan}, inplace=True, regex=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
+ mix['c']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+
+ # list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the
+ # whole frame
+ res = dfmix.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True)
+ res2 = dfmix.copy()
+ res2.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, inplace=True,
+ regex=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
+ mix['c']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+
+ res = dfmix.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'})
+ res2 = dfmix.copy()
+ res2.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'},
+ inplace=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
+ mix['c']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+
+ # scalar -> dict
+ # to_replace regex, {value: value}
+ res = dfmix.replace('a', {'b': nan}, regex=True)
+ res2 = dfmix.copy()
+ res2.replace('a', {'b': nan}, regex=True, inplace=True)
+ expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
+ mix['c']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+
+ res = dfmix.replace('a', {'b': nan}, regex=True)
+ res2 = dfmix.copy()
+ res2.replace(regex='a', value={'b': nan}, inplace=True)
+ expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
+ mix['c']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+
+ def test_regex_replace_dict_nested(self):
+ # nested dicts will not work until this is implemented for Series
+ mix = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
+ dfmix = DataFrame(mix)
+ res = dfmix.replace({'b': {r'\s*\.\s*': nan}}, regex=True)
+ res2 = dfmix.copy()
+ res2.replace({'b': {r'\s*\.\s*': nan}}, inplace=True, regex=True)
+ print res2
+ expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
+ mix['c']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+
+ def test_regex_replace_list_to_scalar(self):
+ mix = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
+ df = DataFrame(mix)
+ res = df.replace([r'\s*\.\s*', 'a|b'], nan, regex=True)
+ res2 = df.copy()
+ res3 = df.copy()
+ res2.replace([r'\s*\.\s*', 'a|b'], nan, regex=True, inplace=True)
+ res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=nan, inplace=True)
+ expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4, object),
+ 'c': [nan, nan, nan, 'd']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+ assert_frame_equal(res3, expec)
+
+ def test_regex_replace_str_to_numeric(self):
+ # what happens when you try to replace a numeric value with a regex?
+ mix = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
+ df = DataFrame(mix)
+ res = df.replace(r'\s*\.\s*', 0, regex=True)
+ res2 = df.copy()
+ res2.replace(r'\s*\.\s*', 0, inplace=True, regex=True)
+ res3 = df.copy()
+ res3.replace(regex=r'\s*\.\s*', value=0, inplace=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', 0, 0], 'c':
+ mix['c']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+ assert_frame_equal(res3, expec)
+
+ def test_regex_replace_regex_list_to_numeric(self):
+ mix = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
+ df = DataFrame(mix)
+ res = df.replace([r'\s*\.\s*', 'b'], 0, regex=True)
+ res2 = df.copy()
+ res2.replace([r'\s*\.\s*', 'b'], 0, regex=True, inplace=True)
+ res3 = df.copy()
+ res3.replace(regex=[r'\s*\.\s*', 'b'], value=0, inplace=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['a', 0, 0, 0], 'c': ['a', 0,
+ nan,
+ 'd']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+ assert_frame_equal(res3, expec)
+
+ def test_regex_replace_series_of_regexes(self):
+ mix = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
+ df = DataFrame(mix)
+ s1 = Series({'b': r'\s*\.\s*'})
+ s2 = Series({'b': nan})
+ res = df.replace(s1, s2, regex=True)
+ res2 = df.copy()
+ res2.replace(s1, s2, inplace=True, regex=True)
+ res3 = df.copy()
+ res3.replace(regex=s1, value=s2, inplace=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
+ mix['c']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+ assert_frame_equal(res3, expec)
+
+ def test_regex_replace_numeric_to_object_conversion(self):
+ mix = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
+ df = DataFrame(mix)
+ res = df.replace(0, 'a')
+ expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']})
+ assert_frame_equal(res, expec)
+ self.assertEqual(res.a.dtype, np.object_)
+
def test_replace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
@@ -6163,7 +6652,7 @@ def test_replace(self):
df = DataFrame(index=['a', 'b'])
assert_frame_equal(df, df.replace(5, 7))
- def test_resplace_series_dict(self):
+ def test_replace_series_dict(self):
# from GH 3064
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
result = df.replace(0, {'zero': 0.5, 'one': 1.0})
@@ -6227,48 +6716,32 @@ def test_replace_mixed(self):
expected.iloc[1,1] = m[1]
assert_frame_equal(result,expected)
- def test_replace_interpolate(self):
- padded = self.tsframe.replace(nan, method='pad')
- assert_frame_equal(padded, self.tsframe.fillna(method='pad'))
+ def test_interpolate(self):
+ pass
+
+ def test_replace_value_is_none(self):
+ self.assertRaises(TypeError, self.tsframe.replace, nan, method='pad')
+ orig_value = self.tsframe.iloc[0, 0]
+ orig2 = self.tsframe.iloc[1, 0]
- result = self.tsframe.replace(to_replace={'A': nan}, method='pad',
+ self.tsframe.iloc[0, 0] = nan
+ self.tsframe.iloc[1, 0] = 1
+
+ result = self.tsframe.replace(to_replace={nan: 0}, method='pad',
axis=1)
expected = self.tsframe.T.replace(
- to_replace={'A': nan}, method='pad').T
+ to_replace={nan: 0}, method='pad').T
assert_frame_equal(result, expected)
- result = self.tsframe.replace(to_replace={'A': nan, 'B': -1e8},
+ result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8},
method='bfill')
tsframe = self.tsframe.copy()
- b = tsframe['B']
- b[b == -1e8] = nan
- tsframe['B'] = b
- expected = tsframe.fillna(method='bfill')
+ tsframe.iloc[0, 0] = 0
+ tsframe.iloc[1, 0] = -1e8
+ expected = tsframe
assert_frame_equal(expected, result)
-
- bfilled = self.tsframe.replace(nan, method='bfill')
- assert_frame_equal(bfilled, self.tsframe.fillna(method='bfill'))
-
- frame = self.tsframe.copy()
- frame[frame == 0] = 1
- frame.ix[-5:, 2] = 0
- result = frame.replace([nan, 0], method='pad')
-
- expected = frame.copy()
- expected[expected == 0] = nan
- expected = expected.fillna(method='pad')
- assert_frame_equal(result, expected)
-
- result = self.mixed_frame.replace(nan, method='pad', axis=1)
- expected = self.mixed_frame.fillna(method='pad', axis=1)
- assert_frame_equal(result, expected)
-
- # no nans
- self.tsframe['A'][:5] = 1e8
- result = self.tsframe.replace(1e8, method='bfill')
- self.tsframe['A'].replace(1e8, nan, inplace=True)
- expected = self.tsframe.fillna(method='bfill')
- assert_frame_equal(result, expected)
+ self.tsframe.iloc[0, 0] = orig_value
+ self.tsframe.iloc[1, 0] = orig2
def test_replace_for_new_dtypes(self):
@@ -6351,7 +6824,7 @@ def test_replace_input_formats(self):
expected[k] = v.replace(to_rep[k], 0)
assert_frame_equal(filled, DataFrame(expected))
- self.assertRaises(ValueError, df.replace, to_rep, [np.nan, 0, ''])
+ self.assertRaises(TypeError, df.replace, to_rep, [np.nan, 0, ''])
# scalar to dict
values = {'A': 0, 'B': -1, 'C': 'missing'}
@@ -6389,8 +6862,8 @@ def test_replace_axis(self):
zero_filled = self.tsframe.replace(nan, 0, axis=1)
assert_frame_equal(zero_filled, self.tsframe.fillna(0, axis=1))
- padded = self.tsframe.replace(nan, method='pad', axis=1)
- assert_frame_equal(padded, self.tsframe.fillna(method='pad', axis=1))
+ self.assertRaises(TypeError, self.tsframe.replace, method='pad',
+ axis=1)
# mixed type
self.mixed_frame['foo'][5:20] = nan
@@ -6400,22 +6873,9 @@ def test_replace_axis(self):
expected = self.mixed_frame.fillna(value=-1e8, axis=1)
assert_frame_equal(result, expected)
- def test_replace_limit(self):
- padded = self.tsframe.replace(nan, method='pad', limit=2)
- assert_frame_equal(padded, self.tsframe.fillna(method='pad',
- limit=2))
- bfilled = self.tsframe.replace(nan, method='bfill', limit=2)
- assert_frame_equal(padded, self.tsframe.fillna(method='bfill',
- limit=2))
-
- padded = self.tsframe.replace(nan, method='pad', axis=1, limit=2)
- assert_frame_equal(padded, self.tsframe.fillna(method='pad',
- axis=1, limit=2))
-
- bfill = self.tsframe.replace(nan, method='bfill', axis=1, limit=2)
- assert_frame_equal(padded, self.tsframe.fillna(method='bfill',
- axis=1, limit=2))
+ def test_replace_limit(self):
+ pass
def test_combine_multiple_frames_dtypes(self):
from pandas import concat
| addresses #2285. cc @jreback and #3582.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3584 | 2013-05-13T03:37:37Z | 2013-05-17T19:24:58Z | 2013-05-17T19:24:58Z | 2014-06-26T07:33:10Z |
Moving pandasjson back into mainline pandas | diff --git a/LICENSES/ULTRAJSON_LICENSE b/LICENSES/ULTRAJSON_LICENSE
new file mode 100644
index 0000000000000..defca46e7f820
--- /dev/null
+++ b/LICENSES/ULTRAJSON_LICENSE
@@ -0,0 +1,34 @@
+Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the ESN Social Software AB nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
+http://code.google.com/p/stringencoders/
+Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
+
+Numeric decoder derived from from TCL library
+http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
+ * Copyright (c) 1988-1993 The Regents of the University of California.
+ * Copyright (c) 1994 Sun Microsystems, Inc.
\ No newline at end of file
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ad1429fcea1ca..ffc02b5407a33 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1659,8 +1659,8 @@ def convert_objects(self, convert_dates=True, convert_numeric=False, copy=True):
-------
converted : DataFrame
"""
- return self._constructor(self._data.convert(convert_dates=convert_dates,
- convert_numeric=convert_numeric,
+ return self._constructor(self._data.convert(convert_dates=convert_dates,
+ convert_numeric=convert_numeric,
copy=copy))
#----------------------------------------------------------------------
@@ -3321,7 +3321,7 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
a reference to the filled object, which is self if inplace=True
limit : int, default None
Maximum size gap to forward or backward fill
- downcast : dict, default is None, a dict of item->dtype of what to
+ downcast : dict, default is None, a dict of item->dtype of what to
downcast if possible
See also
@@ -3368,7 +3368,7 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
result[k].fillna(v, inplace=True)
return result
else:
- new_data = self._data.fillna(value, inplace=inplace,
+ new_data = self._data.fillna(value, inplace=inplace,
downcast=downcast)
if inplace:
@@ -3756,8 +3756,8 @@ def combine(self, other, func, fill_value=None, overwrite=True):
result[col] = arr
# convert_objects just in case
- return self._constructor(result,
- index=new_index,
+ return self._constructor(result,
+ index=new_index,
columns=new_columns).convert_objects(copy=False)
def combine_first(self, other):
@@ -5278,6 +5278,106 @@ def mask(self, cond):
"""
return self.where(~cond, NA)
+
+@classmethod
+def from_json(cls, json, orient="columns", dtype=None, numpy=True):
+ """
+ Convert JSON string to DataFrame
+
+ Parameters
+ ----------
+ json : The JSON string to parse.
+ orient : {'split', 'records', 'index', 'columns', 'values'},
+ default 'columns'
+ The format of the JSON string
+ split : dict like
+ {index -> [index], columns -> [columns], data -> [values]}
+ records : list like [{column -> value}, ... , {column -> value}]
+ index : dict like {index -> {column -> value}}
+ columns : dict like {column -> {index -> value}}
+ values : just the values array
+ dtype : dtype of the resulting DataFrame
+ nupmpy: direct decoding to numpy arrays. default True but falls back
+ to standard decoding if a problem occurs.
+
+ Returns
+ -------
+ result : DataFrame
+ """
+ from pandas.json import loads
+
+ df = None
+
+ if dtype is not None and orient == "split":
+ numpy = False
+
+ if numpy:
+ try:
+ if orient == "columns":
+ args = loads(json, dtype=dtype, numpy=True, labelled=True)
+ if args:
+ args = (args[0].T, args[2], args[1])
+ df = DataFrame(*args)
+ elif orient == "split":
+ decoded = loads(json, dtype=dtype, numpy=True)
+ decoded = dict((str(k), v) for k, v in decoded.iteritems())
+ df = DataFrame(**decoded)
+ elif orient == "values":
+ df = DataFrame(loads(json, dtype=dtype, numpy=True))
+ else:
+ df = DataFrame(*loads(json, dtype=dtype, numpy=True,
+ labelled=True))
+ except ValueError:
+ numpy = False
+ if not numpy:
+ if orient == "columns":
+ df = DataFrame(loads(json), dtype=dtype)
+ elif orient == "split":
+ decoded = dict((str(k), v)
+ for k, v in loads(json).iteritems())
+ df = DataFrame(dtype=dtype, **decoded)
+ elif orient == "index":
+ df = DataFrame(loads(json), dtype=dtype).T
+ else:
+ df = DataFrame(loads(json), dtype=dtype)
+
+ return df
+DataFrame.from_json = from_json
+
+
+def to_json(self, orient="columns", double_precision=10,
+ force_ascii=True):
+ """
+ Convert DataFrame to a JSON string.
+
+ Note NaN's and None will be converted to null and datetime objects
+ will be converted to UNIX timestamps.
+
+ Parameters
+ ----------
+ orient : {'split', 'records', 'index', 'columns', 'values'},
+ default 'columns'
+ The format of the JSON string
+ split : dict like
+ {index -> [index], columns -> [columns], data -> [values]}
+ records : list like [{column -> value}, ... , {column -> value}]
+ index : dict like {index -> {column -> value}}
+ columns : dict like {column -> {index -> value}}
+ values : just the values array
+ double_precision : The number of decimal places to use when encoding
+ floating point values, default 10.
+ force_ascii : force encoded string to be ASCII, default True.
+
+ Returns
+ -------
+ result : JSON compatible string
+ """
+ from pandas.json import dumps
+ return dumps(self, orient=orient, double_precision=double_precision,
+ ensure_ascii=force_ascii)
+DataFrame.to_json = to_json
+
+
_EMPTY_SERIES = Series([])
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 3509e226d46fb..14a8839fe3256 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1962,7 +1962,7 @@ def dot(self, other):
Parameters
----------
- other : Series or DataFrame
+ other : Series or DataFrame
Returns
-------
@@ -3241,6 +3241,88 @@ def str(self):
from pandas.core.strings import StringMethods
return StringMethods(self)
+
+@classmethod
+def from_json(cls, json, orient="index", dtype=None, numpy=True):
+ """
+ Convert JSON string to Series
+
+ Parameters
+ ----------
+ json : The JSON string to parse.
+ orient : {'split', 'records', 'index'}, default 'index'
+ The format of the JSON string
+ split : dict like
+ {index -> [index], name -> name, data -> [values]}
+ records : list like [value, ... , value]
+ index : dict like {index -> value}
+ dtype : dtype of the resulting Series
+ nupmpy: direct decoding to numpy arrays. default True but falls back
+ to standard decoding if a problem occurs.
+
+ Returns
+ -------
+ result : Series
+ """
+ from pandas.json import loads
+ s = None
+
+ if dtype is not None and orient == "split":
+ numpy = False
+
+ if numpy:
+ try:
+ if orient == "split":
+ decoded = loads(json, dtype=dtype, numpy=True)
+ decoded = dict((str(k), v) for k, v in decoded.iteritems())
+ s = Series(**decoded)
+ elif orient == "columns" or orient == "index":
+ s = Series(*loads(json, dtype=dtype, numpy=True,
+ labelled=True))
+ else:
+ s = Series(loads(json, dtype=dtype, numpy=True))
+ except ValueError:
+ numpy = False
+ if not numpy:
+ if orient == "split":
+ decoded = dict((str(k), v)
+ for k, v in loads(json).iteritems())
+ s = Series(dtype=dtype, **decoded)
+ else:
+ s = Series(loads(json), dtype=dtype)
+
+ return s
+Series.from_json = from_json
+
+def to_json(self, orient="index", double_precision=10, force_ascii=True):
+ """
+ Convert Series to a JSON string
+
+ Note NaN's and None will be converted to null and datetime objects
+ will be converted to UNIX timestamps.
+
+ Parameters
+ ----------
+ orient : {'split', 'records', 'index'}, default 'index'
+ The format of the JSON string
+ split : dict like
+ {index -> [index], name -> name, data -> [values]}
+ records : list like [value, ... , value]
+ index : dict like {index -> value}
+ double_precision : The number of decimal places to use when encoding
+ floating point values, default 10.
+ force_ascii : force encoded string to be ASCII, default True.
+
+ Returns
+ -------
+ result : JSON compatible string
+ """
+ from pandas.json import dumps
+ return dumps(self, orient=orient, double_precision=double_precision,
+ ensure_ascii=force_ascii)
+Series.to_json = to_json
+
+
_INDEX_TYPES = ndarray, Index, list, tuple
#------------------------------------------------------------------------------
diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py
new file mode 100644
index 0000000000000..506aa382487d6
--- /dev/null
+++ b/pandas/io/tests/test_json/test_pandas.py
@@ -0,0 +1,240 @@
+# pylint: disable-msg=W0612,E1101
+from copy import deepcopy
+from datetime import datetime, timedelta
+from StringIO import StringIO
+import cPickle as pickle
+import operator
+import os
+import unittest
+
+import numpy as np
+
+from pandas import Series, DataFrame, DatetimeIndex
+import pandas as pd
+
+from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
+ assert_series_equal)
+import pandas.util.testing as tm
+
+_seriesd = tm.getSeriesData()
+_tsd = tm.getTimeSeriesData()
+
+_frame = DataFrame(_seriesd)
+_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
+_intframe = DataFrame(dict((k, v.astype(int))
+ for k, v in _seriesd.iteritems()))
+
+_tsframe = DataFrame(_tsd)
+
+_mixed_frame = _frame.copy()
+
+
+class TestPandasObjects(unittest.TestCase):
+
+ def setUp(self):
+ self.ts = tm.makeTimeSeries()
+ self.ts.name = 'ts'
+
+ self.series = tm.makeStringSeries()
+ self.series.name = 'series'
+
+ self.objSeries = tm.makeObjectSeries()
+ self.objSeries.name = 'objects'
+
+ self.empty_series = Series([], index=[])
+ self.empty_frame = DataFrame({})
+
+ self.frame = _frame.copy()
+ self.frame2 = _frame2.copy()
+ self.intframe = _intframe.copy()
+ self.tsframe = _tsframe.copy()
+ self.mixed_frame = _mixed_frame.copy()
+
+ def test_frame_from_json_to_json(self):
+
+ def _check_orient(df, orient, dtype=None, numpy=True):
+ df = df.sort()
+ dfjson = df.to_json(orient=orient)
+ unser = DataFrame.from_json(dfjson, orient=orient, dtype=dtype,
+ numpy=numpy)
+ unser = unser.sort()
+ if df.index.dtype.type == np.datetime64:
+ unser.index = DatetimeIndex(unser.index.values.astype('i8'))
+ if orient == "records":
+ # index is not captured in this orientation
+ assert_almost_equal(df.values, unser.values)
+ self.assert_(df.columns.equals(unser.columns))
+ elif orient == "values":
+ # index and cols are not captured in this orientation
+ assert_almost_equal(df.values, unser.values)
+ elif orient == "split":
+ # index and col labels might not be strings
+ unser.index = [str(i) for i in unser.index]
+ unser.columns = [str(i) for i in unser.columns]
+ unser = unser.sort()
+ assert_almost_equal(df.values, unser.values)
+ else:
+ assert_frame_equal(df, unser)
+
+ def _check_all_orients(df, dtype=None):
+ _check_orient(df, "columns", dtype=dtype)
+ _check_orient(df, "records", dtype=dtype)
+ _check_orient(df, "split", dtype=dtype)
+ _check_orient(df, "index", dtype=dtype)
+ _check_orient(df, "values", dtype=dtype)
+
+ _check_orient(df, "columns", dtype=dtype, numpy=False)
+ _check_orient(df, "records", dtype=dtype, numpy=False)
+ _check_orient(df, "split", dtype=dtype, numpy=False)
+ _check_orient(df, "index", dtype=dtype, numpy=False)
+ _check_orient(df, "values", dtype=dtype, numpy=False)
+
+ # basic
+ _check_all_orients(self.frame)
+ self.assertEqual(self.frame.to_json(),
+ self.frame.to_json(orient="columns"))
+
+ _check_all_orients(self.intframe, dtype=self.intframe.values.dtype)
+
+ # big one
+ # index and columns are strings as all unserialised JSON object keys
+ # are assumed to be strings
+ biggie = DataFrame(np.zeros((200, 4)),
+ columns=[str(i) for i in range(4)],
+ index=[str(i) for i in range(200)])
+ _check_all_orients(biggie)
+
+ # dtypes
+ _check_all_orients(DataFrame(biggie, dtype=np.float64),
+ dtype=np.float64)
+ _check_all_orients(DataFrame(biggie, dtype=np.int), dtype=np.int)
+ _check_all_orients(DataFrame(biggie, dtype='<U3'), dtype='<U3')
+
+ # empty
+ _check_all_orients(self.empty_frame)
+
+ # time series data
+ _check_all_orients(self.tsframe)
+
+ # mixed data
+ index = pd.Index(['a', 'b', 'c', 'd', 'e'])
+ data = {
+ 'A': [0., 1., 2., 3., 4.],
+ 'B': [0., 1., 0., 1., 0.],
+ 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
+ 'D': [True, False, True, False, True]
+ }
+ df = DataFrame(data=data, index=index)
+ _check_orient(df, "split")
+ _check_orient(df, "records")
+ _check_orient(df, "values")
+ _check_orient(df, "columns")
+ # index oriented is problematic as it is read back in in a transposed
+ # state, so the columns are interpreted as having mixed data and
+ # given object dtypes.
+ # force everything to have object dtype beforehand
+ _check_orient(df.transpose().transpose(), "index")
+
+ def test_frame_from_json_bad_data(self):
+ self.assertRaises(ValueError, DataFrame.from_json, '{"key":b:a:d}')
+
+ # too few indices
+ json = ('{"columns":["A","B"],'
+ '"index":["2","3"],'
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}"')
+ self.assertRaises(ValueError, DataFrame.from_json, json,
+ orient="split")
+
+ # too many columns
+ json = ('{"columns":["A","B","C"],'
+ '"index":["1","2","3"],'
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}"')
+ self.assertRaises(AssertionError, DataFrame.from_json, json,
+ orient="split")
+
+ # bad key
+ json = ('{"badkey":["A","B"],'
+ '"index":["2","3"],'
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}"')
+ self.assertRaises(TypeError, DataFrame.from_json, json,
+ orient="split")
+
+ def test_frame_from_json_nones(self):
+ df = DataFrame([[1, 2], [4, 5, 6]])
+ unser = DataFrame.from_json(df.to_json())
+ self.assert_(np.isnan(unser['2'][0]))
+
+ df = DataFrame([['1', '2'], ['4', '5', '6']])
+ unser = DataFrame.from_json(df.to_json())
+ self.assert_(unser['2'][0] is None)
+
+ unser = DataFrame.from_json(df.to_json(), numpy=False)
+ self.assert_(unser['2'][0] is None)
+
+ # infinities get mapped to nulls which get mapped to NaNs during
+ # deserialisation
+ df = DataFrame([[1, 2], [4, 5, 6]])
+ df[2][0] = np.inf
+ unser = DataFrame.from_json(df.to_json())
+ self.assert_(np.isnan(unser['2'][0]))
+
+ df[2][0] = np.NINF
+ unser = DataFrame.from_json(df.to_json())
+ self.assert_(np.isnan(unser['2'][0]))
+
+ def test_frame_to_json_except(self):
+ df = DataFrame([1, 2, 3])
+ self.assertRaises(ValueError, df.to_json, orient="garbage")
+
+ def test_series_from_json_to_json(self):
+
+ def _check_orient(series, orient, dtype=None, numpy=True):
+ series = series.sort_index()
+ unser = Series.from_json(series.to_json(orient=orient),
+ orient=orient, numpy=numpy, dtype=dtype)
+ unser = unser.sort_index()
+ if series.index.dtype.type == np.datetime64:
+ unser.index = DatetimeIndex(unser.index.values.astype('i8'))
+ if orient == "records" or orient == "values":
+ assert_almost_equal(series.values, unser.values)
+ else:
+ try:
+ assert_series_equal(series, unser)
+ except:
+ raise
+ if orient == "split":
+ self.assert_(series.name == unser.name)
+
+ def _check_all_orients(series, dtype=None):
+ _check_orient(series, "columns", dtype=dtype)
+ _check_orient(series, "records", dtype=dtype)
+ _check_orient(series, "split", dtype=dtype)
+ _check_orient(series, "index", dtype=dtype)
+ _check_orient(series, "values", dtype=dtype)
+
+ _check_orient(series, "columns", dtype=dtype, numpy=False)
+ _check_orient(series, "records", dtype=dtype, numpy=False)
+ _check_orient(series, "split", dtype=dtype, numpy=False)
+ _check_orient(series, "index", dtype=dtype, numpy=False)
+ _check_orient(series, "values", dtype=dtype, numpy=False)
+
+ # basic
+ _check_all_orients(self.series)
+ self.assertEqual(self.series.to_json(),
+ self.series.to_json(orient="index"))
+
+ objSeries = Series([str(d) for d in self.objSeries],
+ index=self.objSeries.index,
+ name=self.objSeries.name)
+ _check_all_orients(objSeries)
+ _check_all_orients(self.empty_series)
+ _check_all_orients(self.ts)
+
+ # dtype
+ s = Series(range(6), index=['a','b','c','d','e','f'])
+ _check_all_orients(Series(s, dtype=np.float64), dtype=np.float64)
+ _check_all_orients(Series(s, dtype=np.int), dtype=np.int)
+
+ def test_series_to_json_except(self):
+ s = Series([1, 2, 3])
+ self.assertRaises(ValueError, s.to_json, orient="garbage")
diff --git a/pandas/io/tests/test_json/test_ujson.py b/pandas/io/tests/test_json/test_ujson.py
new file mode 100644
index 0000000000000..833abcb32fa98
--- /dev/null
+++ b/pandas/io/tests/test_json/test_ujson.py
@@ -0,0 +1,1230 @@
+import unittest
+from unittest import TestCase
+
+import pandas.json as ujson
+try:
+ import json
+except ImportError:
+ import simplejson as json
+import math
+import nose
+import platform
+import sys
+import time
+import datetime
+import calendar
+import StringIO
+import re
+from functools import partial
+import pandas.util.py3compat as py3compat
+
+import numpy as np
+from pandas.util.testing import assert_almost_equal
+from numpy.testing import (assert_array_equal,
+ assert_array_almost_equal_nulp,
+ assert_approx_equal)
+from pandas import DataFrame, Series, Index
+import pandas.util.testing as tm
+
+
+def _skip_if_python_ver(skip_major, skip_minor=None):
+ major, minor = sys.version_info[:2]
+ if major == skip_major and (skip_minor is None or minor == skip_minor):
+ raise nose.SkipTest
+
+json_unicode = (json.dumps if sys.version_info[0] >= 3
+ else partial(json.dumps, encoding="utf-8"))
+
+class UltraJSONTests(TestCase):
+ def test_encodeDictWithUnicodeKeys(self):
+ input = { u"key1": u"value1", u"key1": u"value1", u"key1": u"value1", u"key1": u"value1", u"key1": u"value1", u"key1": u"value1" }
+ output = ujson.encode(input)
+
+ input = { u"بن": u"value1", u"بن": u"value1", u"بن": u"value1", u"بن": u"value1", u"بن": u"value1", u"بن": u"value1", u"بن": u"value1" }
+ output = ujson.encode(input)
+
+ pass
+
+ def test_encodeDoubleConversion(self):
+ input = math.pi
+ output = ujson.encode(input)
+ self.assertEquals(round(input, 5), round(json.loads(output), 5))
+ self.assertEquals(round(input, 5), round(ujson.decode(output), 5))
+
+ def test_encodeWithDecimal(self):
+ input = 1.0
+ output = ujson.encode(input)
+ self.assertEquals(output, "1.0")
+
+ def test_encodeDoubleNegConversion(self):
+ input = -math.pi
+ output = ujson.encode(input)
+ self.assertEquals(round(input, 5), round(json.loads(output), 5))
+ self.assertEquals(round(input, 5), round(ujson.decode(output), 5))
+
+ def test_encodeArrayOfNestedArrays(self):
+ input = [[[[]]]] * 20
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ #self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ input = np.array(input)
+ assert_array_equal(input, ujson.decode(output, numpy=True, dtype=input.dtype))
+
+ def test_encodeArrayOfDoubles(self):
+ input = [ 31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ #self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ assert_array_equal(np.array(input), ujson.decode(output, numpy=True))
+
+ def test_doublePrecisionTest(self):
+ input = 30.012345678901234
+ output = ujson.encode(input, double_precision = 15)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(input, ujson.decode(output))
+
+ output = ujson.encode(input, double_precision = 9)
+ self.assertEquals(round(input, 9), json.loads(output))
+ self.assertEquals(round(input, 9), ujson.decode(output))
+
+ output = ujson.encode(input, double_precision = 3)
+ self.assertEquals(round(input, 3), json.loads(output))
+ self.assertEquals(round(input, 3), ujson.decode(output))
+
+ output = ujson.encode(input)
+ self.assertEquals(round(input, 5), json.loads(output))
+ self.assertEquals(round(input, 5), ujson.decode(output))
+
+ def test_invalidDoublePrecision(self):
+ input = 30.12345678901234567890
+ output = ujson.encode(input, double_precision = 20)
+ # should snap to the max, which is 15
+ self.assertEquals(round(input, 15), json.loads(output))
+ self.assertEquals(round(input, 15), ujson.decode(output))
+
+ output = ujson.encode(input, double_precision = -1)
+ # also should snap to the max, which is 15
+ self.assertEquals(round(input, 15), json.loads(output))
+ self.assertEquals(round(input, 15), ujson.decode(output))
+
+ # will throw typeError
+ self.assertRaises(TypeError, ujson.encode, input, double_precision = '9')
+ # will throw typeError
+ self.assertRaises(TypeError, ujson.encode, input, double_precision = None)
+
+
+ def test_encodeStringConversion(self):
+ input = "A string \\ / \b \f \n \r \t"
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, '"A string \\\\ \\/ \\b \\f \\n \\r \\t"')
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+ def test_decodeUnicodeConversion(self):
+ pass
+
+ def test_encodeUnicodeConversion1(self):
+ input = "Räksmörgås اسامة بن محمد بن عوض بن لادن"
+ enc = ujson.encode(input)
+ dec = ujson.decode(enc)
+ self.assertEquals(enc, json_unicode(input))
+ self.assertEquals(dec, json.loads(enc))
+
+ def test_encodeControlEscaping(self):
+ input = "\x19"
+ enc = ujson.encode(input)
+ dec = ujson.decode(enc)
+ self.assertEquals(input, dec)
+ self.assertEquals(enc, json_unicode(input))
+
+
+ def test_encodeUnicodeConversion2(self):
+ input = "\xe6\x97\xa5\xd1\x88"
+ enc = ujson.encode(input)
+ dec = ujson.decode(enc)
+ self.assertEquals(enc, json_unicode(input))
+ self.assertEquals(dec, json.loads(enc))
+
+ def test_encodeUnicodeSurrogatePair(self):
+ _skip_if_python_ver(2, 5)
+ _skip_if_python_ver(2, 6)
+ input = "\xf0\x90\x8d\x86"
+ enc = ujson.encode(input)
+ dec = ujson.decode(enc)
+
+ self.assertEquals(enc, json_unicode(input))
+ self.assertEquals(dec, json.loads(enc))
+
+ def test_encodeUnicode4BytesUTF8(self):
+ _skip_if_python_ver(2, 5)
+ _skip_if_python_ver(2, 6)
+ input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
+ enc = ujson.encode(input)
+ dec = ujson.decode(enc)
+
+ self.assertEquals(enc, json_unicode(input))
+ self.assertEquals(dec, json.loads(enc))
+
+ def test_encodeUnicode4BytesUTF8Highest(self):
+ _skip_if_python_ver(2, 5)
+ _skip_if_python_ver(2, 6)
+ input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
+ enc = ujson.encode(input)
+
+ dec = ujson.decode(enc)
+
+ self.assertEquals(enc, json_unicode(input))
+ self.assertEquals(dec, json.loads(enc))
+
+
+ def test_encodeArrayInArray(self):
+ input = [[[[]]]]
+ output = ujson.encode(input)
+
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ assert_array_equal(np.array(input), ujson.decode(output, numpy=True))
+ pass
+
+ def test_encodeIntConversion(self):
+ input = 31337
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+ def test_encodeIntNegConversion(self):
+ input = -31337
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+
+ def test_encodeLongNegConversion(self):
+ input = -9223372036854775808
+ output = ujson.encode(input)
+
+ outputjson = json.loads(output)
+ outputujson = ujson.decode(output)
+
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+ def test_encodeListConversion(self):
+ input = [ 1, 2, 3, 4 ]
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(input, ujson.decode(output))
+ assert_array_equal(np.array(input), ujson.decode(output, numpy=True))
+ pass
+
+ def test_encodeDictConversion(self):
+ input = { "k1": 1, "k2": 2, "k3": 3, "k4": 4 }
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(input, ujson.decode(output))
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+ def test_encodeNoneConversion(self):
+ input = None
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+ def test_encodeTrueConversion(self):
+ input = True
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+ def test_encodeFalseConversion(self):
+ input = False
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+ # def test_encodeDatetimeConversion(self):
+ # ts = time.time()
+ # input = datetime.datetime.fromtimestamp(ts)
+ # output = ujson.encode(input)
+ # expected = calendar.timegm(input.utctimetuple())
+ # self.assertEquals(int(expected), json.loads(output))
+ # self.assertEquals(int(expected), ujson.decode(output))
+ # pass
+
+ # def test_encodeDateConversion(self):
+ # ts = time.time()
+ # input = datetime.date.fromtimestamp(ts)
+
+ # output = ujson.encode(input)
+ # tup = ( input.year, input.month, input.day, 0, 0, 0 )
+
+ # expected = calendar.timegm(tup)
+ # self.assertEquals(int(expected), json.loads(output))
+ # self.assertEquals(int(expected), ujson.decode(output))
+
+ def test_datetime_nanosecond_unit(self):
+ from datetime import datetime
+ from pandas.lib import Timestamp
+
+ val = datetime.now()
+ stamp = Timestamp(val)
+
+ roundtrip = ujson.decode(ujson.encode(val))
+ self.assert_(roundtrip == stamp.value)
+
+ def test_encodeToUTF8(self):
+ _skip_if_python_ver(2, 5)
+ input = "\xe6\x97\xa5\xd1\x88"
+ enc = ujson.encode(input, ensure_ascii=False)
+ dec = ujson.decode(enc)
+ self.assertEquals(enc, json_unicode(input, ensure_ascii=False))
+ self.assertEquals(dec, json.loads(enc))
+
+ def test_decodeFromUnicode(self):
+ input = u"{\"obj\": 31337}"
+ dec1 = ujson.decode(input)
+ dec2 = ujson.decode(str(input))
+ self.assertEquals(dec1, dec2)
+
+ def test_encodeRecursionMax(self):
+ # 8 is the max recursion depth
+
+ class O2:
+ member = 0
+ pass
+
+ class O1:
+ member = 0
+ pass
+
+ input = O1()
+ input.member = O2()
+ input.member.member = input
+
+ try:
+ output = ujson.encode(input)
+ assert False, "Expected overflow exception"
+ except(OverflowError):
+ pass
+
+ def test_encodeDoubleNan(self):
+ input = np.nan
+ assert ujson.encode(input) == 'null', "Expected null"
+
+ def test_encodeDoubleInf(self):
+ input = np.inf
+ assert ujson.encode(input) == 'null', "Expected null"
+
+ def test_encodeDoubleNegInf(self):
+ input = -np.inf
+ assert ujson.encode(input) == 'null', "Expected null"
+
+
+ def test_decodeJibberish(self):
+ input = "fdsa sda v9sa fdsa"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeBrokenArrayStart(self):
+ input = "["
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeBrokenObjectStart(self):
+ input = "{"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeBrokenArrayEnd(self):
+ input = "]"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeBrokenObjectEnd(self):
+ input = "}"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeStringUnterminated(self):
+ input = "\"TESTING"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeStringUntermEscapeSequence(self):
+ input = "\"TESTING\\\""
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeStringBadEscape(self):
+ input = "\"TESTING\\\""
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeTrueBroken(self):
+ input = "tru"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeFalseBroken(self):
+ input = "fa"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeNullBroken(self):
+ input = "n"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+
+ def test_decodeBrokenDictKeyTypeLeakTest(self):
+ input = '{{1337:""}}'
+ for x in xrange(1000):
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError),e:
+ continue
+
+ assert False, "Wrong exception"
+
+ def test_decodeBrokenDictLeakTest(self):
+ input = '{{"key":"}'
+ for x in xrange(1000):
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ continue
+
+ assert False, "Wrong exception"
+
+ def test_decodeBrokenListLeakTest(self):
+ input = '[[[true'
+ for x in xrange(1000):
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ continue
+
+ assert False, "Wrong exception"
+
+ def test_decodeDictWithNoKey(self):
+ input = "{{{{31337}}}}"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+
+ assert False, "Wrong exception"
+
+ def test_decodeDictWithNoColonOrValue(self):
+ input = "{{{{\"key\"}}}}"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+
+ assert False, "Wrong exception"
+
+ def test_decodeDictWithNoValue(self):
+ input = "{{{{\"key\":}}}}"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+
+ assert False, "Wrong exception"
+
+ def test_decodeNumericIntPos(self):
+ input = "31337"
+ self.assertEquals (31337, ujson.decode(input))
+
+ def test_decodeNumericIntNeg(self):
+ input = "-31337"
+ self.assertEquals (-31337, ujson.decode(input))
+
+ def test_encodeUnicode4BytesUTF8Fail(self):
+ _skip_if_python_ver(3)
+ input = "\xfd\xbf\xbf\xbf\xbf\xbf"
+ try:
+ enc = ujson.encode(input)
+ assert False, "Expected exception"
+ except OverflowError:
+ pass
+
+ def test_encodeNullCharacter(self):
+ input = "31337 \x00 1337"
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+
+ input = "\x00"
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+
+ self.assertEquals('" \\u0000\\r\\n "', ujson.dumps(u" \u0000\r\n "))
+ pass
+
+ def test_decodeNullCharacter(self):
+ input = "\"31337 \\u0000 31337\""
+ self.assertEquals(ujson.decode(input), json.loads(input))
+
+
+ def test_encodeListLongConversion(self):
+ input = [9223372036854775807, 9223372036854775807, 9223372036854775807,
+ 9223372036854775807, 9223372036854775807, 9223372036854775807 ]
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(input, ujson.decode(output))
+ assert_array_equal(np.array(input), ujson.decode(output, numpy=True,
+ dtype=np.int64))
+ pass
+
+ def test_encodeLongConversion(self):
+ input = 9223372036854775807
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+ def test_numericIntExp(self):
+ input = "1337E40"
+ output = ujson.decode(input)
+ self.assertEquals(output, json.loads(input))
+
+ def test_numericIntFrcExp(self):
+ input = "1.337E40"
+ output = ujson.decode(input)
+ self.assertAlmostEqual(output, json.loads(input))
+
+ def test_decodeNumericIntExpEPLUS(self):
+ input = "1337E+40"
+ output = ujson.decode(input)
+ self.assertAlmostEqual(output, json.loads(input))
+
+ def test_decodeNumericIntExpePLUS(self):
+ input = "1.337e+40"
+ output = ujson.decode(input)
+ self.assertAlmostEqual(output, json.loads(input))
+
+ def test_decodeNumericIntExpE(self):
+ input = "1337E40"
+ output = ujson.decode(input)
+ self.assertAlmostEqual(output, json.loads(input))
+
+ def test_decodeNumericIntExpe(self):
+ input = "1337e40"
+ output = ujson.decode(input)
+ self.assertAlmostEqual(output, json.loads(input))
+
+ def test_decodeNumericIntExpEMinus(self):
+ input = "1.337E-4"
+ output = ujson.decode(input)
+ self.assertAlmostEqual(output, json.loads(input))
+
+ def test_decodeNumericIntExpeMinus(self):
+ input = "1.337e-4"
+ output = ujson.decode(input)
+ self.assertAlmostEqual(output, json.loads(input))
+
+ def test_dumpToFile(self):
+ f = StringIO.StringIO()
+ ujson.dump([1, 2, 3], f)
+ self.assertEquals("[1,2,3]", f.getvalue())
+
+ def test_dumpToFileLikeObject(self):
+ class filelike:
+ def __init__(self):
+ self.bytes = ''
+ def write(self, bytes):
+ self.bytes += bytes
+ f = filelike()
+ ujson.dump([1, 2, 3], f)
+ self.assertEquals("[1,2,3]", f.bytes)
+
+ def test_dumpFileArgsError(self):
+ try:
+ ujson.dump([], '')
+ except TypeError:
+ pass
+ else:
+ assert False, 'expected TypeError'
+
+ def test_loadFile(self):
+ f = StringIO.StringIO("[1,2,3,4]")
+ self.assertEquals([1, 2, 3, 4], ujson.load(f))
+ f = StringIO.StringIO("[1,2,3,4]")
+ assert_array_equal(np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
+
+ def test_loadFileLikeObject(self):
+ class filelike:
+ def read(self):
+ try:
+ self.end
+ except AttributeError:
+ self.end = True
+ return "[1,2,3,4]"
+ f = filelike()
+ self.assertEquals([1, 2, 3, 4], ujson.load(f))
+ f = filelike()
+ assert_array_equal(np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
+
+ def test_loadFileArgsError(self):
+ try:
+ ujson.load("[]")
+ except TypeError:
+ pass
+ else:
+ assert False, "expected TypeError"
+
+ def test_version(self):
+ assert re.match(r'^\d+\.\d+(\.\d+)?$', ujson.__version__), \
+ "ujson.__version__ must be a string like '1.4.0'"
+
+ def test_encodeNumericOverflow(self):
+ try:
+ ujson.encode(12839128391289382193812939)
+ except OverflowError:
+ pass
+ else:
+ assert False, "expected OverflowError"
+
+ def test_encodeNumericOverflowNested(self):
+ for n in xrange(0, 100):
+ class Nested:
+ x = 12839128391289382193812939
+
+ nested = Nested()
+
+ try:
+ ujson.encode(nested)
+ except OverflowError:
+ pass
+ else:
+ assert False, "expected OverflowError"
+
+ def test_decodeNumberWith32bitSignBit(self):
+ #Test that numbers that fit within 32 bits but would have the
+ # sign bit set (2**31 <= x < 2**32) are decoded properly.
+ boundary1 = 2**31
+ boundary2 = 2**32
+ docs = (
+ '{"id": 3590016419}',
+ '{"id": %s}' % 2**31,
+ '{"id": %s}' % 2**32,
+ '{"id": %s}' % ((2**32)-1),
+ )
+ results = (3590016419, 2**31, 2**32, 2**32-1)
+ for doc,result in zip(docs, results):
+ self.assertEqual(ujson.decode(doc)['id'], result)
+
+ def test_encodeBigEscape(self):
+ for x in xrange(10):
+ if py3compat.PY3:
+ base = '\u00e5'.encode('utf-8')
+ else:
+ base = "\xc3\xa5"
+ input = base * 1024 * 1024 * 2
+ output = ujson.encode(input)
+
+ def test_decodeBigEscape(self):
+ for x in xrange(10):
+ if py3compat.PY3:
+ base = '\u00e5'.encode('utf-8')
+ else:
+ base = "\xc3\xa5"
+ quote = py3compat.str_to_bytes("\"")
+ input = quote + (base * 1024 * 1024 * 2) + quote
+ output = ujson.decode(input)
+
+ def test_toDict(self):
+ d = {u"key": 31337}
+
+ class DictTest:
+ def toDict(self):
+ return d
+
+ o = DictTest()
+ output = ujson.encode(o)
+ dec = ujson.decode(output)
+ self.assertEquals(dec, d)
+
+
+class NumpyJSONTests(TestCase):
+
+ def testBool(self):
+ b = np.bool(True)
+ self.assertEqual(ujson.decode(ujson.encode(b)), b)
+
+ def testBoolArray(self):
+ inpt = np.array([True, False, True, True, False, True, False , False],
+ dtype=np.bool)
+ outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=np.bool)
+ assert_array_equal(inpt, outp)
+
+ def testInt(self):
+ num = np.int(2562010)
+ self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)
+
+ num = np.int8(127)
+ self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)
+
+ num = np.int16(2562010)
+ self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)
+
+ num = np.int32(2562010)
+ self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)
+
+ num = np.int64(2562010)
+ self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)
+
+ num = np.uint8(255)
+ self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)
+
+ num = np.uint16(2562010)
+ self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)
+
+ num = np.uint32(2562010)
+ self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)
+
+ num = np.uint64(2562010)
+ self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
+
+ def testIntArray(self):
+ arr = np.arange(100, dtype=np.int)
+ dtypes = (np.int, np.int8, np.int16, np.int32, np.int64,
+ np.uint, np.uint8, np.uint16, np.uint32, np.uint64)
+ for dtype in dtypes:
+ inpt = arr.astype(dtype)
+ outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=dtype)
+ assert_array_equal(inpt, outp)
+
+ def testIntMax(self):
+ num = np.int(np.iinfo(np.int).max)
+ self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)
+
+ num = np.int8(np.iinfo(np.int8).max)
+ self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)
+
+ num = np.int16(np.iinfo(np.int16).max)
+ self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)
+
+ num = np.int32(np.iinfo(np.int32).max)
+ self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)
+
+ num = np.uint8(np.iinfo(np.uint8).max)
+ self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)
+
+ num = np.uint16(np.iinfo(np.uint16).max)
+ self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)
+
+ num = np.uint32(np.iinfo(np.uint32).max)
+ self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)
+
+ if platform.architecture()[0] != '32bit':
+ num = np.int64(np.iinfo(np.int64).max)
+ self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)
+
+ # uint64 max will always overflow as it's encoded to signed
+ num = np.uint64(np.iinfo(np.int64).max)
+ self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
+
+ def testFloat(self):
+ num = np.float(256.2013)
+ self.assertEqual(np.float(ujson.decode(ujson.encode(num))), num)
+
+ num = np.float32(256.2013)
+ self.assertEqual(np.float32(ujson.decode(ujson.encode(num))), num)
+
+ num = np.float64(256.2013)
+ self.assertEqual(np.float64(ujson.decode(ujson.encode(num))), num)
+
+ def testFloatArray(self):
+ arr = np.arange(12.5, 185.72, 1.7322, dtype=np.float)
+ dtypes = (np.float, np.float32, np.float64)
+
+ for dtype in dtypes:
+ inpt = arr.astype(dtype)
+ outp = np.array(ujson.decode(ujson.encode(inpt, double_precision=15)), dtype=dtype)
+ assert_array_almost_equal_nulp(inpt, outp)
+
+ def testFloatMax(self):
+ num = np.float(np.finfo(np.float).max/10)
+ assert_approx_equal(np.float(ujson.decode(ujson.encode(num))), num, 15)
+
+ num = np.float32(np.finfo(np.float32).max/10)
+ assert_approx_equal(np.float32(ujson.decode(ujson.encode(num))), num, 15)
+
+ num = np.float64(np.finfo(np.float64).max/10)
+ assert_approx_equal(np.float64(ujson.decode(ujson.encode(num))), num, 15)
+
+ def testArrays(self):
+ arr = np.arange(100);
+
+ arr = arr.reshape((10, 10))
+ assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
+ assert_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
+
+ arr = arr.reshape((5, 5, 4))
+ assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
+ assert_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
+
+ arr = arr.reshape((100, 1))
+ assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
+ assert_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
+
+ arr = np.arange(96);
+ arr = arr.reshape((2, 2, 2, 2, 3, 2))
+ assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
+ assert_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
+
+ l = ['a', list(), dict(), dict(), list(),
+ 42, 97.8, ['a', 'b'], {'key': 'val'}]
+ arr = np.array(l)
+ assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
+
+ arr = np.arange(100.202, 200.202, 1, dtype=np.float32);
+ arr = arr.reshape((5, 5, 4))
+ outp = np.array(ujson.decode(ujson.encode(arr)), dtype=np.float32)
+ assert_array_almost_equal_nulp(arr, outp)
+ outp = ujson.decode(ujson.encode(arr), numpy=True, dtype=np.float32)
+ assert_array_almost_equal_nulp(arr, outp)
+
+ def testArrayNumpyExcept(self):
+
+ input = ujson.dumps([42, {}, 'a'])
+ try:
+ ujson.decode(input, numpy=True)
+ assert False, "Expected exception!"
+ except(TypeError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ input = ujson.dumps(['a', 'b', [], 'c'])
+ try:
+ ujson.decode(input, numpy=True)
+ assert False, "Expected exception!"
+ except(ValueError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ input = ujson.dumps([['a'], 42])
+ try:
+ ujson.decode(input, numpy=True)
+ assert False, "Expected exception!"
+ except(ValueError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ input = ujson.dumps([42, ['a'], 42])
+ try:
+ ujson.decode(input, numpy=True)
+ assert False, "Expected exception!"
+ except(ValueError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ input = ujson.dumps([{}, []])
+ try:
+ ujson.decode(input, numpy=True)
+ assert False, "Expected exception!"
+ except(ValueError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ input = ujson.dumps([42, None])
+ try:
+ ujson.decode(input, numpy=True)
+ assert False, "Expected exception!"
+ except(TypeError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ input = ujson.dumps([{'a': 'b'}])
+ try:
+ ujson.decode(input, numpy=True, labelled=True)
+ assert False, "Expected exception!"
+ except(ValueError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ input = ujson.dumps({'a': {'b': {'c': 42}}})
+ try:
+ ujson.decode(input, numpy=True, labelled=True)
+ assert False, "Expected exception!"
+ except(ValueError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ input = ujson.dumps([{'a': 42, 'b': 23}, {'c': 17}])
+ try:
+ ujson.decode(input, numpy=True, labelled=True)
+ assert False, "Expected exception!"
+ except(ValueError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ def testArrayNumpyLabelled(self):
+ input = {'a': []}
+ output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
+ self.assertTrue((np.empty((1, 0)) == output[0]).all())
+ self.assertTrue((np.array(['a']) == output[1]).all())
+ self.assertTrue(output[2] is None)
+
+ input = [{'a': 42}]
+ output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
+ self.assertTrue((np.array([42]) == output[0]).all())
+ self.assertTrue(output[1] is None)
+ self.assertTrue((np.array([u'a']) == output[2]).all())
+
+ input = [{'a': 42, 'b':31}, {'a': 24, 'c': 99}, {'a': 2.4, 'b': 78}]
+ output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
+ expectedvals = np.array([42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3,2))
+ self.assertTrue((expectedvals == output[0]).all())
+ self.assertTrue(output[1] is None)
+ self.assertTrue((np.array([u'a', 'b']) == output[2]).all())
+
+
+ input = {1: {'a': 42, 'b':31}, 2: {'a': 24, 'c': 99}, 3: {'a': 2.4, 'b': 78}}
+ output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
+ expectedvals = np.array([42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3,2))
+ self.assertTrue((expectedvals == output[0]).all())
+ self.assertTrue((np.array(['1','2','3']) == output[1]).all())
+ self.assertTrue((np.array(['a', 'b']) == output[2]).all())
+
+class PandasJSONTests(TestCase):
+
+ def testDataFrame(self):
+ df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
+
+ # column indexed
+ outp = DataFrame(ujson.decode(ujson.encode(df)))
+ self.assertTrue((df == outp).values.all())
+ assert_array_equal(df.columns, outp.columns)
+ assert_array_equal(df.index, outp.index)
+
+ dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split")))
+ outp = DataFrame(**dec)
+ self.assertTrue((df == outp).values.all())
+ assert_array_equal(df.columns, outp.columns)
+ assert_array_equal(df.index, outp.index)
+
+ outp = DataFrame(ujson.decode(ujson.encode(df, orient="records")))
+ outp.index = df.index
+ self.assertTrue((df == outp).values.all())
+ assert_array_equal(df.columns, outp.columns)
+
+ outp = DataFrame(ujson.decode(ujson.encode(df, orient="values")))
+ outp.index = df.index
+ self.assertTrue((df.values == outp.values).all())
+
+ outp = DataFrame(ujson.decode(ujson.encode(df, orient="index")))
+ self.assertTrue((df.transpose() == outp).values.all())
+ assert_array_equal(df.transpose().columns, outp.columns)
+ assert_array_equal(df.transpose().index, outp.index)
+
+
+ def testDataFrameNumpy(self):
+ df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
+
+ # column indexed
+ outp = DataFrame(ujson.decode(ujson.encode(df), numpy=True))
+ self.assertTrue((df == outp).values.all())
+ assert_array_equal(df.columns, outp.columns)
+ assert_array_equal(df.index, outp.index)
+
+ dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split"),
+ numpy=True))
+ outp = DataFrame(**dec)
+ self.assertTrue((df == outp).values.all())
+ assert_array_equal(df.columns, outp.columns)
+ assert_array_equal(df.index, outp.index)
+
+ outp = DataFrame(ujson.decode(ujson.encode(df, orient="index"), numpy=True))
+ self.assertTrue((df.transpose() == outp).values.all())
+ assert_array_equal(df.transpose().columns, outp.columns)
+ assert_array_equal(df.transpose().index, outp.index)
+
+ def testDataFrameNested(self):
+ df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
+
+ nested = {'df1': df, 'df2': df.copy()}
+
+ exp = {'df1': ujson.decode(ujson.encode(df)),
+ 'df2': ujson.decode(ujson.encode(df))}
+ self.assertTrue(ujson.decode(ujson.encode(nested)) == exp)
+
+ exp = {'df1': ujson.decode(ujson.encode(df, orient="index")),
+ 'df2': ujson.decode(ujson.encode(df, orient="index"))}
+ self.assertTrue(ujson.decode(ujson.encode(nested, orient="index")) == exp)
+
+ exp = {'df1': ujson.decode(ujson.encode(df, orient="records")),
+ 'df2': ujson.decode(ujson.encode(df, orient="records"))}
+ self.assertTrue(ujson.decode(ujson.encode(nested, orient="records")) == exp)
+
+ exp = {'df1': ujson.decode(ujson.encode(df, orient="values")),
+ 'df2': ujson.decode(ujson.encode(df, orient="values"))}
+ self.assertTrue(ujson.decode(ujson.encode(nested, orient="values")) == exp)
+
+ exp = {'df1': ujson.decode(ujson.encode(df, orient="split")),
+ 'df2': ujson.decode(ujson.encode(df, orient="split"))}
+ self.assertTrue(ujson.decode(ujson.encode(nested, orient="split")) == exp)
+
+ def testDataFrameNumpyLabelled(self):
+ df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
+
+ # column indexed
+ outp = DataFrame(*ujson.decode(ujson.encode(df), numpy=True, labelled=True))
+ self.assertTrue((df.T == outp).values.all())
+ assert_array_equal(df.T.columns, outp.columns)
+ assert_array_equal(df.T.index, outp.index)
+
+ outp = DataFrame(*ujson.decode(ujson.encode(df, orient="records"), numpy=True, labelled=True))
+ outp.index = df.index
+ self.assertTrue((df == outp).values.all())
+ assert_array_equal(df.columns, outp.columns)
+
+ outp = DataFrame(*ujson.decode(ujson.encode(df, orient="index"), numpy=True, labelled=True))
+ self.assertTrue((df == outp).values.all())
+ assert_array_equal(df.columns, outp.columns)
+ assert_array_equal(df.index, outp.index)
+
+ def testSeries(self):
+ s = Series([10, 20, 30, 40, 50, 60], name="series", index=[6,7,8,9,10,15])
+ s.sort()
+
+ # column indexed
+ outp = Series(ujson.decode(ujson.encode(s)))
+ outp.sort()
+ self.assertTrue((s == outp).values.all())
+
+ outp = Series(ujson.decode(ujson.encode(s), numpy=True))
+ outp.sort()
+ self.assertTrue((s == outp).values.all())
+
+ dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split")))
+ outp = Series(**dec)
+ self.assertTrue((s == outp).values.all())
+ self.assertTrue(s.name == outp.name)
+
+ dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split"),
+ numpy=True))
+ outp = Series(**dec)
+ self.assertTrue((s == outp).values.all())
+ self.assertTrue(s.name == outp.name)
+
+ outp = Series(ujson.decode(ujson.encode(s, orient="records"), numpy=True))
+ self.assertTrue((s == outp).values.all())
+
+ outp = Series(ujson.decode(ujson.encode(s, orient="records")))
+ self.assertTrue((s == outp).values.all())
+
+ outp = Series(ujson.decode(ujson.encode(s, orient="values"), numpy=True))
+ self.assertTrue((s == outp).values.all())
+
+ outp = Series(ujson.decode(ujson.encode(s, orient="values")))
+ self.assertTrue((s == outp).values.all())
+
+ outp = Series(ujson.decode(ujson.encode(s, orient="index")))
+ outp.sort()
+ self.assertTrue((s == outp).values.all())
+
+ outp = Series(ujson.decode(ujson.encode(s, orient="index"), numpy=True))
+ outp.sort()
+ self.assertTrue((s == outp).values.all())
+
+ def testSeriesNested(self):
+ s = Series([10, 20, 30, 40, 50, 60], name="series", index=[6,7,8,9,10,15])
+ s.sort()
+
+ nested = {'s1': s, 's2': s.copy()}
+
+ exp = {'s1': ujson.decode(ujson.encode(s)),
+ 's2': ujson.decode(ujson.encode(s))}
+ self.assertTrue(ujson.decode(ujson.encode(nested)) == exp)
+
+ exp = {'s1': ujson.decode(ujson.encode(s, orient="split")),
+ 's2': ujson.decode(ujson.encode(s, orient="split"))}
+ self.assertTrue(ujson.decode(ujson.encode(nested, orient="split")) == exp)
+
+ exp = {'s1': ujson.decode(ujson.encode(s, orient="records")),
+ 's2': ujson.decode(ujson.encode(s, orient="records"))}
+ self.assertTrue(ujson.decode(ujson.encode(nested, orient="records")) == exp)
+
+ exp = {'s1': ujson.decode(ujson.encode(s, orient="values")),
+ 's2': ujson.decode(ujson.encode(s, orient="values"))}
+ self.assertTrue(ujson.decode(ujson.encode(nested, orient="values")) == exp)
+
+ exp = {'s1': ujson.decode(ujson.encode(s, orient="index")),
+ 's2': ujson.decode(ujson.encode(s, orient="index"))}
+ self.assertTrue(ujson.decode(ujson.encode(nested, orient="index")) == exp)
+
+ def testIndex(self):
+ i = Index([23, 45, 18, 98, 43, 11], name="index")
+
+ # column indexed
+ outp = Index(ujson.decode(ujson.encode(i)))
+ self.assert_(i.equals(outp))
+
+ outp = Index(ujson.decode(ujson.encode(i), numpy=True))
+ self.assert_(i.equals(outp))
+
+ dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
+ outp = Index(**dec)
+ self.assert_(i.equals(outp))
+ self.assertTrue(i.name == outp.name)
+
+ dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split"),
+ numpy=True))
+ outp = Index(**dec)
+ self.assert_(i.equals(outp))
+ self.assertTrue(i.name == outp.name)
+
+ outp = Index(ujson.decode(ujson.encode(i, orient="values")))
+ self.assert_(i.equals(outp))
+
+ outp = Index(ujson.decode(ujson.encode(i, orient="values"), numpy=True))
+ self.assert_(i.equals(outp))
+
+ outp = Index(ujson.decode(ujson.encode(i, orient="records")))
+ self.assert_(i.equals(outp))
+
+ outp = Index(ujson.decode(ujson.encode(i, orient="records"), numpy=True))
+ self.assert_(i.equals(outp))
+
+ outp = Index(ujson.decode(ujson.encode(i, orient="index")))
+ self.assert_(i.equals(outp))
+
+ outp = Index(ujson.decode(ujson.encode(i, orient="index"), numpy=True))
+ self.assert_(i.equals(outp))
+
+ def test_datetimeindex(self):
+ from pandas.tseries.index import date_range, DatetimeIndex
+
+ rng = date_range('1/1/2000', periods=20)
+
+ encoded = ujson.encode(rng)
+ decoded = DatetimeIndex(np.array(ujson.decode(encoded)))
+
+ self.assert_(rng.equals(decoded))
+
+ ts = Series(np.random.randn(len(rng)), index=rng)
+ decoded = Series(ujson.decode(ujson.encode(ts)))
+ idx_values = decoded.index.values.astype(np.int64)
+ decoded.index = DatetimeIndex(idx_values)
+ tm.assert_series_equal(np.round(ts, 5), decoded)
+
+"""
+def test_decodeNumericIntFrcOverflow(self):
+input = "X.Y"
+raise NotImplementedError("Implement this test!")
+
+
+def test_decodeStringUnicodeEscape(self):
+input = "\u3131"
+raise NotImplementedError("Implement this test!")
+
+def test_decodeStringUnicodeBrokenEscape(self):
+input = "\u3131"
+raise NotImplementedError("Implement this test!")
+
+def test_decodeStringUnicodeInvalidEscape(self):
+input = "\u3131"
+raise NotImplementedError("Implement this test!")
+
+def test_decodeStringUTF8(self):
+input = "someutfcharacters"
+raise NotImplementedError("Implement this test!")
+
+
+
+"""
+
+def _clean_dict(d):
+ return dict((str(k), v) for k, v in d.iteritems())
+
+if __name__ == '__main__':
+ # unittest.main()
+ import nose
+ # nose.runmodule(argv=[__file__,'-vvs','-x', '--ipdb-failure'],
+ # exit=False)
+ nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
+ exit=False)
diff --git a/pandas/src/ujson/lib/ultrajson.h b/pandas/src/ujson/lib/ultrajson.h
new file mode 100644
index 0000000000000..eae665f00f03e
--- /dev/null
+++ b/pandas/src/ujson/lib/ultrajson.h
@@ -0,0 +1,298 @@
+/*
+Copyright (c) 2011, Jonas Tarnstrom and ESN Social Software AB
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. All advertising materials mentioning features or use of this software
+ must display the following acknowledgement:
+ This product includes software developed by ESN Social Software AB (www.esn.me).
+4. Neither the name of the ESN Social Software AB nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ESN SOCIAL SOFTWARE AB ''AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Portions of code from:
+MODP_ASCII - Ascii transformations (upper/lower, etc)
+http://code.google.com/p/stringencoders/
+Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
+
+*/
+
+/*
+Ultra fast JSON encoder and decoder
+Developed by Jonas Tarnstrom (jonas@esn.me).
+
+Encoder notes:
+------------------
+
+:: Cyclic references ::
+Cyclic referenced objects are not detected.
+Set JSONObjectEncoder.recursionMax to suitable value or make sure input object
+tree doesn't have cyclic references.
+
+*/
+
+#ifndef __ULTRAJSON_H__
+#define __ULTRAJSON_H__
+
+#include <stdio.h>
+#include <wchar.h>
+
+//#define JSON_DECODE_NUMERIC_AS_DOUBLE
+
+// Don't output any extra whitespaces when encoding
+#define JSON_NO_EXTRA_WHITESPACE
+
+// Max decimals to encode double floating point numbers with
+#ifndef JSON_DOUBLE_MAX_DECIMALS
+#define JSON_DOUBLE_MAX_DECIMALS 15
+#endif
+
+// Max recursion depth, default for encoder
+#ifndef JSON_MAX_RECURSION_DEPTH
+#define JSON_MAX_RECURSION_DEPTH 1024
+#endif
+
+/*
+Dictates and limits how much stack space for buffers UltraJSON will use before resorting to provided heap functions */
+#ifndef JSON_MAX_STACK_BUFFER_SIZE
+#define JSON_MAX_STACK_BUFFER_SIZE 131072
+#endif
+
+#ifdef _WIN32
+
+typedef __int64 JSINT64;
+typedef unsigned __int64 JSUINT64;
+
+typedef __int32 JSINT32;
+typedef unsigned __int32 JSUINT32;
+typedef unsigned __int8 JSUINT8;
+typedef unsigned __int16 JSUTF16;
+typedef unsigned __int32 JSUTF32;
+typedef __int64 JSLONG;
+
+#define EXPORTFUNCTION __declspec(dllexport)
+
+#define FASTCALL_MSVC __fastcall
+#define FASTCALL_ATTR
+#define INLINE_PREFIX __inline
+
+#else
+
+#include <sys/types.h>
+typedef int64_t JSINT64;
+typedef u_int64_t JSUINT64;
+
+typedef int32_t JSINT32;
+typedef u_int32_t JSUINT32;
+
+#define FASTCALL_MSVC
+#define FASTCALL_ATTR __attribute__((fastcall))
+#define INLINE_PREFIX inline
+
+typedef u_int8_t JSUINT8;
+typedef u_int16_t JSUTF16;
+typedef u_int32_t JSUTF32;
+
+typedef int64_t JSLONG;
+
+#define EXPORTFUNCTION
+#endif
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define __LITTLE_ENDIAN__
+#else
+
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define __BIG_ENDIAN__
+#endif
+
+#endif
+
+#if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
+#error "Endianess not supported"
+#endif
+
+enum JSTYPES
+{
+ JT_NULL, // NULL
+ JT_TRUE, //boolean true
+ JT_FALSE, //boolean false
+ JT_INT, //(JSINT32 (signed 32-bit))
+ JT_LONG, //(JSINT64 (signed 64-bit))
+ JT_DOUBLE, //(double)
+ JT_UTF8, //(char 8-bit)
+ JT_ARRAY, // Array structure
+ JT_OBJECT, // Key/Value structure
+ JT_INVALID, // Internal, do not return nor expect
+};
+
+typedef void * JSOBJ;
+typedef void * JSITER;
+
+typedef struct __JSONTypeContext
+{
+ int type;
+ void *encoder;
+ void *prv;
+} JSONTypeContext;
+
+/*
+Function pointer declarations, suitable for implementing UltraJSON */
+typedef void (*JSPFN_ITERBEGIN)(JSOBJ obj, JSONTypeContext *tc);
+typedef int (*JSPFN_ITERNEXT)(JSOBJ obj, JSONTypeContext *tc);
+typedef void (*JSPFN_ITEREND)(JSOBJ obj, JSONTypeContext *tc);
+typedef JSOBJ (*JSPFN_ITERGETVALUE)(JSOBJ obj, JSONTypeContext *tc);
+typedef char *(*JSPFN_ITERGETNAME)(JSOBJ obj, JSONTypeContext *tc, size_t *outLen);
+typedef void *(*JSPFN_MALLOC)(size_t size);
+typedef void (*JSPFN_FREE)(void *pptr);
+typedef void *(*JSPFN_REALLOC)(void *base, size_t size);
+
+typedef struct __JSONObjectEncoder
+{
+ void (*beginTypeContext)(JSOBJ obj, JSONTypeContext *tc);
+ void (*endTypeContext)(JSOBJ obj, JSONTypeContext *tc);
+ const char *(*getStringValue)(JSOBJ obj, JSONTypeContext *tc, size_t *_outLen);
+ JSINT64 (*getLongValue)(JSOBJ obj, JSONTypeContext *tc);
+ JSINT32 (*getIntValue)(JSOBJ obj, JSONTypeContext *tc);
+ double (*getDoubleValue)(JSOBJ obj, JSONTypeContext *tc);
+
+ /*
+ Begin iteration of an iteratable object (JS_ARRAY or JS_OBJECT)
+ Implementor should setup iteration state in ti->prv
+ */
+ JSPFN_ITERBEGIN iterBegin;
+
+ /*
+ Retrieve next object in an iteration. Should return 0 to indicate iteration has reached end or 1 if there are more items.
+ Implementor is responsible for keeping state of the iteration. Use ti->prv fields for this
+ */
+ JSPFN_ITERNEXT iterNext;
+
+ /*
+ Ends the iteration of an iteratable object.
+ Any iteration state stored in ti->prv can be freed here
+ */
+ JSPFN_ITEREND iterEnd;
+
+ /*
+ Returns a reference to the value object of an iterator
+ The is responsible for the life-cycle of the returned string. Use iterNext/iterEnd and ti->prv to keep track of current object
+ */
+ JSPFN_ITERGETVALUE iterGetValue;
+
+ /*
+ Return name of iterator.
+ The is responsible for the life-cycle of the returned string. Use iterNext/iterEnd and ti->prv to keep track of current object
+ */
+ JSPFN_ITERGETNAME iterGetName;
+
+ /*
+ Release a value as indicated by setting ti->release = 1 in the previous getValue call.
+ The ti->prv array should contain the necessary context to release the value
+ */
+ void (*releaseObject)(JSOBJ obj);
+
+ /* Library functions
+ Set to NULL to use STDLIB malloc,realloc,free */
+ JSPFN_MALLOC malloc;
+ JSPFN_REALLOC realloc;
+ JSPFN_FREE free;
+
+ /*
+ Configuration for max recursion, set to 0 to use default (see JSON_MAX_RECURSION_DEPTH)*/
+ int recursionMax;
+
+ /*
+ Configuration for max decimals of double floating poiunt numbers to encode (0-9) */
+ int doublePrecision;
+
+ /*
+ If true output will be ASCII with all characters above 127 encoded as \uXXXX. If false output will be UTF-8 or what ever charset strings are brought as */
+ int forceASCII;
+
+
+ /*
+ Set to an error message if error occured */
+ const char *errorMsg;
+ JSOBJ errorObj;
+
+ /* Buffer stuff */
+ char *start;
+ char *offset;
+ char *end;
+ int heap;
+ int level;
+
+} JSONObjectEncoder;
+
+
+/*
+Encode an object structure into JSON.
+
+Arguments:
+obj - An anonymous type representing the object
+enc - Function definitions for querying JSOBJ type
+buffer - Preallocated buffer to store result in. If NULL function allocates own buffer
+cbBuffer - Length of buffer (ignored if buffer is NULL)
+
+Returns:
+Encoded JSON object as a null terminated char string.
+
+NOTE:
+If the supplied buffer wasn't enough to hold the result the function will allocate a new buffer.
+Life cycle of the provided buffer must still be handled by caller.
+
+If the return value doesn't equal the specified buffer caller must release the memory using
+JSONObjectEncoder.free or free() as specified when calling this function.
+*/
+EXPORTFUNCTION char *JSON_EncodeObject(JSOBJ obj, JSONObjectEncoder *enc, char *buffer, size_t cbBuffer);
+
+
+
+typedef struct __JSONObjectDecoder
+{
+ JSOBJ (*newString)(wchar_t *start, wchar_t *end);
+ int (*objectAddKey)(JSOBJ obj, JSOBJ name, JSOBJ value);
+ int (*arrayAddItem)(JSOBJ obj, JSOBJ value);
+ JSOBJ (*newTrue)(void);
+ JSOBJ (*newFalse)(void);
+ JSOBJ (*newNull)(void);
+ JSOBJ (*newObject)(void *decoder);
+ JSOBJ (*endObject)(JSOBJ obj);
+ JSOBJ (*newArray)(void *decoder);
+ JSOBJ (*endArray)(JSOBJ obj);
+ JSOBJ (*newInt)(JSINT32 value);
+ JSOBJ (*newLong)(JSINT64 value);
+ JSOBJ (*newDouble)(double value);
+ void (*releaseObject)(JSOBJ obj, void *decoder);
+ JSPFN_MALLOC malloc;
+ JSPFN_FREE free;
+ JSPFN_REALLOC realloc;
+
+ char *errorStr;
+ char *errorOffset;
+
+
+
+} JSONObjectDecoder;
+
+EXPORTFUNCTION JSOBJ JSON_DecodeObject(JSONObjectDecoder *dec, const char *buffer, size_t cbBuffer);
+
+#endif
diff --git a/pandas/src/ujson/python/py_defines.h b/pandas/src/ujson/python/py_defines.h
new file mode 100644
index 0000000000000..1544c2e3cf34d
--- /dev/null
+++ b/pandas/src/ujson/python/py_defines.h
@@ -0,0 +1,15 @@
+#include <Python.h>
+
+#if PY_MAJOR_VERSION >= 3
+
+#define PyInt_Check PyLong_Check
+#define PyInt_AS_LONG PyLong_AsLong
+#define PyInt_FromLong PyLong_FromLong
+
+#define PyString_Check PyBytes_Check
+#define PyString_GET_SIZE PyBytes_GET_SIZE
+#define PyString_AS_STRING PyBytes_AS_STRING
+
+#define PyString_FromString PyUnicode_FromString
+
+#endif
diff --git a/pandas/src/ujson/python/version.h b/pandas/src/ujson/python/version.h
new file mode 100644
index 0000000000000..9449441411192
--- /dev/null
+++ b/pandas/src/ujson/python/version.h
@@ -0,0 +1 @@
+#define UJSON_VERSION "1.18"
diff --git a/scripts/json_manip.py b/scripts/json_manip.py
new file mode 100644
index 0000000000000..e76a99cca344a
--- /dev/null
+++ b/scripts/json_manip.py
@@ -0,0 +1,421 @@
+"""
+
+Tasks
+-------
+
+Search and transform jsonable structures, specifically to make it 'easy' to make tabular/csv output for other consumers.
+
+Example
+~~~~~~~~~~~~~
+
+ *give me a list of all the fields called 'id' in this stupid, gnarly
+ thing*
+
+ >>> Q('id',gnarly_data)
+ ['id1','id2','id3']
+
+
+Observations:
+---------------------
+
+1) 'simple data structures' exist and are common. They are tedious
+ to search.
+
+2) The DOM is another nested / treeish structure, and jQuery selector is
+ a good tool for that.
+
+3a) R, Numpy, Excel and other analysis tools want 'tabular' data. These
+ analyses are valuable and worth doing.
+
+3b) Dot/Graphviz, NetworkX, and some other analyses *like* treeish/dicty
+ things, and those analyses are also worth doing!
+
+3c) Some analyses are best done using 'one-off' and custom code in C, Python,
+ or another 'real' programming language.
+
+4) Arbitrary transforms are tedious and error prone. SQL is one solution,
+ XSLT is another,
+
+5) the XPATH/XML/XSLT family is.... not universally loved :) They are
+ very complete, and the completeness can make simple cases... gross.
+
+6) For really complicated data structures, we can write one-off code. Getting
+ 80% of the way is mostly okay. There will always have to be programmers
+ in the loop.
+
+7) Re-inventing SQL is probably a failure mode. So is reinventing XPATH, XSLT
+ and the like. Be wary of mission creep! Re-use when possible (e.g., can
+ we put the thing into a DOM using
+
+8) If the interface is good, people can improve performance later.
+
+
+Simplifying
+---------------
+
+
+1) Assuming 'jsonable' structures
+
+2) keys are strings or stringlike. Python allows any hashable to be a key.
+ for now, we pretend that doesn't happen.
+
+3) assumes most dicts are 'well behaved'. DAG, no cycles!
+
+4) assume that if people want really specialized transforms, they can do it
+ themselves.
+
+"""
+
+from collections import Counter, namedtuple
+import csv
+import itertools
+from itertools import product
+from operator import attrgetter as aget, itemgetter as iget
+import operator
+import sys
+
+
+
+## note 'url' appears multiple places and not all extensions have same struct
+ex1 = {
+ 'name': 'Gregg',
+ 'extensions': [
+ {'id':'hello',
+ 'url':'url1'},
+ {'id':'gbye',
+ 'url':'url2',
+ 'more': dict(url='url3')},
+ ]
+}
+
+## much longer example
+ex2 = {u'metadata': {u'accessibilities': [{u'name': u'accessibility.tabfocus',
+ u'value': 7},
+ {u'name': u'accessibility.mouse_focuses_formcontrol', u'value': False},
+ {u'name': u'accessibility.browsewithcaret', u'value': False},
+ {u'name': u'accessibility.win32.force_disabled', u'value': False},
+ {u'name': u'accessibility.typeaheadfind.startlinksonly', u'value': False},
+ {u'name': u'accessibility.usebrailledisplay', u'value': u''},
+ {u'name': u'accessibility.typeaheadfind.timeout', u'value': 5000},
+ {u'name': u'accessibility.typeaheadfind.enabletimeout', u'value': True},
+ {u'name': u'accessibility.tabfocus_applies_to_xul', u'value': False},
+ {u'name': u'accessibility.typeaheadfind.flashBar', u'value': 1},
+ {u'name': u'accessibility.typeaheadfind.autostart', u'value': True},
+ {u'name': u'accessibility.blockautorefresh', u'value': False},
+ {u'name': u'accessibility.browsewithcaret_shortcut.enabled',
+ u'value': True},
+ {u'name': u'accessibility.typeaheadfind.enablesound', u'value': True},
+ {u'name': u'accessibility.typeaheadfind.prefillwithselection',
+ u'value': True},
+ {u'name': u'accessibility.typeaheadfind.soundURL', u'value': u'beep'},
+ {u'name': u'accessibility.typeaheadfind', u'value': False},
+ {u'name': u'accessibility.typeaheadfind.casesensitive', u'value': 0},
+ {u'name': u'accessibility.warn_on_browsewithcaret', u'value': True},
+ {u'name': u'accessibility.usetexttospeech', u'value': u''},
+ {u'name': u'accessibility.accesskeycausesactivation', u'value': True},
+ {u'name': u'accessibility.typeaheadfind.linksonly', u'value': False},
+ {u'name': u'isInstantiated', u'value': True}],
+ u'extensions': [{u'id': u'216ee7f7f4a5b8175374cd62150664efe2433a31',
+ u'isEnabled': True},
+ {u'id': u'1aa53d3b720800c43c4ced5740a6e82bb0b3813e', u'isEnabled': False},
+ {u'id': u'01ecfac5a7bd8c9e27b7c5499e71c2d285084b37', u'isEnabled': True},
+ {u'id': u'1c01f5b22371b70b312ace94785f7b0b87c3dfb2', u'isEnabled': True},
+ {u'id': u'fb723781a2385055f7d024788b75e959ad8ea8c3', u'isEnabled': True}],
+ u'fxVersion': u'9.0',
+ u'location': u'zh-CN',
+ u'operatingSystem': u'WINNT Windows NT 5.1',
+ u'surveyAnswers': u'',
+ u'task_guid': u'd69fbd15-2517-45b5-8a17-bb7354122a75',
+ u'tpVersion': u'1.2',
+ u'updateChannel': u'beta'},
+ u'survey_data': {
+ u'extensions': [{u'appDisabled': False,
+ u'id': u'testpilot?labs.mozilla.com',
+ u'isCompatible': True,
+ u'isEnabled': True,
+ u'isPlatformCompatible': True,
+ u'name': u'Test Pilot'},
+ {u'appDisabled': True,
+ u'id': u'dict?www.youdao.com',
+ u'isCompatible': False,
+ u'isEnabled': False,
+ u'isPlatformCompatible': True,
+ u'name': u'Youdao Word Capturer'},
+ {u'appDisabled': False,
+ u'id': u'jqs?sun.com',
+ u'isCompatible': True,
+ u'isEnabled': True,
+ u'isPlatformCompatible': True,
+ u'name': u'Java Quick Starter'},
+ {u'appDisabled': False,
+ u'id': u'?20a82645-c095-46ed-80e3-08825760534b?',
+ u'isCompatible': True,
+ u'isEnabled': True,
+ u'isPlatformCompatible': True,
+ u'name': u'Microsoft .NET Framework Assistant'},
+ {u'appDisabled': False,
+ u'id': u'?a0d7ccb3-214d-498b-b4aa-0e8fda9a7bf7?',
+ u'isCompatible': True,
+ u'isEnabled': True,
+ u'isPlatformCompatible': True,
+ u'name': u'WOT'}],
+ u'version_number': 1}}
+
+# class SurveyResult(object):
+
+# def __init__(self, record):
+# self.record = record
+# self.metadata, self.survey_data = self._flatten_results()
+
+# def _flatten_results(self):
+# survey_data = self.record['survey_data']
+# extensions = DataFrame(survey_data['extensions'])
+
+def denorm(queries,iterable_of_things,default=None):
+ """
+ 'repeat', or 'stutter' to 'tableize' for downstream.
+ (I have no idea what a good word for this is!)
+
+ Think ``kronecker`` products, or:
+
+ ``SELECT single,multiple FROM table;``
+
+ single multiple
+ ------- ---------
+ id1 val1
+ id1 val2
+
+
+ Args:
+
+ queries: iterable of ``Q`` queries.
+ iterable_of_things: to be queried.
+
+ Returns:
+
+ list of 'stuttered' output, where if a query returns
+ a 'single', it gets repeated appropriately.
+
+
+ """
+
+ def _denorm(queries,thing):
+ fields = []
+ results = []
+ for q in queries:
+ #print q
+ r = Ql(q,thing)
+ #print "-- result: ", r
+ if not r:
+ r = [default]
+ if type(r[0]) is type({}):
+ fields.append(sorted(r[0].keys())) # dicty answers
+ else:
+ fields.append([q]) # stringy answer
+
+ results.append(r)
+
+ #print results
+ #print fields
+ flist = list(flatten(*map(iter,fields)))
+
+ prod = itertools.product(*results)
+ for p in prod:
+ U = dict()
+ for (ii,thing) in enumerate(p):
+ #print ii,thing
+ if type(thing) is type({}):
+ U.update(thing)
+ else:
+ U[fields[ii][0]] = thing
+
+ yield U
+
+ return list(flatten(*[_denorm(queries,thing) for thing in iterable_of_things]))
+
+
+def default_iget(fields,default=None,):
+ """ itemgetter with 'default' handling, that *always* returns lists
+
+ API CHANGES from ``operator.itemgetter``
+
+ Note: Sorry to break the iget api... (fields vs *fields)
+ Note: *always* returns a list... unlike itemgetter,
+ which can return tuples or 'singles'
+ """
+ myiget = operator.itemgetter(*fields)
+ L = len(fields)
+ def f(thing):
+ try:
+ ans = list(myiget(thing))
+ if L < 2:
+ ans = [ans,]
+ return ans
+ except KeyError:
+ # slower!
+ return [thing.get(x,default) for x in fields]
+
+ f.__doc__ = "itemgetter with default %r for fields %r" %(default,fields)
+ f.__name__ = "default_itemgetter"
+ return f
+
+
+def flatten(*stack):
+ """
+ helper function for flattening iterables of generators in a
+ sensible way.
+ """
+ stack = list(stack)
+ while stack:
+ try: x = stack[0].next()
+ except StopIteration:
+ stack.pop(0)
+ continue
+ if hasattr(x,'next') and callable(getattr(x,'next')):
+ stack.insert(0, x)
+
+ #if isinstance(x, (GeneratorType,listerator)):
+ else: yield x
+
+
+def _Q(filter_, thing):
+ """ underlying machinery for Q function recursion """
+ T = type(thing)
+ if T is type({}):
+ for k,v in thing.iteritems():
+ #print k,v
+ if filter_ == k:
+ if type(v) is type([]):
+ yield iter(v)
+ else:
+ yield v
+
+ if type(v) in (type({}),type([])):
+ yield Q(filter_,v)
+
+ elif T is type([]):
+ for k in thing:
+ #print k
+ yield Q(filter_,k)
+
+ else:
+ # no recursion.
+ pass
+
+def Q(filter_,thing):
+ """
+ type(filter):
+ - list: a flattened list of all searches (one list)
+ - dict: dict with vals each of which is that search
+
+ Notes:
+
+ [1] 'parent thing', with space, will do a descendent
+ [2] this will come back 'flattened' jQuery style
+ [3] returns a generator. Use ``Ql`` if you want a list.
+
+ """
+ if type(filter_) is type([]):
+ return flatten(*[_Q(x,thing) for x in filter_])
+ elif type(filter_) is type({}):
+ d = dict.fromkeys(filter_.keys())
+ #print d
+ for k in d:
+ #print flatten(Q(k,thing))
+ d[k] = Q(k,thing)
+
+ return d
+
+ else:
+ if " " in filter_: # i.e. "antecendent post"
+ parts = filter_.strip().split()
+ r = None
+ for p in parts:
+ r = Ql(p,thing)
+ thing = r
+
+ return r
+
+ else: # simple.
+ return flatten(_Q(filter_,thing))
+
+def Ql(filter_,thing):
+ """ same as Q, but returns a list, not a generator """
+ res = Q(filter_,thing)
+
+ if type(filter_) is type({}):
+ for k in res:
+ res[k] = list(res[k])
+ return res
+
+ else:
+ return list(res)
+
+
+
+def countit(fields,iter_of_iter,default=None):
+ """
+ note: robust to fields not being in i_of_i, using ``default``
+ """
+ C = Counter() # needs hashables
+ T = namedtuple("Thing",fields)
+ get = default_iget(*fields,default=default)
+ return Counter(
+ (T(*get(thing)) for thing in iter_of_iter)
+ )
+
+
+## right now this works for one row...
+def printout(queries,things,default=None, f=sys.stdout, **kwargs):
+ """ will print header and objects
+
+ **kwargs go to csv.DictWriter
+
+ help(csv.DictWriter) for more.
+ """
+
+ results = denorm(queries,things,default=None)
+ fields = set(itertools.chain(*(x.keys() for x in results)))
+
+ W = csv.DictWriter(f=f,fieldnames=fields,**kwargs)
+ #print "---prod---"
+ #print list(prod)
+ W.writeheader()
+ for r in results:
+ W.writerow(r)
+
+
+def test_run():
+ print "\n>>> print list(Q('url',ex1))"
+ print list(Q('url',ex1))
+ assert list(Q('url',ex1)) == ['url1','url2','url3']
+ assert Ql('url',ex1) == ['url1','url2','url3']
+
+ print "\n>>> print list(Q(['name','id'],ex1))"
+ print list(Q(['name','id'],ex1))
+ assert Ql(['name','id'],ex1) == ['Gregg','hello','gbye']
+
+
+ print "\n>>> print Ql('more url',ex1)"
+ print Ql('more url',ex1)
+
+
+ print "\n>>> list(Q('extensions',ex1))"
+ print list(Q('extensions',ex1))
+
+ print "\n>>> print Ql('extensions',ex1)"
+ print Ql('extensions',ex1)
+
+ print "\n>>> printout(['name','extensions'],[ex1,], extrasaction='ignore')"
+ printout(['name','extensions'],[ex1,], extrasaction='ignore')
+
+ print "\n\n"
+
+ from pprint import pprint as pp
+
+ print "-- note that the extension fields are also flattened! (and N/A) -- "
+ pp(denorm(['location','fxVersion','notthere','survey_data extensions'],[ex2,], default="N/A")[:2])
+
+
+if __name__ == "__main__":
+ pass
diff --git a/setup.py b/setup.py
index 3e56144e25378..c451efb17afc7 100755
--- a/setup.py
+++ b/setup.py
@@ -250,6 +250,11 @@ def initialize_options(self):
for f in files:
if f in self._clean_exclude:
continue
+
+ # XXX
+ if 'ujson' in f:
+ continue
+
if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o',
'.pyo',
'.pyd', '.c', '.orig'):
@@ -472,6 +477,21 @@ def pxd(name):
root, _ = os.path.splitext(ext.sources[0])
ext.sources[0] = root + suffix
+ujson_ext = Extension('pandas.json',
+ depends=['pandas/src/ujson/lib/ultrajson.h'],
+ sources=['pandas/src/ujson/python/ujson.c',
+ 'pandas/src/ujson/python/objToJSON.c',
+ 'pandas/src/ujson/python/JSONtoObj.c',
+ 'pandas/src/ujson/lib/ultrajsonenc.c',
+ 'pandas/src/ujson/lib/ultrajsondec.c',
+ 'pandas/src/datetime/np_datetime.c',
+ 'pandas/src/datetime/np_datetime_strings.c'],
+ include_dirs=['pandas/src/ujson/python',
+ 'pandas/src/ujson/lib'] + common_include)
+
+
+extensions.append(ujson_ext)
+
if _have_setuptools:
setuptools_kwargs["test_suite"] = "nose.collector"
@@ -500,6 +520,7 @@ def pxd(name):
'pandas.tseries',
'pandas.tseries.tests',
'pandas.io.tests',
+ 'pandas.io.tests.test_json',
'pandas.stats.tests',
],
package_data={'pandas.io': ['tests/data/legacy_hdf/*.h5',
| This extension works perfectly fine on Python 2.x and all platforms to my knowledge except win32 on MinGW (it's fine with VC2008+ AFAIK). It might have problems on some of @yarikoptic's esoteric Debian platforms, we should identify which ones those are and disable the extension in the setup.py until we have a chance to sort out what's wrong in the C code. I don't think that's a bad compromise for now in the interest of having a usable extension for a lot of folks.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3583 | 2013-05-12T01:36:52Z | 2013-06-11T19:27:46Z | null | 2014-06-26T17:48:41Z |
DOC: add mention of idx* methods in max/min methods of Series/DataFrame | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index da3bbcb4f0dc2..f3c1a6617a5d5 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -401,6 +401,10 @@ value, ``idxmin`` and ``idxmax`` return the first matching index:
df3
df3['A'].idxmin()
+.. note::
+
+ ``idxmin`` and ``idxmax`` are called ``argmin`` and ``argmax`` in NumPy.
+
.. _basics.discretization:
Value counts (histogramming)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ad1429fcea1ca..86a3e79a2fcec 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4770,6 +4770,18 @@ def mean(self, axis=0, skipna=True, level=None):
extras='')
@Appender(_stat_doc)
def min(self, axis=0, skipna=True, level=None):
+ """
+ Notes
+ -----
+ This method returns the minimum of the values in the DataFrame. If you
+ want the *index* of the minimum, use ``DataFrame.idxmin``. This is the
+ equivalent of the ``numpy.ndarray`` method ``argmin``.
+
+ See Also
+ --------
+ DataFrame.idxmin
+ Series.idxmin
+ """
if level is not None:
return self._agg_by_level('min', axis=axis, level=level,
skipna=skipna)
@@ -4780,6 +4792,18 @@ def min(self, axis=0, skipna=True, level=None):
extras='')
@Appender(_stat_doc)
def max(self, axis=0, skipna=True, level=None):
+ """
+ Notes
+ -----
+ This method returns the maximum of the values in the DataFrame. If you
+ want the *index* of the maximum, use ``DataFrame.idxmax``. This is the
+ equivalent of the ``numpy.ndarray`` method ``argmax``.
+
+ See Also
+ --------
+ DataFrame.idxmax
+ Series.idxmax
+ """
if level is not None:
return self._agg_by_level('max', axis=axis, level=level,
skipna=skipna)
@@ -4939,6 +4963,14 @@ def idxmin(self, axis=0, skipna=True):
Returns
-------
idxmin : Series
+
+ Notes
+ -----
+ This method is the DataFrame version of ``ndarray.argmin``.
+
+ See Also
+ --------
+ Series.idxmin
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
@@ -4962,6 +4994,14 @@ def idxmax(self, axis=0, skipna=True):
Returns
-------
idxmax : Series
+
+ Notes
+ -----
+ This method is the DataFrame version of ``ndarray.argmax``.
+
+ See Also
+ --------
+ Series.idxmax
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 3509e226d46fb..cebf2f4ef9d1f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1516,6 +1516,18 @@ def mad(self, skipna=True, level=None):
na_action=_doc_exclude_na, extras='')
@Appender(_stat_doc)
def min(self, axis=None, out=None, skipna=True, level=None):
+ """
+ Notes
+ -----
+ This method returns the minimum of the values in the Series. If you
+ want the *index* of the minimum, use ``Series.idxmin``. This is the
+ equivalent of the ``numpy.ndarray`` method ``argmin``.
+
+ See Also
+ --------
+ Series.idxmin
+ DataFrame.idxmin
+ """
if level is not None:
return self._agg_by_level('min', level=level, skipna=skipna)
return nanops.nanmin(self.values, skipna=skipna)
@@ -1524,6 +1536,18 @@ def min(self, axis=None, out=None, skipna=True, level=None):
na_action=_doc_exclude_na, extras='')
@Appender(_stat_doc)
def max(self, axis=None, out=None, skipna=True, level=None):
+ """
+ Notes
+ -----
+ This method returns the maximum of the values in the Series. If you
+ want the *index* of the maximum, use ``Series.idxmax``. This is the
+ equivalent of the ``numpy.ndarray`` method ``argmax``.
+
+ See Also
+ --------
+ Series.idxmax
+ DataFrame.idxmax
+ """
if level is not None:
return self._agg_by_level('max', level=level, skipna=skipna)
return nanops.nanmax(self.values, skipna=skipna)
@@ -1592,6 +1616,14 @@ def idxmin(self, axis=None, out=None, skipna=True):
Returns
-------
idxmin : Index of minimum of values
+
+ Notes
+ -----
+ This method is the Series version of ``ndarray.argmin``.
+
+ See Also
+ --------
+ DataFrame.idxmin
"""
i = nanops.nanargmin(self.values, skipna=skipna)
if i == -1:
@@ -1610,6 +1642,14 @@ def idxmax(self, axis=None, out=None, skipna=True):
Returns
-------
idxmax : Index of minimum of values
+
+ Notes
+ -----
+ This method is the Series version of ``ndarray.argmax``.
+
+ See Also
+ --------
+ DataFrame.idxmax
"""
i = nanops.nanargmax(self.values, skipna=skipna)
if i == -1:
| closes #3574.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3580 | 2013-05-11T21:15:29Z | 2013-05-13T17:34:35Z | 2013-05-13T17:34:35Z | 2014-07-16T08:08:44Z |
ENH: allow to_csv to write multi-index columns, read_csv to read with header=list arg | diff --git a/RELEASE.rst b/RELEASE.rst
index acb4f429e81b0..74bafd419af54 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -34,6 +34,15 @@ pandas 0.11.1
courtesy of @cpcloud. (GH3477_)
- Support for reading Amazon S3 files. (GH3504_)
- Added module for reading and writing Stata files: pandas.io.stata (GH1512_)
+ - Added support for writing in ``to_csv`` and reading in ``read_csv``,
+ multi-index columns. The ``header`` option in ``read_csv`` now accepts a
+ list of the rows from which to read the index. Added the option,
+ ``tupleize_cols`` to provide compatiblity for the pre 0.11.1 behavior of
+ writing and reading multi-index columns via a list of tuples. The default in
+ 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
+ multi-index column.
+ Note: The default value will change in 0.12 to make the default *to* write and
+ read multi-index columns in the new format. (GH3571_, GH1651_, GH3141_)
**Improvements to existing features**
@@ -180,6 +189,7 @@ pandas 0.11.1
.. _GH3596: https://github.com/pydata/pandas/issues/3596
.. _GH3617: https://github.com/pydata/pandas/issues/3617
.. _GH3435: https://github.com/pydata/pandas/issues/3435
+<<<<<<< HEAD
.. _GH3611: https://github.com/pydata/pandas/issues/3611
.. _GH3062: https://github.com/pydata/pandas/issues/3062
.. _GH3624: https://github.com/pydata/pandas/issues/3624
@@ -187,6 +197,11 @@ pandas 0.11.1
.. _GH3601: https://github.com/pydata/pandas/issues/3601
.. _GH3631: https://github.com/pydata/pandas/issues/3631
.. _GH1512: https://github.com/pydata/pandas/issues/1512
+=======
+.. _GH3571: https://github.com/pydata/pandas/issues/3571
+.. _GH1651: https://github.com/pydata/pandas/issues/1651
+.. _GH3141: https://github.com/pydata/pandas/issues/3141
+>>>>>>> DOC: updated releasenotes, v0.11.1 whatsnew, io.rst
pandas 0.11.0
diff --git a/doc/source/io.rst b/doc/source/io.rst
index f15f758c42b18..42ea4a2ca5d53 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -57,7 +57,10 @@ They can take a number of arguments:
specified, data types will be inferred.
- ``header``: row number to use as the column names, and the start of the
data. Defaults to 0 if no ``names`` passed, otherwise ``None``. Explicitly
- pass ``header=0`` to be able to replace existing names.
+ pass ``header=0`` to be able to replace existing names. The header can be
+ a list of integers that specify row locations for a multi-index on the columns
+ E.g. [0,1,3]. Interveaning rows that are not specified will be skipped.
+ (E.g. 2 in this example are skipped)
- ``skiprows``: A collection of numbers for rows in the file to skip. Can
also be an integer to skip the first ``n`` rows
- ``index_col``: column number, column name, or list of column numbers/names,
@@ -112,6 +115,10 @@ They can take a number of arguments:
- ``error_bad_lines``: if False then any lines causing an error will be skipped :ref:`bad lines <io.bad_lines>`
- ``usecols``: a subset of columns to return, results in much faster parsing
time and lower memory usage.
+ - ``mangle_dupe_cols``: boolean, default True, then duplicate columns will be specified
+ as 'X.0'...'X.N', rather than 'X'...'X'
+ - ``tupleize_cols``: boolean, default True, if False, convert a list of tuples
+ to a multi-index of columns, otherwise, leave the column index as a list of tuples
.. ipython:: python
:suppress:
@@ -762,6 +769,36 @@ column numbers to turn multiple columns into a ``MultiIndex``:
df
df.ix[1978]
+.. _io.multi_index_columns:
+
+Specifying a multi-index columns
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By specifying list of row locations for the ``header`` argument, you
+can read in a multi-index for the columns. Specifying non-consecutive
+rows will skip the interveaing rows.
+
+.. ipython:: python
+
+ from pandas.util.testing import makeCustomDataframe as mkdf
+ df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
+ df.to_csv('mi.csv',tupleize_cols=False)
+ print open('mi.csv').read()
+ pd.read_csv('mi.csv',header=[0,1,2,3],index_col=[0,1],tupleize_cols=False)
+
+Note: The default behavior in 0.11.1 remains unchanged (``tupleize_cols=True``),
+but starting with 0.12, the default *to* write and read multi-index columns will be in the new
+format (``tupleize_cols=False``)
+
+Note: If an ``index_col`` is not specified (e.g. you don't have an index, or wrote it
+with ``df.to_csv(..., index=False``), then any ``names`` on the columns index will be *lost*.
+
+.. ipython:: python
+ :suppress:
+
+ import os
+ os.remove('mi.csv')
+
.. _io.sniff:
Automatically "sniffing" the delimiter
@@ -845,6 +882,8 @@ function takes a number of arguments. Only the first is required.
- ``sep`` : Field delimiter for the output file (default ",")
- ``encoding``: a string representing the encoding to use if the contents are
non-ascii, for python versions prior to 3
+ - ``tupleize_cols``: boolean, default True, if False, write as a list of tuples,
+ otherwise write in an expanded line format suitable for ``read_csv``
Writing a formatted string
~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -876,6 +915,9 @@ The Series object also has a ``to_string`` method, but with only the ``buf``,
which, if set to ``True``, will additionally output the length of the Series.
+HTML
+----
+
Reading HTML format
~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index aed95188db26e..a724ce96a7381 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -73,6 +73,7 @@ Enhancements
an index with a different frequency than the existing, or attempting
to append an index with a different name than the existing
- support datelike columns with a timezone as data_columns (GH2852_)
+
- ``fillna`` methods now raise a ``TypeError`` if the ``value`` parameter is
a list or tuple.
- Added module for reading and writing Stata files: pandas.io.stata (GH1512_)
@@ -80,6 +81,39 @@ Enhancements
``Series`` with object dtype. See the examples section in the regular docs
:ref:`Replacing via String Expression <missing_data.replace_expression>`
+ - Multi-index column support for reading and writing csvs
+
+ - The ``header`` option in ``read_csv`` now accepts a
+ list of the rows from which to read the index.
+
+ - The option, ``tupleize_cols`` can now be specified in both ``to_csv`` and
+ ``read_csv``, to provide compatiblity for the pre 0.11.1 behavior of
+ writing and reading multi-index columns via a list of tuples. The default in
+ 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
+ multi-index column.
+
+ Note: The default behavior in 0.11.1 remains unchanged, but starting with 0.12,
+ the default *to* write and read multi-index columns will be in the new
+ format. (GH3571_, GH1651_, GH3141_)
+
+ - If an ``index_col`` is not specified (e.g. you don't have an index, or wrote it
+ with ``df.to_csv(..., index=False``), then any ``names`` on the columns index will
+ be *lost*.
+
+ .. ipython:: python
+
+ from pandas.util.testing import makeCustomDataframe as mkdf
+ df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
+ df.to_csv('mi.csv',tupleize_cols=False)
+ print open('mi.csv').read()
+ pd.read_csv('mi.csv',header=[0,1,2,3],index_col=[0,1],tupleize_cols=False)
+
+ .. ipython:: python
+ :suppress:
+
+ import os
+ os.remove('mi.csv')
+
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
on GitHub for a complete list.
@@ -96,3 +130,6 @@ on GitHub for a complete list.
.. _GH1512: https://github.com/pydata/pandas/issues/1512
.. _GH2285: https://github.com/pydata/pandas/issues/2285
.. _GH3631: https://github.com/pydata/pandas/issues/3631
+.. _GH3571: https://github.com/pydata/pandas/issues/3571
+.. _GH1651: https://github.com/pydata/pandas/issues/1651
+.. _GH3141: https://github.com/pydata/pandas/issues/3141
diff --git a/pandas/core/format.py b/pandas/core/format.py
index bea4b59bfaaa4..cd4364edc6662 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -772,9 +772,10 @@ def grouper(x):
class CSVFormatter(object):
def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None,
- cols=None, header=True, index=True, index_label=None,
- mode='w', nanRep=None, encoding=None, quoting=None,
- line_terminator='\n', chunksize=None, engine=None):
+ cols=None, header=True, index=True, index_label=None,
+ mode='w', nanRep=None, encoding=None, quoting=None,
+ line_terminator='\n', chunksize=None, engine=None,
+ tupleize_cols=True):
self.engine = engine # remove for 0.12
@@ -803,6 +804,15 @@ def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None,
msg= "columns.is_unique == False not supported with engine='python'"
raise NotImplementedError(msg)
+ self.tupleize_cols = tupleize_cols
+ self.has_mi_columns = isinstance(obj.columns, MultiIndex
+ ) and not self.tupleize_cols
+
+ # validate mi options
+ if self.has_mi_columns:
+ if cols is not None:
+ raise Exception("cannot specify cols with a multi_index on the columns")
+
if cols is not None:
if isinstance(cols,Index):
cols = cols.to_native_types(na_rep=na_rep,float_format=float_format)
@@ -958,48 +968,82 @@ def _save_header(self):
obj = self.obj
index_label = self.index_label
cols = self.cols
+ has_mi_columns = self.has_mi_columns
header = self.header
+ encoded_labels = []
has_aliases = isinstance(header, (tuple, list, np.ndarray))
- if has_aliases or self.header:
- if self.index:
- # should write something for index label
- if index_label is not False:
- if index_label is None:
- if isinstance(obj.index, MultiIndex):
- index_label = []
- for i, name in enumerate(obj.index.names):
- if name is None:
- name = ''
- index_label.append(name)
+ if not (has_aliases or self.header):
+ return
+
+ if self.index:
+ # should write something for index label
+ if index_label is not False:
+ if index_label is None:
+ if isinstance(obj.index, MultiIndex):
+ index_label = []
+ for i, name in enumerate(obj.index.names):
+ if name is None:
+ name = ''
+ index_label.append(name)
+ else:
+ index_label = obj.index.name
+ if index_label is None:
+ index_label = ['']
else:
- index_label = obj.index.name
- if index_label is None:
- index_label = ['']
- else:
- index_label = [index_label]
- elif not isinstance(index_label, (list, tuple, np.ndarray)):
- # given a string for a DF with Index
- index_label = [index_label]
+ index_label = [index_label]
+ elif not isinstance(index_label, (list, tuple, np.ndarray)):
+ # given a string for a DF with Index
+ index_label = [index_label]
- encoded_labels = list(index_label)
- else:
- encoded_labels = []
+ encoded_labels = list(index_label)
+ else:
+ encoded_labels = []
- if has_aliases:
- if len(header) != len(cols):
- raise ValueError(('Writing %d cols but got %d aliases'
- % (len(cols), len(header))))
- else:
- write_cols = header
+ if has_aliases:
+ if len(header) != len(cols):
+ raise ValueError(('Writing %d cols but got %d aliases'
+ % (len(cols), len(header))))
else:
- write_cols = cols
- encoded_cols = list(write_cols)
-
- writer.writerow(encoded_labels + encoded_cols)
+ write_cols = header
else:
- encoded_cols = list(cols)
- writer.writerow(encoded_cols)
+ write_cols = cols
+
+ if not has_mi_columns:
+ encoded_labels += list(write_cols)
+
+ else:
+
+ if not has_mi_columns:
+ encoded_labels += list(cols)
+
+ # write out the mi
+ if has_mi_columns:
+ columns = obj.columns
+
+ # write out the names for each level, then ALL of the values for each level
+ for i in range(columns.nlevels):
+
+ # we need at least 1 index column to write our col names
+ col_line = []
+ if self.index:
+
+ # name is the first column
+ col_line.append( columns.names[i] )
+
+ if isinstance(index_label,list) and len(index_label)>1:
+ col_line.extend([ '' ] * (len(index_label)-1))
+
+ col_line.extend(columns.get_level_values(i))
+
+ writer.writerow(col_line)
+
+ # add blanks for the columns, so that we
+ # have consistent seps
+ encoded_labels.extend([ '' ] * len(columns))
+
+ # write out the index label line
+ writer.writerow(encoded_labels)
def _save(self):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 39742557ccc56..d91d21db3ec1b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1250,7 +1250,7 @@ def _from_arrays(cls, arrays, columns, index, dtype=None):
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0,
- parse_dates=True, encoding=None):
+ parse_dates=True, encoding=None, tupleize_cols=False):
"""
Read delimited file into DataFrame
@@ -1266,6 +1266,9 @@ def from_csv(cls, path, header=0, sep=',', index_col=0,
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
+ tupleize_cols : boolean, default True
+ write multi_index columns as a list of tuples (if True)
+ or new (expanded format) if False)
Notes
-----
@@ -1280,7 +1283,7 @@ def from_csv(cls, path, header=0, sep=',', index_col=0,
from pandas.io.parsers import read_table
return read_table(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
- encoding=encoding)
+ encoding=encoding,tupleize_cols=False)
@classmethod
def from_dta(dta, path, parse_dates=True, convert_categoricals=True, encoding=None, index_col=None):
@@ -1391,7 +1394,8 @@ def to_panel(self):
def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
cols=None, header=True, index=True, index_label=None,
mode='w', nanRep=None, encoding=None, quoting=None,
- line_terminator='\n', chunksize=None,**kwds):
+ line_terminator='\n', chunksize=None,
+ tupleize_cols=True, **kwds):
"""
Write DataFrame to a comma-separated values (csv) file
@@ -1429,6 +1433,9 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL
chunksize : rows to write at a time
+ tupleize_cols : boolean, default True
+ write multi_index columns as a list of tuples (if True)
+ or new (expanded format) if False)
"""
if nanRep is not None: # pragma: no cover
import warnings
@@ -1445,7 +1452,8 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
float_format=float_format, cols=cols,
header=header, index=index,
index_label=index_label,mode=mode,
- chunksize=chunksize,engine=kwds.get("engine") )
+ chunksize=chunksize,engine=kwds.get("engine"),
+ tupleize_cols=tupleize_cols)
formatter.save()
def to_excel(self, excel_writer, sheet_name='sheet1', na_rep='',
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 044b25041afd9..61be871e62595 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -52,9 +52,11 @@ class DateConversionError(Exception):
dialect : string or csv.Dialect instance, default None
If None defaults to Excel dialect. Ignored if sep longer than 1 char
See csv.Dialect documentation for more details
-header : int, default 0 if names parameter not specified, otherwise None
+header : int, default 0 if names parameter not specified,
Row to use for the column labels of the parsed DataFrame. Specify None if
- there is no header row.
+ there is no header row. Can be a list of integers that specify row
+ locations for a multi-index on the columns E.g. [0,1,3]. Interveaning
+ rows that are not specified (E.g. 2 in this example are skipped)
skiprows : list-like or integer
Row numbers to skip (0-indexed) or number of rows to skip (int)
at the start of the file
@@ -125,6 +127,11 @@ class DateConversionError(Exception):
usecols : array-like
Return a subset of the columns.
Results in much faster parsing time and lower memory usage.
+mangle_dupe_cols: boolean, default True
+ Duplicate columns will be specified as 'X.0'...'X.N', rather than 'X'...'X'
+tupleize_cols: boolean, default False
+ Leave a list of tuples on columns as is (default is to convert to
+ a Multi Index on the columns)
Returns
-------
@@ -292,6 +299,7 @@ def _read(filepath_or_buffer, kwds):
'squeeze': False,
'compression': None,
'mangle_dupe_cols': True,
+ 'tupleize_cols':True,
}
@@ -378,7 +386,8 @@ def parser_f(filepath_or_buffer,
verbose=False,
encoding=None,
squeeze=False,
- mangle_dupe_cols=True
+ mangle_dupe_cols=True,
+ tupleize_cols=True,
):
# Alias sep -> delimiter.
@@ -436,7 +445,8 @@ def parser_f(filepath_or_buffer,
error_bad_lines=error_bad_lines,
low_memory=low_memory,
buffer_lines=buffer_lines,
- mangle_dupe_cols=mangle_dupe_cols
+ mangle_dupe_cols=mangle_dupe_cols,
+ tupleize_cols=tupleize_cols,
)
return _read(filepath_or_buffer, kwds)
@@ -677,10 +687,8 @@ def read(self, nrows=None):
if self.options.get('as_recarray'):
return ret
- index, columns, col_dict = ret
-
# May alter columns / col_dict
- # index, columns, col_dict = self._create_index(col_dict, columns)
+ index, columns, col_dict = self._create_index(ret)
df = DataFrame(col_dict, columns=columns, index=index)
@@ -688,8 +696,9 @@ def read(self, nrows=None):
return df[df.columns[0]]
return df
- def _create_index(self, col_dict, columns):
- pass
+ def _create_index(self, ret):
+ index, columns, col_dict = ret
+ return index, columns, col_dict
def get_chunk(self, size=None):
if size is None:
@@ -709,6 +718,7 @@ def __init__(self, kwds):
self.index_col = kwds.pop('index_col', None)
self.index_names = None
+ self.col_names = None
self.parse_dates = kwds.pop('parse_dates', False)
self.date_parser = kwds.pop('date_parser', None)
@@ -718,10 +728,31 @@ def __init__(self, kwds):
self.na_values = kwds.get('na_values')
self.true_values = kwds.get('true_values')
self.false_values = kwds.get('false_values')
+ self.tupleize_cols = kwds.get('tupleize_cols',True)
self._date_conv = _make_date_converter(date_parser=self.date_parser,
dayfirst=self.dayfirst)
+ # validate header options for mi
+ self.header = kwds.get('header')
+ if isinstance(self.header,(list,tuple,np.ndarray)):
+ if kwds.get('as_recarray'):
+ raise Exception("cannot specify as_recarray when "
+ "specifying a multi-index header")
+ if kwds.get('usecols'):
+ raise Exception("cannot specify usecols when "
+ "specifying a multi-index header")
+ if kwds.get('names'):
+ raise Exception("cannot specify names when "
+ "specifying a multi-index header")
+
+ # validate index_col that only contains integers
+ if self.index_col is not None:
+ if not (isinstance(self.index_col,(list,tuple,np.ndarray)) and all(
+ [ com.is_integer(i) for i in self.index_col ]) or com.is_integer(self.index_col)):
+ raise Exception("index_col must only contain row numbers "
+ "when specifying a multi-index header")
+
self._name_processed = False
@property
@@ -743,7 +774,62 @@ def _should_parse_dates(self, i):
else:
return (j in self.parse_dates) or (name in self.parse_dates)
- def _make_index(self, data, alldata, columns):
+
+ def _extract_multi_indexer_columns(self, header, index_names, col_names, passed_names=False):
+ """ extract and return the names, index_names, col_names
+ header is a list-of-lists returned from the parsers """
+ if len(header) < 2:
+ return header[0], index_names, col_names, passed_names
+
+ # the names are the tuples of the header that are not the index cols
+ # 0 is the name of the index, assuming index_col is a list of column
+ # numbers
+ ic = self.index_col
+ if ic is None:
+ ic = []
+
+ if not isinstance(ic, (list,tuple,np.ndarray)):
+ ic = [ ic ]
+ sic = set(ic)
+
+ orig_header = list(header)
+
+ # clean the index_names
+ index_names = header.pop(-1)
+ (index_names, names,
+ index_col) = _clean_index_names(index_names, self.index_col)
+
+ # extract the columns
+ field_count = len(header[0])
+ def extract(r):
+ return tuple([ r[i] for i in range(field_count) if i not in sic ])
+ columns = zip(*[ extract(r) for r in header ])
+ names = ic + columns
+
+ # if we find 'Unnamed' all of a single level, then our header was too long
+ for n in range(len(columns[0])):
+ if all([ 'Unnamed' in c[n] for c in columns ]):
+ raise _parser.CParserError("Passed header=[%s] are too many rows for this "
+ "multi_index of columns" % ','.join([ str(x) for x in self.header ]))
+
+ # clean the column names (if we have an index_col)
+ if len(ic):
+ col_names = [ r[0] if len(r[0]) and 'Unnamed' not in r[0] else None for r in header ]
+ else:
+ col_names = [ None ] * len(header)
+
+ passed_names = True
+
+ return names, index_names, col_names, passed_names
+
+ def _maybe_make_multi_index_columns(self, columns, col_names=None):
+ # possibly create a column mi here
+ if not self.tupleize_cols and len(columns) and not isinstance(
+ columns, MultiIndex) and all([ isinstance(c,tuple) for c in columns]):
+ columns = MultiIndex.from_tuples(columns,names=col_names)
+ return columns
+
+ def _make_index(self, data, alldata, columns, indexnamerow=False):
if not _is_index_col(self.index_col) or len(self.index_col) == 0:
index = None
@@ -760,7 +846,15 @@ def _make_index(self, data, alldata, columns):
index = self._get_complex_date_index(data, columns)
index = self._agg_index(index, try_parse_dates=False)
- return index
+ # add names for the index
+ if indexnamerow:
+ coffset = len(indexnamerow) - len(columns)
+ index.names = indexnamerow[:coffset]
+
+ # maybe create a mi on the columns
+ columns = self._maybe_make_multi_index_columns(columns, self.col_names)
+
+ return index, columns
_implicit_index = False
@@ -942,7 +1036,12 @@ def __init__(self, src, **kwds):
if self._reader.header is None:
self.names = None
else:
- self.names = list(self._reader.header)
+ if len(self._reader.header) > 1:
+ # we have a multi index in the columns
+ self.names, self.index_names, self.col_names, passed_names = self._extract_multi_indexer_columns(
+ self._reader.header, self.index_names, self.col_names, passed_names)
+ else:
+ self.names = list(self._reader.header[0])
if self.names is None:
if self.prefix:
@@ -958,12 +1057,14 @@ def __init__(self, src, **kwds):
if not self._has_complex_date_col:
if (self._reader.leading_cols == 0 and
- _is_index_col(self.index_col)):
+ _is_index_col(self.index_col)):
self._name_processed = True
- (self.index_names, self.names,
- self.index_col) = _clean_index_names(self.names,
- self.index_col)
+ (index_names, self.names,
+ self.index_col) = _clean_index_names(self.names, self.index_col)
+
+ if self.index_names is None:
+ self.index_names = index_names
if self._reader.header is None and not passed_names:
self.index_names = [None] * len(self.index_names)
@@ -1049,7 +1150,10 @@ def read(self, nrows=None):
data = dict((k, v) for k, (i, v) in zip(names, data))
names, data = self._do_date_conversions(names, data)
- index = self._make_index(data, alldata, names)
+ index, names = self._make_index(data, alldata, names)
+
+ # maybe create a mi on the columns
+ names = self._maybe_make_multi_index_columns(names, self.col_names)
return index, names, data
@@ -1061,7 +1165,7 @@ def _filter_usecols(self, names):
return names
def _get_index_names(self):
- names = list(self._reader.header)
+ names = list(self._reader.header[0])
idx_names = None
if self._reader.leading_cols == 0 and self.index_col is not None:
@@ -1169,7 +1273,6 @@ def __init__(self, f, **kwds):
raise Exception("usecols not supported with engine='python'"
" or multicharacter separators (yet).")
- self.header = kwds['header']
self.encoding = kwds['encoding']
self.compression = kwds['compression']
self.skiprows = kwds['skiprows']
@@ -1208,6 +1311,13 @@ def __init__(self, f, **kwds):
self.data = f
self.columns = self._infer_columns()
+ # we are processing a multi index column
+ if len(self.columns) > 1:
+ self.columns, self.index_names, self.col_names, _ = self._extract_multi_indexer_columns(
+ self.columns, self.index_names, self.col_names)
+ else:
+ self.columns = self.columns[0]
+
# get popped off for index
self.orig_names = list(self.columns)
@@ -1215,9 +1325,11 @@ def __init__(self, f, **kwds):
# multiple date column thing turning into a real spaghetti factory
if not self._has_complex_date_col:
- (self.index_names,
+ (index_names,
self.orig_names, _) = self._get_index_name(self.columns)
self._name_processed = True
+ if self.index_names is None:
+ self.index_names = index_names
self._first_chunk = True
def _make_reader(self, f):
@@ -1321,10 +1433,7 @@ def read(self, rows=None):
columns, data = self._do_date_conversions(self.columns, data)
data = self._convert_data(data)
- index = self._make_index(data, alldata, columns)
- if indexnamerow:
- coffset = len(indexnamerow) - len(columns)
- index.names = indexnamerow[:coffset]
+ index, columns = self._make_index(data, alldata, columns, indexnamerow)
return index, columns, data
@@ -1350,36 +1459,58 @@ def _infer_columns(self):
names = self.names
if self.header is not None:
- if len(self.buf) > 0:
- line = self.buf[0]
- else:
- line = self._next_line()
+ header = self.header
- while self.pos <= self.header:
- line = self._next_line()
+ # we have a mi columns, so read and extra line
+ if isinstance(header,(list,tuple,np.ndarray)):
+ have_mi_columns = True
+ header = list(header) + [header[-1]+1]
+ else:
+ have_mi_columns = False
+ header = [ header ]
columns = []
- for i, c in enumerate(line):
- if c == '':
- columns.append('Unnamed: %d' % i)
+ for level, hr in enumerate(header):
+
+ if len(self.buf) > 0:
+ line = self.buf[0]
else:
- columns.append(c)
+ line = self._next_line()
+
+ while self.pos <= hr:
+ line = self._next_line()
- if self.mangle_dupe_cols:
- counts = {}
- for i, col in enumerate(columns):
- cur_count = counts.get(col, 0)
- if cur_count > 0:
- columns[i] = '%s.%d' % (col, cur_count)
- counts[col] = cur_count + 1
+ this_columns = []
+ for i, c in enumerate(line):
+ if c == '':
+ if have_mi_columns:
+ this_columns.append('Unnamed: %d_level_%d' % (i,level))
+ else:
+ this_columns.append('Unnamed: %d' % i)
+ else:
+ this_columns.append(c)
+
+ if not have_mi_columns:
+ if self.mangle_dupe_cols:
+ counts = {}
+ for i, col in enumerate(this_columns):
+ cur_count = counts.get(col, 0)
+ if cur_count > 0:
+ this_columns[i] = '%s.%d' % (col, cur_count)
+ counts[col] = cur_count + 1
+
+ columns.append(this_columns)
self._clear_buffer()
if names is not None:
- if len(names) != len(columns):
+ if len(names) != len(columns[0]):
raise Exception('Number of passed names did not match '
'number of header fields in the file')
- columns = names
+ if len(columns) > 1:
+ raise Exception('Cannot pass names with multi-index columns')
+ columns = [ names ]
+
else:
if len(self.buf) > 0:
line = self.buf[0]
@@ -1389,11 +1520,11 @@ def _infer_columns(self):
ncols = len(line)
if not names:
if self.prefix:
- columns = ['X%d' % i for i in range(ncols)]
+ columns = [ ['X%d' % i for i in range(ncols)] ]
else:
- columns = range(ncols)
+ columns = [ range(ncols) ]
else:
- columns = names
+ columns = [ names ]
return columns
diff --git a/pandas/io/tests/test_cparser.py b/pandas/io/tests/test_cparser.py
index b352b189a74b8..0c5b168ee8de5 100644
--- a/pandas/io/tests/test_cparser.py
+++ b/pandas/io/tests/test_cparser.py
@@ -179,7 +179,7 @@ def test_header_not_enough_lines(self):
reader = TextReader(StringIO(data), delimiter=',', header=2,
as_recarray=True)
header = reader.header
- expected = ['a', 'b', 'c']
+ expected = [['a', 'b', 'c']]
self.assertEquals(header, expected)
recs = reader.read()
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 38a31c042d120..be47f28749848 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -20,6 +20,7 @@
TextFileReader, TextParser)
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
+ makeCustomDataframe as mkdf,
network,
ensure_clean)
import pandas.util.testing as tm
@@ -994,6 +995,49 @@ def test_header_not_first_line(self):
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
+ def test_header_multi_index(self):
+ expected = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
+
+ data = """\
+C0,,C_l0_g0,C_l0_g1,C_l0_g2
+
+C1,,C_l1_g0,C_l1_g1,C_l1_g2
+C2,,C_l2_g0,C_l2_g1,C_l2_g2
+C3,,C_l3_g0,C_l3_g1,C_l3_g2
+R0,R1,,,
+R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
+R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
+R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
+R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
+R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
+"""
+
+ # basic test with both engines
+ for engine in ['c','python']:
+ df = read_csv(StringIO(data), header=[0,2,3,4],index_col=[0,1], tupleize_cols=False,
+ engine=engine)
+ tm.assert_frame_equal(df, expected)
+
+ # skipping lines in the header
+ df = read_csv(StringIO(data), header=[0,2,3,4],index_col=[0,1], tupleize_cols=False)
+ tm.assert_frame_equal(df, expected)
+
+ #### invalid options ####
+
+ # no as_recarray
+ self.assertRaises(Exception, read_csv, StringIO(data), header=[0,1,2,3],
+ index_col=[0,1], as_recarray=True, tupleize_cols=False)
+
+ # names
+ self.assertRaises(Exception, read_csv, StringIO(data), header=[0,1,2,3],
+ index_col=[0,1], names=['foo','bar'], tupleize_cols=False)
+ # usecols
+ self.assertRaises(Exception, read_csv, StringIO(data), header=[0,1,2,3],
+ index_col=[0,1], usecols=['foo','bar'], tupleize_cols=False)
+ # non-numeric index_col
+ self.assertRaises(Exception, read_csv, StringIO(data), header=[0,1,2,3],
+ index_col=['foo','bar'], tupleize_cols=False)
+
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
diff --git a/pandas/src/parser.pyx b/pandas/src/parser.pyx
index 694a769641b0d..ee92e2e60960c 100644
--- a/pandas/src/parser.pyx
+++ b/pandas/src/parser.pyx
@@ -143,6 +143,8 @@ cdef extern from "parser/tokenizer.h":
char thousands
int header # Boolean: 1: has header, 0: no header
+ int header_start # header row start
+ int header_end # header row end
void *skipset
int skip_footer
@@ -230,7 +232,7 @@ cdef class TextReader:
cdef:
parser_t *parser
object file_handle
- bint factorize, na_filter, verbose, has_usecols
+ bint factorize, na_filter, verbose, has_usecols, has_mi_columns
int parser_start
list clocks
char *c_encoding
@@ -242,7 +244,7 @@ cdef class TextReader:
object na_values, true_values, false_values
object memory_map
object as_recarray
- object header, names
+ object header, orig_header, names, header_start, header_end
object low_memory
object skiprows
object compact_ints, use_unsigned
@@ -250,12 +252,15 @@ cdef class TextReader:
object encoding
object compression
object mangle_dupe_cols
+ object tupleize_cols
set noconvert, usecols
def __cinit__(self, source,
delimiter=b',',
header=0,
+ header_start=0,
+ header_end=0,
names=None,
memory_map=False,
@@ -300,12 +305,14 @@ cdef class TextReader:
skiprows=None,
skip_footer=0,
verbose=False,
- mangle_dupe_cols=True):
+ mangle_dupe_cols=True,
+ tupleize_cols=True):
self.parser = parser_new()
self.parser.chunksize = tokenize_chunksize
self.mangle_dupe_cols=mangle_dupe_cols
+ self.tupleize_cols=tupleize_cols
# For timekeeping
self.clocks = []
@@ -433,13 +440,34 @@ cdef class TextReader:
self.leading_cols = 0
# TODO: no header vs. header is not the first row
+ self.has_mi_columns = 0
+ self.orig_header = header
if header is None:
# sentinel value
+ self.parser.header_start = -1
+ self.parser.header_end = -1
self.parser.header = -1
self.parser_start = 0
+ self.header = []
else:
- self.parser.header = header
- self.parser_start = header + 1
+ if isinstance(header, list) and len(header):
+ # need to artifically skip the final line
+ # which is still a header line
+ header = list(header)
+ header.append(header[-1]+1)
+
+ self.parser.header_start = header[0]
+ self.parser.header_end = header[-1]
+ self.parser.header = header[0]
+ self.parser_start = header[-1] + 1
+ self.has_mi_columns = 1
+ self.header = header
+ else:
+ self.parser.header_start = header
+ self.parser.header_end = header
+ self.parser.header = header
+ self.parser_start = header + 1
+ self.header = [ header ]
self.names = names
self.header, self.table_width = self._get_header()
@@ -534,8 +562,10 @@ cdef class TextReader:
' got %s type' % type(source))
cdef _get_header(self):
+ # header is now a list of lists, so field_count should use header[0]
+
cdef:
- size_t i, start, data_line, field_count, passed_count
+ size_t i, start, data_line, field_count, passed_count, hr
char *word
object name
int status
@@ -544,49 +574,59 @@ cdef class TextReader:
header = []
- if self.parser.header >= 0:
- # Header is in the file
+ if self.parser.header_start >= 0:
- if self.parser.lines < self.parser.header + 1:
- self._tokenize_rows(self.parser.header + 2)
+ # Header is in the file
+ for level, hr in enumerate(self.header):
- # e.g., if header=3 and file only has 2 lines
- if self.parser.lines < self.parser.header + 1:
- raise CParserError('Passed header=%d but only %d lines in file'
- % (self.parser.header, self.parser.lines))
+ this_header = []
- field_count = self.parser.line_fields[self.parser.header]
- start = self.parser.line_start[self.parser.header]
+ if self.parser.lines < hr + 1:
+ self._tokenize_rows(hr + 2)
- # TODO: Py3 vs. Py2
- counts = {}
- for i in range(field_count):
- word = self.parser.words[start + i]
+ # e.g., if header=3 and file only has 2 lines
+ if self.parser.lines < hr + 1:
+ msg = self.orig_header
+ if isinstance(msg,list):
+ msg = "[%s], len of %d," % (','.join([ str(m) for m in msg ]),len(msg))
+ raise CParserError('Passed header=%s but only %d lines in file'
+ % (msg, self.parser.lines))
- if self.c_encoding == NULL and not PY3:
- name = PyBytes_FromString(word)
- else:
- if self.c_encoding == NULL or self.c_encoding == b'utf-8':
- name = PyUnicode_FromString(word)
- else:
- name = PyUnicode_Decode(word, strlen(word),
- self.c_encoding, errors)
+ field_count = self.parser.line_fields[hr]
+ start = self.parser.line_start[hr]
- if name == '':
- name = 'Unnamed: %d' % i
+ # TODO: Py3 vs. Py2
+ counts = {}
+ for i in range(field_count):
+ word = self.parser.words[start + i]
+ if self.c_encoding == NULL and not PY3:
+ name = PyBytes_FromString(word)
+ else:
+ if self.c_encoding == NULL or self.c_encoding == b'utf-8':
+ name = PyUnicode_FromString(word)
+ else:
+ name = PyUnicode_Decode(word, strlen(word),
+ self.c_encoding, errors)
+
+ if name == '':
+ if self.has_mi_columns:
+ name = 'Unnamed: %d_level_%d' % (i,level)
+ else:
+ name = 'Unnamed: %d' % i
+
+ count = counts.get(name, 0)
+ if count > 0 and self.mangle_dupe_cols and not self.has_mi_columns:
+ this_header.append('%s.%d' % (name, count))
+ else:
+ this_header.append(name)
+ counts[name] = count + 1
- count = counts.get(name, 0)
- if count > 0 and self.mangle_dupe_cols:
- header.append('%s.%d' % (name, count))
- else:
- header.append(name)
- counts[name] = count + 1
-
- data_line = self.parser.header + 1
+ data_line = hr + 1
+ header.append(this_header)
if self.names is not None:
- header = self.names
+ header = [ self.names ]
elif self.names is not None:
# Enforce this unless usecols
@@ -597,11 +637,11 @@ cdef class TextReader:
if self.parser.lines < 1:
self._tokenize_rows(1)
- header = self.names
+ header = [ self.names ]
data_line = 0
if self.parser.lines < 1:
- field_count = len(header)
+ field_count = len(header[0])
else:
field_count = self.parser.line_fields[data_line]
else:
@@ -613,7 +653,7 @@ cdef class TextReader:
# Corner case, not enough lines in the file
if self.parser.lines < data_line + 1:
- field_count = len(header)
+ field_count = len(header[0])
else: # not self.has_usecols:
field_count = self.parser.line_fields[data_line]
@@ -622,7 +662,7 @@ cdef class TextReader:
if self.names is not None:
field_count = max(field_count, len(self.names))
- passed_count = len(header)
+ passed_count = len(header[0])
# if passed_count > field_count:
# raise CParserError('Column names have %d fields, '
@@ -1038,10 +1078,10 @@ cdef class TextReader:
if self.header is not None:
j = i - self.leading_cols
# hack for #2442
- if j == len(self.header):
+ if j == len(self.header[0]):
return j
else:
- return self.header[j]
+ return self.header[0][j]
else:
return None
@@ -1762,6 +1802,9 @@ def _to_structured_array(dict columns, object names):
if names is None:
names = ['%d' % i for i in range(len(columns))]
+ else:
+ # single line header
+ names = names[0]
dt = np.dtype([(str(name), columns[i].dtype)
for i, name in enumerate(names)])
diff --git a/pandas/src/parser/tokenizer.c b/pandas/src/parser/tokenizer.c
index 09cddd07e1c1d..81fda37acbb71 100644
--- a/pandas/src/parser/tokenizer.c
+++ b/pandas/src/parser/tokenizer.c
@@ -463,7 +463,7 @@ static int end_line(parser_t *self) {
/* printf("Line: %d, Fields: %d, Ex-fields: %d\n", self->lines, fields, ex_fields); */
- if (!(self->lines <= self->header + 1)
+ if (!(self->lines <= self->header_end + 1)
&& (self->expected_fields < 0 && fields > ex_fields)) {
// increment file line count
self->file_lines++;
@@ -498,7 +498,7 @@ static int end_line(parser_t *self) {
}
else {
/* missing trailing delimiters */
- if ((self->lines >= self->header + 1) && fields < ex_fields) {
+ if ((self->lines >= self->header_end + 1) && fields < ex_fields) {
/* Might overrun the buffer when closing fields */
if (make_stream_space(self, ex_fields - fields) < 0) {
diff --git a/pandas/src/parser/tokenizer.h b/pandas/src/parser/tokenizer.h
index 566e89ae5f9a7..5ba1b99a29d39 100644
--- a/pandas/src/parser/tokenizer.h
+++ b/pandas/src/parser/tokenizer.h
@@ -195,6 +195,8 @@ typedef struct parser_t {
char thousands;
int header; // Boolean: 1: has header, 0: no header
+ int header_start; // header row start
+ int header_end; // header row end
void *skipset;
int skip_footer;
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 8e48ef094c419..fa2e8131b6916 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4755,9 +4755,15 @@ def test_to_csv_moar(self):
def _do_test(df,path,r_dtype=None,c_dtype=None,rnlvl=None,cnlvl=None,
dupe_col=False):
- with ensure_clean(path) as path:
- df.to_csv(path,encoding='utf8',chunksize=chunksize)
- recons = DataFrame.from_csv(path,parse_dates=False)
+ if cnlvl:
+ header = range(cnlvl)
+ with ensure_clean(path) as path:
+ df.to_csv(path,encoding='utf8',chunksize=chunksize,tupleize_cols=False)
+ recons = DataFrame.from_csv(path,header=range(cnlvl),tupleize_cols=False,parse_dates=False)
+ else:
+ with ensure_clean(path) as path:
+ df.to_csv(path,encoding='utf8',chunksize=chunksize)
+ recons = DataFrame.from_csv(path,header=0,parse_dates=False)
def _to_uni(x):
if not isinstance(x,unicode):
@@ -4773,16 +4779,6 @@ def _to_uni(x):
recons.index = ix
recons = recons.iloc[:,rnlvl-1:]
- if cnlvl:
- def stuple_to_tuple(x):
- import re
- x = x.split(",")
- x = map(lambda x: re.sub("[\'\"\s\(\)]","",x),x)
- return x
-
- cols=MultiIndex.from_tuples(map(stuple_to_tuple,recons.columns))
- recons.columns = cols
-
type_map = dict(i='i',f='f',s='O',u='O',dt='O',p='O')
if r_dtype:
if r_dtype == 'u': # unicode
@@ -4827,7 +4823,6 @@ def stuple_to_tuple(x):
assert_frame_equal(df, recons,check_names=False,check_less_precise=True)
-
N = 100
chunksize=1000
@@ -4962,6 +4957,7 @@ def test_to_csv_multiindex(self):
frame.index = new_index
with ensure_clean(pname) as path:
+
frame.to_csv(path, header=False)
frame.to_csv(path, cols=['A', 'B'])
@@ -4973,7 +4969,7 @@ def test_to_csv_multiindex(self):
self.assertEqual(frame.index.names, df.index.names)
self.frame.index = old_index # needed if setUP becomes a classmethod
- # try multiindex with dates
+ # try multiindex with dates
tsframe = self.tsframe
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
@@ -4994,6 +4990,102 @@ def test_to_csv_multiindex(self):
assert_almost_equal(recons.values, self.tsframe.values)
self.tsframe.index = old_index # needed if setUP becomes classmethod
+ with ensure_clean(pname) as path:
+ # GH3571, GH1651, GH3141
+
+ def _make_frame(names=None):
+ if names is True:
+ names = ['first','second']
+ return DataFrame(np.random.randint(0,10,size=(3,3)),
+ columns=MultiIndex.from_tuples([('bah', 'foo'),
+ ('bah', 'bar'),
+ ('ban', 'baz')],
+ names=names))
+
+ # column & index are multi-index
+ df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
+ df.to_csv(path,tupleize_cols=False)
+ result = read_csv(path,header=[0,1,2,3],index_col=[0,1],tupleize_cols=False)
+ assert_frame_equal(df,result)
+
+ # column is mi
+ df = mkdf(5,3,r_idx_nlevels=1,c_idx_nlevels=4)
+ df.to_csv(path,tupleize_cols=False)
+ result = read_csv(path,header=[0,1,2,3],index_col=0,tupleize_cols=False)
+ assert_frame_equal(df,result)
+
+ # dup column names?
+ df = mkdf(5,3,r_idx_nlevels=3,c_idx_nlevels=4)
+ df.to_csv(path,tupleize_cols=False)
+ result = read_csv(path,header=[0,1,2,3],index_col=[0,1],tupleize_cols=False)
+ result.columns = ['R2','A','B','C']
+ new_result = result.reset_index().set_index(['R0','R1','R2'])
+ new_result.columns = df.columns
+ assert_frame_equal(df,new_result)
+
+ # writing with no index
+ df = _make_frame()
+ df.to_csv(path,tupleize_cols=False,index=False)
+ result = read_csv(path,header=[0,1],tupleize_cols=False)
+ assert_frame_equal(df,result)
+
+ # we lose the names here
+ df = _make_frame(True)
+ df.to_csv(path,tupleize_cols=False,index=False)
+ result = read_csv(path,header=[0,1],tupleize_cols=False)
+ self.assert_(all([ x is None for x in result.columns.names ]))
+ result.columns.names = df.columns.names
+ assert_frame_equal(df,result)
+
+ # tupleize_cols=True and index=False
+ df = _make_frame(True)
+ df.to_csv(path,tupleize_cols=True,index=False)
+ result = read_csv(path,header=0,tupleize_cols=True,index_col=None)
+ result.columns = df.columns
+ assert_frame_equal(df,result)
+
+ # whatsnew example
+ df = _make_frame()
+ df.to_csv(path,tupleize_cols=False)
+ result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)
+ assert_frame_equal(df,result)
+
+ df = _make_frame(True)
+ df.to_csv(path,tupleize_cols=False)
+ result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)
+ assert_frame_equal(df,result)
+
+ # column & index are multi-index (compatibility)
+ df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
+ df.to_csv(path,tupleize_cols=True)
+ result = read_csv(path,header=0,index_col=[0,1],tupleize_cols=True)
+ result.columns = df.columns
+ assert_frame_equal(df,result)
+
+ # invalid options
+ df = _make_frame(True)
+ df.to_csv(path,tupleize_cols=False)
+
+ # catch invalid headers
+ try:
+ read_csv(path,tupleize_cols=False,header=range(3),index_col=0)
+ except (Exception), detail:
+ if not str(detail).startswith('Passed header=[0,1,2] are too many rows for this multi_index of columns'):
+ raise AssertionError("failure in read_csv header=range(3)")
+
+ try:
+ read_csv(path,tupleize_cols=False,header=range(7),index_col=0)
+ except (Exception), detail:
+ if not str(detail).startswith('Passed header=[0,1,2,3,4,5,6], len of 7, but only 6 lines in file'):
+ raise AssertionError("failure in read_csv header=range(7)")
+
+ for i in [3,4,5,6,7]:
+ self.assertRaises(Exception, read_csv, path, tupleize_cols=False, header=range(i), index_col=0)
+ self.assertRaises(Exception, read_csv, path, tupleize_cols=False, header=[0,2], index_col=0)
+
+ # write with cols
+ self.assertRaises(Exception, df.to_csv, path,tupleize_cols=False,cols=['foo','bar'])
+
with ensure_clean(pname) as path:
# empty
tsframe[:0].to_csv(path)
| In theory should close:
#3571, #1651, #3141
Works, but a couple of issues/caveats:
- ~~index_col needs to be specified as an integer list (can be fixed)~~
- header is a list of rows to read that contain the multi-index,
a row that is skipped (e.g. [0,1,3,5], will just be skipped, like a comment)
- the writing format might be a bit odd: the col names go in the first column,
other index_cols are blanks (they are separated, just == '')
The names of an multi-index on the index are after the columns and before the data,
and are a full row (but blank after the row names).
- ~~I am not sure if we should allow `df.to_csv('path',index=False)` when have a multi-index columns, could just ban it I guess (mainly as it screws up the write format, and then where do you put the names?)~~
- ~~The `cols` argument needs testing and prob is broken when using multi-index on the columns (it really should be specified as a tuple I think, but that is work, so maybe just ban it when using multi-index columns)~~
- needs more testing
```
In [14]: df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
In [15]: df.to_csv('test.csv')
In [16]: !cat 'test.csv'
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
In [17]: res = read_csv('test.csv',header=[0,1,2,3],index_col=[0,1])
In [18]: res.index
Out[18]:
MultiIndex
[(u'R_l0_g0', u'R_l1_g0'), (u'R_l0_g1', u'R_l1_g1'), (u'R_l0_g2', u'R_l1_g2'), (u'R_l0_g3', u'R_l1_g3'), (u'R_l0_g4', u'R_l1_g4')]
In [19]: res.columns
Out[19]:
MultiIndex
[(u'C_l0_g0', u'C_l1_g0', u'C_l2_g0', u'C_l3_g0'), (u'C_l0_g1', u'C_l1_g1', u'C_l2_g1', u'C_l3_g1'), (u'C_l0_g2', u'C_l1_g2', u'C_l2_g2', u'C_l3_g2')]
In [20]: res
Out[20]:
C0 C_l0_g0 C_l0_g1 C_l0_g2
C1 C_l1_g0 C_l1_g1 C_l1_g2
C2 C_l2_g0 C_l2_g1 C_l2_g2
C3 C_l3_g0 C_l3_g1 C_l3_g2
R0 R1
R_l0_g0 R_l1_g0 R0C0 R0C1 R0C2
R_l0_g1 R_l1_g1 R1C0 R1C1 R1C2
R_l0_g2 R_l1_g2 R2C0 R2C1 R2C2
R_l0_g3 R_l1_g3 R3C0 R3C1 R3C2
R_l0_g4 R_l1_g4 R4C0 R4C1 R4C2
In [21]: res.index.names
Out[21]: ['R0', 'R1']
In [22]: res.columns.names
Out[22]: ['C0', 'C1', 'C2', 'C3']
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3575 | 2013-05-11T04:08:31Z | 2013-05-19T14:22:04Z | 2013-05-19T14:22:04Z | 2014-06-12T12:04:57Z |
ENH: plot only numeric data and raise an exception *before* plotting if there is no numeric data | diff --git a/RELEASE.rst b/RELEASE.rst
index b4f3fa1999c8a..efc0f912060b7 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -67,6 +67,10 @@ pandas 0.11.1
to specify custom column names of the returned DataFrame (GH3649_),
thanks @hoechenberger
- ``read_html`` no longer performs hard date conversion
+ - Plotting functions now raise a ``TypeError`` before trying to plot anything
+ if the associated objects have have a dtype of ``object`` (GH1818_,
+ GH3572_). This happens before any drawing takes place which elimnates any
+ spurious plots from showing up.
**API Changes**
@@ -89,6 +93,9 @@ pandas 0.11.1
- Raise on ``iloc`` when boolean indexing with a label based indexer mask
e.g. a boolean Series, even with integer labels, will raise. Since ``iloc``
is purely positional based, the labels on the Series are not alignable (GH3631_)
+ - The ``raise_on_error`` option to plotting methods is obviated by GH3572_,
+ so it is removed. Plots now always raise when data cannot be plotted or the
+ object being plotted has a dtype of ``object``.
**Bug Fixes**
@@ -227,6 +234,8 @@ pandas 0.11.1
.. _GH3659: https://github.com/pydata/pandas/issues/3659
.. _GH3649: https://github.com/pydata/pandas/issues/3649
.. _Gh3616: https://github.com/pydata/pandas/issues/3616
+.. _GH1818: https://github.com/pydata/pandas/issues/1818
+.. _GH3572: https://github.com/pydata/pandas/issues/3572
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 6ff3afeb69581..9209c3938023e 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -61,6 +61,13 @@ API changes
``df.iloc[mask]`` will raise a ``ValueError``
+ - The ``raise_on_error`` argument to plotting functions is removed. Instead,
+ plotting functions raise a ``TypeError`` when the ``dtype`` of the object
+ is ``object`` to remind you to avoid ``object`` arrays whenever possible
+ and thus you should cast to an appropriate numeric dtype if you need to
+ plot something.
+
+
Enhancements
~~~~~~~~~~~~
@@ -118,7 +125,7 @@ Enhancements
The last element yielded by the iterator will be a ``Series`` containing
the last element of the longest string in the ``Series`` with all other
- elements being ``NaN``. Here since ``'slow`` is the longest string
+ elements being ``NaN``. Here since ``'slow'`` is the longest string
and there are no other strings with the same length ``'w'`` is the only
non-null string in the yielded ``Series``.
@@ -158,6 +165,11 @@ Enhancements
- ``pd.melt()`` now accepts the optional parameters ``var_name`` and ``value_name``
to specify custom column names of the returned DataFrame.
+ - Plotting functions now raise a ``TypeError`` before trying to plot anything
+ if the associated objects have have a ``dtype`` of ``object`` (GH1818_,
+ GH3572_). This happens before any drawing takes place which elimnates any
+ spurious plots from showing up.
+
Bug Fixes
~~~~~~~~~
@@ -227,3 +239,5 @@ on GitHub for a complete list.
.. _GH3605: https://github.com/pydata/pandas/issues/3605
.. _GH3606: https://github.com/pydata/pandas/issues/3606
.. _GH3656: https://github.com/pydata/pandas/issues/3656
+.. _GH1818: https://github.com/pydata/pandas/issues/1818
+.. _GH3572: https://github.com/pydata/pandas/issues/3572
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 197b26014a760..5a1411ccf577e 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -187,6 +187,27 @@ def test_bootstrap_plot(self):
from pandas.tools.plotting import bootstrap_plot
_check_plot_works(bootstrap_plot, self.ts, size=10)
+ @slow
+ def test_all_invalid_plot_data(self):
+ s = Series(list('abcd'))
+ kinds = 'line', 'bar', 'barh', 'kde', 'density'
+
+ for kind in kinds:
+ self.assertRaises(TypeError, s.plot, kind=kind)
+
+ @slow
+ def test_partially_invalid_plot_data(self):
+ s = Series(['a', 'b', 1.0, 2])
+ kinds = 'line', 'bar', 'barh', 'kde', 'density'
+
+ for kind in kinds:
+ self.assertRaises(TypeError, s.plot, kind=kind)
+
+ @slow
+ def test_invalid_kind(self):
+ s = Series([1, 2])
+ self.assertRaises(ValueError, s.plot, kind='aasdf')
+
class TestDataFramePlots(unittest.TestCase):
@@ -249,11 +270,9 @@ def test_nonnumeric_exclude(self):
plt.close('all')
df = DataFrame({'A': ["x", "y", "z"], 'B': [1,2,3]})
- ax = df.plot(raise_on_error=False) # it works
+ ax = df.plot()
self.assert_(len(ax.get_lines()) == 1) #B was plotted
- self.assertRaises(Exception, df.plot)
-
@slow
def test_label(self):
import matplotlib.pyplot as plt
@@ -688,6 +707,26 @@ def test_unordered_ts(self):
ydata = ax.lines[0].get_ydata()
self.assert_(np.all(ydata == np.array([1.0, 2.0, 3.0])))
+ @slow
+ def test_all_invalid_plot_data(self):
+ kinds = 'line', 'bar', 'barh', 'kde', 'density'
+ df = DataFrame(list('abcd'))
+ for kind in kinds:
+ self.assertRaises(TypeError, df.plot, kind=kind)
+
+ @slow
+ def test_partially_invalid_plot_data(self):
+ kinds = 'line', 'bar', 'barh', 'kde', 'density'
+ df = DataFrame(np.random.randn(10, 2), dtype=object)
+ df[np.random.rand(df.shape[0]) > 0.5] = 'a'
+ for kind in kinds:
+ self.assertRaises(TypeError, df.plot, kind=kind)
+
+ @slow
+ def test_invalid_kind(self):
+ df = DataFrame(np.random.randn(10, 2))
+ self.assertRaises(ValueError, df.plot, kind='aasdf')
+
class TestDataFrameGroupByPlots(unittest.TestCase):
@classmethod
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 751f5fcdb82b2..583aecdbf9290 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -1,6 +1,5 @@
# being a bit too dynamic
# pylint: disable=E1101
-from itertools import izip
import datetime
import warnings
import re
@@ -701,10 +700,8 @@ class MPLPlot(object):
"""
_default_rot = 0
- _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
- 'raise_on_error']
- _attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
- 'raise_on_error': True}
+ _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog']
+ _attr_defaults = {'logy': False, 'logx': False, 'loglog': False}
def __init__(self, data, kind=None, by=None, subplots=False, sharex=True,
sharey=False, use_index=True,
@@ -875,7 +872,27 @@ def _get_layout(self):
return (len(self.data.columns), 1)
def _compute_plot_data(self):
- pass
+ try:
+ # might be a frame
+ numeric_data = self.data._get_numeric_data()
+ except AttributeError:
+ # a series, but no object dtypes allowed!
+ if self.data.dtype == np.object_:
+ raise TypeError('invalid dtype for plotting, please cast to a '
+ 'numeric dtype explicitly if you want to plot')
+
+ numeric_data = self.data
+
+ try:
+ is_empty = numeric_data.empty
+ except AttributeError:
+ is_empty = not len(numeric_data)
+
+ # no empty frames or series allowed
+ if is_empty:
+ raise TypeError('No numeric data to plot')
+
+ self.data = numeric_data
def _make_plot(self):
raise NotImplementedError
@@ -1184,27 +1201,17 @@ def _make_plot(self):
else:
args = (ax, x, y, style)
- try:
- newline = plotf(*args, **kwds)[0]
- lines.append(newline)
- leg_label = label
- if self.mark_right and self.on_right(i):
- leg_label += ' (right)'
- labels.append(leg_label)
- ax.grid(self.grid)
-
- if self._is_datetype():
- left, right = _get_xlim(lines)
- ax.set_xlim(left, right)
- except AttributeError as inst: # non-numeric
- msg = ('Unable to plot data %s vs index %s,\n'
- 'error was: %s' % (str(y), str(x), str(inst)))
- if not self.raise_on_error:
- print msg
- else:
- msg = msg + ('\nConsider setting raise_on_error=False'
- 'to suppress')
- raise Exception(msg)
+ newline = plotf(*args, **kwds)[0]
+ lines.append(newline)
+ leg_label = label
+ if self.mark_right and self.on_right(i):
+ leg_label += ' (right)'
+ labels.append(leg_label)
+ ax.grid(self.grid)
+
+ if self._is_datetype():
+ left, right = _get_xlim(lines)
+ ax.set_xlim(left, right)
self._make_legend(lines, labels)
@@ -1223,22 +1230,12 @@ def to_leg_label(label, i):
return label
def _plot(data, col_num, ax, label, style, **kwds):
- try:
- newlines = tsplot(data, plotf, ax=ax, label=label,
- style=style, **kwds)
- ax.grid(self.grid)
- lines.append(newlines[0])
- leg_label = to_leg_label(label, col_num)
- labels.append(leg_label)
- except AttributeError as inst: #non-numeric
- msg = ('Unable to plot %s,\n'
- 'error was: %s' % (str(data), str(inst)))
- if not self.raise_on_error:
- print msg
- else:
- msg = msg + ('\nConsider setting raise_on_error=False'
- 'to suppress')
- raise Exception(msg)
+ newlines = tsplot(data, plotf, ax=ax, label=label,
+ style=style, **kwds)
+ ax.grid(self.grid)
+ lines.append(newlines[0])
+ leg_label = to_leg_label(label, col_num)
+ labels.append(leg_label)
if isinstance(data, Series):
ax = self._get_ax(0) # self.axes[0]
@@ -1610,8 +1607,8 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None,
If not passed, uses gca()
style : string, default matplotlib default
matplotlib line style to use
- grid : matplot grid
- legend: matplot legende
+ grid : matplotlib grid
+ legend: matplotlib legend
logx : boolean, default False
For line plots, use log scaling on x axis
logy : boolean, default False
@@ -1633,6 +1630,8 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None,
klass = BarPlot
elif kind == 'kde':
klass = KdePlot
+ else:
+ raise ValueError('Invalid chart type given %s' % kind)
"""
If no axis is specified, we check whether there are existing figures.
diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py
index 5d7dc880b2868..eae04081e7479 100644
--- a/pandas/tseries/tests/test_plotting.py
+++ b/pandas/tseries/tests/test_plotting.py
@@ -87,18 +87,14 @@ def test_nonnumeric_exclude(self):
idx = date_range('1/1/1987', freq='A', periods=3)
df = DataFrame({'A': ["x", "y", "z"], 'B': [1,2,3]}, idx)
- self.assertRaises(Exception, df.plot)
plt.close('all')
- ax = df.plot(raise_on_error=False) # it works
+ ax = df.plot() # it works
self.assert_(len(ax.get_lines()) == 1) #B was plotted
plt.close('all')
- self.assertRaises(Exception, df.A.plot)
- plt.close('all')
- ax = df['A'].plot(raise_on_error=False) # it works
- self.assert_(len(ax.get_lines()) == 0)
+ self.assertRaises(TypeError, df['A'].plot)
@slow
def test_tsplot(self):
| Raise a `TypeError` alerting the user to the fact that they are trying to plot nonnumeric data, or if there are any numeric data plot those. closes #1818.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3572 | 2013-05-11T00:06:40Z | 2013-05-21T21:01:11Z | 2013-05-21T21:01:11Z | 2014-06-13T08:15:50Z |
DOC: document non-preservation of dtypes across rows with iterrows | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 5739fe0922d6d..da3bbcb4f0dc2 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -835,7 +835,6 @@ containing the data in each row:
...: print '%s\n%s' % (row_index, row)
...:
-
For instance, a contrived way to transpose the dataframe would be:
.. ipython:: python
@@ -847,6 +846,18 @@ For instance, a contrived way to transpose the dataframe would be:
df2_t = DataFrame(dict((idx,values) for idx, values in df2.iterrows()))
print df2_t
+.. note::
+
+ ``iterrows`` does **not** preserve dtypes across the rows (dtypes are
+ preserved across columns for DataFrames). For example,
+
+ .. ipython:: python
+
+ df = DataFrame([[1, 1.0]], columns=['x', 'y'])
+ row = next(df.iterrows())[1]
+ print row['x'].dtype
+ print df['x'].dtype
+
itertuples
~~~~~~~~~~
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 0ffdcb0e036ce..ad1429fcea1ca 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -772,7 +772,25 @@ def iteritems(self):
def iterrows(self):
"""
- Iterate over rows of DataFrame as (index, Series) pairs
+ Iterate over rows of DataFrame as (index, Series) pairs.
+
+ Notes
+ -----
+
+ * ``iterrows`` does **not** preserve dtypes across the rows (dtypes
+ are preserved across columns for DataFrames). For example,
+
+ >>> df = DataFrame([[1, 1.0]], columns=['x', 'y'])
+ >>> row = next(df.iterrows())[1]
+ >>> print row['x'].dtype
+ float64
+ >>> print df['x'].dtype
+ int64
+
+ Returns
+ -------
+ it : generator
+ A generator that iterates over the rows of the frame.
"""
columns = self.columns
for k, v in izip(self.index, self.values):
| iterrows doesn't preserve dtypes across rows, document it, #3566. i learned that you can nest ..ipython:: blocks in ..note:: blocks in sphinx. sweet.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3569 | 2013-05-10T23:09:09Z | 2013-05-11T11:47:28Z | 2013-05-11T11:47:28Z | 2014-07-07T00:24:42Z |
DOC: add bs4/lxml install note to README.rst | diff --git a/README.rst b/README.rst
index ea713006c7189..ee728ce7fc7a4 100644
--- a/README.rst
+++ b/README.rst
@@ -90,6 +90,12 @@ Optional dependencies
* openpyxl version 1.6.1 or higher, for writing .xlsx files
* xlrd >= 0.9.0
* Needed for Excel I/O
+ * `lxml <http://lxml.de>`__, or `Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for reading HTML tables
+ * The differences between lxml and Beautiful Soup 4 are mostly speed (lxml
+ is faster), however sometimes Beautiful Soup returns what you might
+ intuitively expect. Both backends are implemented, so try them both to
+ see which one you like. They should return very similar results.
+ * Note that lxml requires Cython to build successfully
* `boto <https://pypi.python.org/pypi/boto>`__: necessary for Amazon S3 access.
| Glanced at the GitHub landing page and didn't see this.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3565 | 2013-05-10T15:30:06Z | 2013-05-10T22:32:39Z | 2013-05-10T22:32:39Z | 2014-06-14T20:18:33Z |
BUG: Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562) | diff --git a/RELEASE.rst b/RELEASE.rst
index 8e48395efc9ab..82f88a0c8e592 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -77,6 +77,7 @@ pandas 0.11.1
- ``.loc`` was not raising when passed an integer list (GH3449_)
- Unordered time series selection was misbehaving when using label slicing (GH3448_)
- Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_)
+ - Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_)
- Fix sorting in a frame with a list of columns which contains datetime64[ns] dtypes (GH3461_)
- DataFrames fetched via FRED now handle '.' as a NaN. (GH3469_)
- Fix regression in a DataFrame apply with axis=1, objects were not being converted back
@@ -137,6 +138,7 @@ pandas 0.11.1
.. _GH3495: https://github.com/pydata/pandas/issues/3495
.. _GH3492: https://github.com/pydata/pandas/issues/3492
.. _GH3552: https://github.com/pydata/pandas/issues/3552
+.. _GH3562: https://github.com/pydata/pandas/issues/3562
.. _GH3493: https://github.com/pydata/pandas/issues/3493
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 13e1654963844..b6459b0e461b4 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1960,7 +1960,8 @@ def form_blocks(arrays, names, axes):
items = axes[0]
if len(arrays) < len(items):
- extra_items = items - Index(names)
+ nn = set(names)
+ extra_items = Index([ i for i in items if i not in nn ])
else:
extra_items = []
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 0c9dd21d2f645..7e7813e048bd1 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -3518,6 +3518,16 @@ def test_from_records_misc_brokenness(self):
results = df2_obj.get_dtype_counts()
expected = Series({ 'datetime64[ns]' : 1, 'int64' : 1 })
+ def test_from_records_empty(self):
+ # 3562
+ result = DataFrame.from_records([], columns=['a','b','c'])
+ expected = DataFrame(columns=['a','b','c'])
+ assert_frame_equal(result, expected)
+
+ result = DataFrame.from_records([], columns=['a','b','b'])
+ expected = DataFrame(columns=['a','b','b'])
+ assert_frame_equal(result, expected)
+
def test_to_records_floats(self):
df = DataFrame(np.random.rand(10, 10))
df.to_records()
| closes #3562
| https://api.github.com/repos/pandas-dev/pandas/pulls/3564 | 2013-05-10T15:00:17Z | 2013-05-10T16:01:33Z | 2013-05-10T16:01:33Z | 2014-06-29T09:51:40Z |
BUG: (GH3561) non-unique indexers with a list-like now return in the same order as the passed values | diff --git a/RELEASE.rst b/RELEASE.rst
index 31627cec01d1e..4e6570669656d 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -91,6 +91,7 @@ pandas 0.11.1
(removed warning) (GH2786_), and fix (GH3230_)
- Fix to_csv to handle non-unique columns (GH3495_)
- Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_)
+ and handle missing elements like unique indices (GH3561_)
- Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_)
- Fixed bug in groupby with empty series referencing a variable before assignment. (GH3510_)
- Fixed bug in mixed-frame assignment with aligned series (GH3492_)
@@ -148,6 +149,7 @@ pandas 0.11.1
.. _GH3552: https://github.com/pydata/pandas/issues/3552
.. _GH3562: https://github.com/pydata/pandas/issues/3562
.. _GH3586: https://github.com/pydata/pandas/issues/3586
+.. _GH3561: https://github.com/pydata/pandas/issues/3561
.. _GH3493: https://github.com/pydata/pandas/issues/3493
.. _GH3579: https://github.com/pydata/pandas/issues/3579
.. _GH3593: https://github.com/pydata/pandas/issues/3593
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index d67a2d51cc1b8..55b7e653c3630 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -1368,6 +1368,9 @@ incompatible the new object internals are with the ``Index`` functions):
- ``slice_locs``: returns the "range" to slice between two labels
- ``get_indexer``: Computes the indexing vector for reindexing / data
alignment purposes. See the source / docstrings for more on this
+ - ``get_indexer_non_unique``: Computes the indexing vector for reindexing / data
+ alignment purposes when the index is non-unique. See the source / docstrings
+ for more on this
- ``reindex``: Does any pre-conversion of the input index then calls
``get_indexer``
- ``union``, ``intersection``: computes the union or intersection of two
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 7baae543714ec..3e5a4f5676437 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -859,6 +859,25 @@ def get_indexer(self, target, method=None, limit=None):
return com._ensure_platform_int(indexer)
+ def get_indexer_non_unique(self, target, **kwargs):
+ """ return an indexer suitable for taking from a non unique index
+ return the labels in the same order as the target, and
+ return a missing indexer into the target (missing are marked as -1
+ in the indexer); target must be an iterable """
+ target = _ensure_index(target)
+ pself, ptarget = self._possibly_promote(target)
+ if pself is not self or ptarget is not target:
+ return pself.get_indexer_non_unique(ptarget)
+
+ if self.is_all_dates:
+ self = Index(self.asi8)
+ tgt_values = target.asi8
+ else:
+ tgt_values = target.values
+
+ indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
+ return Index(indexer), missing
+
def _possibly_promote(self, other):
# A hack, but it works
from pandas.tseries.index import DatetimeIndex
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index bc8b7a3646a33..29adce4e02591 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -458,8 +458,23 @@ def _reindex(keys, level=None):
if labels.is_unique:
return _reindex(keyarr, level=level)
else:
- mask = labels.isin(keyarr)
- return self.obj.take(mask.nonzero()[0], axis=axis, convert=False)
+ indexer, missing = labels.get_indexer_non_unique(keyarr)
+ check = indexer != -1
+ result = self.obj.take(indexer[check], axis=axis, convert=False)
+
+ # need to merge the result labels and the missing labels
+ if len(missing):
+ l = np.arange(len(indexer))
+
+ missing_labels = keyarr.take(missing)
+ missing_labels_indexer = l[~check]
+ cur_labels = result._get_axis(axis).values
+ cur_labels_indexer = l[check]
+ new_labels = lib.combine_from_indexers(cur_labels, cur_labels_indexer,
+ missing_labels, missing_labels_indexer)
+ result = result.reindex_axis(new_labels,axis=axis)
+
+ return result
def _convert_to_indexer(self, obj, axis=0):
"""
@@ -569,20 +584,8 @@ def _convert_to_indexer(self, obj, axis=0):
# non-unique (dups)
else:
- indexer = []
- check = np.arange(len(labels))
- lvalues = labels.values
- for x in objarr:
- # ugh
- to_or = lib.map_infer(lvalues, x.__eq__)
- if not to_or.any():
- raise KeyError('%s not in index' % str(x))
-
- # add the indicies (as we want to take)
- indexer.extend(check[to_or])
-
- indexer = Index(indexer)
-
+ indexer, missing = labels.get_indexer_non_unique(objarr)
+ check = indexer
mask = check == -1
if mask.any():
diff --git a/pandas/index.pyx b/pandas/index.pyx
index 2ad5474549ec6..7d33d6083d0eb 100644
--- a/pandas/index.pyx
+++ b/pandas/index.pyx
@@ -267,8 +267,46 @@ cdef class IndexEngine:
self._ensure_mapping_populated()
return self.mapping.lookup(values)
+ def get_indexer_non_unique(self, targets):
+ """ return an indexer suitable for takng from a non unique index
+ return the labels in the same order ast the target
+ and a missing indexer into the targets (which correspond
+ to the -1 indicies in the results """
+ cdef:
+ ndarray values
+ ndarray[int64_t] result, missing
+ object v, val
+ int count = 0, count_missing = 0
+ Py_ssize_t i, j, n, found
+
+ self._ensure_mapping_populated()
+ values = self._get_index_values()
+ n = len(values)
+ n_t = len(targets)
+ result = np.empty(n+n_t, dtype=np.int64)
+ missing = np.empty(n_t, dtype=np.int64)
+
+ for i in range(n_t):
+ val = util.get_value_at(targets, i)
+ found = 0
+
+ for j in range(n):
+ v = util.get_value_at(values, j)
+
+ if v == val:
+ result[count] = j
+ count += 1
+ found = 1
+
+ # value not found
+ if found == 0:
+ result[count] = -1
+ count += 1
+ missing[count_missing] = i
+ count_missing += 1
+ return result[0:count], missing[0:count_missing]
cdef class Int64Engine(IndexEngine):
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index d043691bc061e..30c65d9fcdd9f 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -416,6 +416,25 @@ def dicts_to_array(list dicts, list columns):
return result
+@cython.wraparound(False)
+@cython.boundscheck(False)
+def combine_from_indexers(ndarray a, ndarray[int64_t] a_indexer,
+ ndarray b, ndarray[int64_t] b_indexer):
+ cdef:
+ Py_ssize_t i, n_a, n_b
+ ndarray result
+
+ n_a = len(a)
+ n_b = len(b)
+ result = np.empty(n_a+n_b,dtype=object)
+
+ for i in range(n_a):
+ result[a_indexer[i]] = a[i]
+ for i in range(n_b):
+ result[b_indexer[i]] = b[i]
+
+ return result
+
def fast_zip(list ndarrays):
'''
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index ce89dda63597f..e92cc22dccaf6 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4668,8 +4668,29 @@ def _check_df(df,cols=None):
with ensure_clean() as path:
df.to_csv(path,cols = cols,chunksize=chunksize)
rs_c = pd.read_csv(path,index_col=0)
- rs_c.columns = df.columns
- assert_frame_equal(df,rs_c,check_names=False)
+
+ # we wrote them in a different order
+ # so compare them in that order
+ if cols is not None:
+
+ if df.columns.is_unique:
+ rs_c.columns = cols
+ else:
+ indexer, missing = df.columns.get_indexer_non_unique(cols)
+ rs_c.columns = df.columns.take(indexer)
+
+ for c in cols:
+ obj_df = df[c]
+ obj_rs = rs_c[c]
+ if isinstance(obj_df,Series):
+ assert_series_equal(obj_df,obj_rs)
+ else:
+ assert_frame_equal(obj_df,obj_rs,check_names=False)
+
+ # wrote in the same order
+ else:
+ rs_c.columns = df.columns
+ assert_frame_equal(df,rs_c,check_names=False)
chunksize=5
N = int(chunksize*2.5)
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 01651f2674a90..46fd98fc14ffb 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -784,6 +784,28 @@ def test_dups_fancy_indexing(self):
assert_frame_equal(df,result)
+ # GH 3561, dups not in selected order
+ ind = ['A', 'A', 'B', 'C']
+ df = DataFrame({'test':range(len(ind))}, index=ind)
+ rows = ['C', 'B']
+ res = df.ix[rows]
+ self.assert_(rows == list(res.index))
+
+ res = df.ix[Index(rows)]
+ self.assert_(Index(rows).equals(res.index))
+
+ rows = ['C','B','E']
+ res = df.ix[rows]
+ self.assert_(rows == list(res.index))
+
+ # inconcistent returns for unique/duplicate indices when values are missing
+ df = DataFrame(randn(4,3),index=list('ABCD'))
+ expected = df.ix[['E']]
+
+ dfnu = DataFrame(randn(5,3),index=list('AABCD'))
+ result = dfnu.ix[['E']]
+ assert_frame_equal(result, expected)
+
def test_indexing_mixed_frame_bug(self):
# GH3492
| close #3561
| https://api.github.com/repos/pandas-dev/pandas/pulls/3563 | 2013-05-10T14:42:27Z | 2013-05-14T21:44:58Z | 2013-05-14T21:44:57Z | 2014-07-10T09:20:30Z |
ENH: unicode of PeriodIndex returns valid Python code | diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index d9dfa51bc0bff..abb7486de9351 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -1102,6 +1102,25 @@ def __repr__(self):
output += 'length: %d' % len(self)
return output
+ def __unicode__(self):
+ output = self.__class__.__name__
+ output += u'('
+ prefix = '' if py3compat.PY3 else 'u'
+ mapper = "{0}'{{0}}'".format(prefix)
+ output += '[{0}]'.format(', '.join(map(mapper.format, self)))
+ output += ", freq='{0}'".format(self.freq)
+ output += ')'
+ return output
+
+ def __bytes__(self):
+ encoding = com.get_option('display.encoding')
+ return self.__unicode__().encode(encoding, 'replace')
+
+ def __str__(self):
+ if py3compat.PY3:
+ return self.__unicode__()
+ return self.__bytes__()
+
def take(self, indices, axis=None):
"""
Analogous to ndarray.take
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index 10a5e039b9fc6..95de08909a50a 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -1616,6 +1616,83 @@ def test_ts_repr(self):
ts = Series(np.random.randn(len(index)), index=index)
repr(ts)
+ def test_period_index_unicode(self):
+ pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
+ assert_equal(len(pi), 9)
+ assert_equal(pi, eval(unicode(pi)))
+
+ pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
+ assert_equal(len(pi), 4 * 9)
+ assert_equal(pi, eval(unicode(pi)))
+
+ pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
+ assert_equal(len(pi), 12 * 9)
+ assert_equal(pi, eval(unicode(pi)))
+
+ start = Period('02-Apr-2005', 'B')
+ i1 = PeriodIndex(start=start, periods=20)
+ assert_equal(len(i1), 20)
+ assert_equal(i1.freq, start.freq)
+ assert_equal(i1[0], start)
+ assert_equal(i1, eval(unicode(i1)))
+
+ end_intv = Period('2006-12-31', 'W')
+ i1 = PeriodIndex(end=end_intv, periods=10)
+ assert_equal(len(i1), 10)
+ assert_equal(i1.freq, end_intv.freq)
+ assert_equal(i1[-1], end_intv)
+ assert_equal(i1, eval(unicode(i1)))
+
+ end_intv = Period('2006-12-31', '1w')
+ i2 = PeriodIndex(end=end_intv, periods=10)
+ assert_equal(len(i1), len(i2))
+ self.assert_((i1 == i2).all())
+ assert_equal(i1.freq, i2.freq)
+ assert_equal(i1, eval(unicode(i1)))
+ assert_equal(i2, eval(unicode(i2)))
+
+ end_intv = Period('2006-12-31', ('w', 1))
+ i2 = PeriodIndex(end=end_intv, periods=10)
+ assert_equal(len(i1), len(i2))
+ self.assert_((i1 == i2).all())
+ assert_equal(i1.freq, i2.freq)
+ assert_equal(i1, eval(unicode(i1)))
+ assert_equal(i2, eval(unicode(i2)))
+
+ try:
+ PeriodIndex(start=start, end=end_intv)
+ raise AssertionError('Cannot allow mixed freq for start and end')
+ except ValueError:
+ pass
+
+ end_intv = Period('2005-05-01', 'B')
+ i1 = PeriodIndex(start=start, end=end_intv)
+ assert_equal(i1, eval(unicode(i1)))
+
+ try:
+ PeriodIndex(start=start)
+ raise AssertionError(
+ 'Must specify periods if missing start or end')
+ except ValueError:
+ pass
+
+ # infer freq from first element
+ i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
+ assert_equal(len(i2), 2)
+ assert_equal(i2[0], end_intv)
+ assert_equal(i2, eval(unicode(i2)))
+
+ i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
+ assert_equal(len(i2), 2)
+ assert_equal(i2[0], end_intv)
+ assert_equal(i2, eval(unicode(i2)))
+
+ # Mixed freq should fail
+ vals = [end_intv, Period('2006-12-31', 'w')]
+ self.assertRaises(ValueError, PeriodIndex, vals)
+ vals = np.array(vals)
+ self.assertRaises(ValueError, PeriodIndex, vals)
+
def test_frame_index_to_string(self):
index = PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M')
frame = DataFrame(np.random.randn(3, 4), index=index)
| addresses #3460.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3560 | 2013-05-10T01:22:14Z | 2013-05-10T09:29:37Z | 2013-05-10T09:29:37Z | 2014-06-23T18:20:54Z |
ENH: raise useful error message on invalid concat arguments | diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 6d224ffcb7b05..7f05a045e36af 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -888,6 +888,11 @@ class _Concatenator(object):
def __init__(self, objs, axis=0, join='outer', join_axes=None,
keys=None, levels=None, names=None,
ignore_index=False, verify_integrity=False):
+ if not isinstance(objs, (tuple, list, dict)):
+ raise AssertionError('first argument must be a list of pandas '
+ 'objects, you passed an object of type '
+ '"{0}"'.format(type(objs).__name__))
+
if join == 'outer':
self.intersect = False
elif join == 'inner':
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index bc9d6f11408b8..2fb527b2eee6b 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -13,7 +13,8 @@
from pandas.tseries.index import DatetimeIndex
from pandas.tools.merge import merge, concat, ordered_merge, MergeError
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
- assert_almost_equal, rands)
+ assert_almost_equal, rands,
+ makeCustomDataframe as mkdf)
import pandas.algos as algos
import pandas.util.testing as tm
@@ -1689,6 +1690,11 @@ def test_concat_series_axis1_same_names_ignore_index(self):
result = concat([s1, s2], axis=1, ignore_index=True)
self.assertTrue(np.array_equal(result.columns, [0, 1]))
+ def test_concat_invalid_first_argument(self):
+ df1 = mkdf(10, 2)
+ df2 = mkdf(10, 2)
+ self.assertRaises(AssertionError, concat, df1, df2)
+
class TestOrderedMerge(unittest.TestCase):
def setUp(self):
| fixes #3481.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3559 | 2013-05-10T00:38:57Z | 2013-05-10T10:43:07Z | 2013-05-10T10:43:07Z | 2014-06-26T17:46:41Z |
fix panel tranpose arguments api bug | diff --git a/RELEASE.rst b/RELEASE.rst
index 8e48395efc9ab..89f7fcc6fdd37 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -101,6 +101,7 @@ pandas 0.11.1
e.g. add datetimes, multiple timedelta x datetime
- Fix ``.diff`` on datelike and timedelta operations (GH3100_)
- ``combine_first`` not returning the same dtype in cases where it can (GH3552_)
+ - Fixed bug with ``Panel.transpose`` argument aliases (GH3556_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -138,6 +139,7 @@ pandas 0.11.1
.. _GH3492: https://github.com/pydata/pandas/issues/3492
.. _GH3552: https://github.com/pydata/pandas/issues/3552
.. _GH3493: https://github.com/pydata/pandas/issues/3493
+.. _GH3556: https://github.com/pydata/pandas/issues/3556
pandas 0.11.0
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 8d6828421c3fa..869bb31acad6b 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -1166,16 +1166,35 @@ def transpose(self, *args, **kwargs):
-------
y : Panel (new object)
"""
-
# construct the args
args = list(args)
+ aliases = tuple(kwargs.iterkeys())
+
for a in self._AXIS_ORDERS:
if not a in kwargs:
- try:
- kwargs[a] = args.pop(0)
- except (IndexError):
- raise ValueError(
- "not enough arguments specified to transpose!")
+ where = map(a.startswith, aliases)
+
+ if any(where):
+ if sum(where) != 1:
+ raise AssertionError(
+ 'Ambiguous parameter aliases "{0}" passed, valid '
+ 'parameter aliases are '
+ '{1}'.format([n for n, m in zip(aliases, where)
+ if m], self._AXIS_ALIASES))
+
+ k = aliases[where.index(True)]
+
+ try:
+ kwargs[self._AXIS_ALIASES[k]] = kwargs.pop(k)
+ except KeyError:
+ raise KeyError('Invalid parameter alias '
+ '"{0}"'.format(k))
+ else:
+ try:
+ kwargs[a] = args.pop(0)
+ except IndexError:
+ raise ValueError(
+ "not enough arguments specified to transpose!")
axes = [self._get_axis_number(kwargs[a]) for a in self._AXIS_ORDERS]
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 437f8b7279824..081af101b643b 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1140,6 +1140,30 @@ def test_transpose(self):
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
+ # test kwargs
+ result = self.panel.transpose(items='minor', major='major',
+ minor='items')
+ expected = self.panel.swapaxes('items', 'minor')
+ assert_panel_equal(result, expected)
+
+ # text mixture of args
+ result = self.panel.transpose('minor', major='major', minor='items')
+ expected = self.panel.swapaxes('items', 'minor')
+ assert_panel_equal(result, expected)
+
+ result = self.panel.transpose('minor', 'major', minor='items')
+ expected = self.panel.swapaxes('items', 'minor')
+ assert_panel_equal(result, expected)
+
+ ## test bad aliases
+ # test ambiguous aliases
+ self.assertRaises(AssertionError, self.panel.transpose, 'minor',
+ maj='major', majo='items')
+
+ # test invalid kwargs
+ self.assertRaises(KeyError, self.panel.transpose, 'minor',
+ maj='major', minor='items')
+
result = self.panel.transpose(2, 1, 0)
assert_panel_equal(result, expected)
| fixes #3556.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3558 | 2013-05-09T23:57:03Z | 2013-05-10T20:08:55Z | 2013-05-10T20:08:55Z | 2014-06-12T15:13:45Z |
fix TypeError for display.multi_sparse == False | diff --git a/pandas/core/format.py b/pandas/core/format.py
index fa2135bb4310c..ecce3b54239c9 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -736,7 +736,7 @@ def _write_hierarchical_rows(self, fmt_values, indent):
row.extend(idx_values[i])
row.extend(fmt_values[j][i] for j in range(ncols))
self.write_tr(row, indent, self.indent_delta, tags=None,
- nindex_levels=len(frame.index.nlevels))
+ nindex_levels=frame.index.nlevels)
def _get_level_lengths(levels):
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 37f08dd177eae..89988a21894b9 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -435,6 +435,114 @@ def test_to_html_escape_disabled(self):
</table>"""
self.assertEqual(xp, rs)
+ def test_to_html_multiindex_sparsify_false_multi_sparse(self):
+ with option_context('display.multi_sparse', False):
+ index = pd.MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],
+ names=['foo', None])
+
+ df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)
+
+ result = df.to_html()
+ expected = """\
+<table border="1" class="dataframe">
+ <thead>
+ <tr style="text-align: right;">
+ <th></th>
+ <th></th>
+ <th>0</th>
+ <th>1</th>
+ </tr>
+ <tr>
+ <th>foo</th>
+ <th></th>
+ <th></th>
+ <th></th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th>0</th>
+ <th>0</th>
+ <td> 0</td>
+ <td> 1</td>
+ </tr>
+ <tr>
+ <th>0</th>
+ <th>1</th>
+ <td> 2</td>
+ <td> 3</td>
+ </tr>
+ <tr>
+ <th>1</th>
+ <th>0</th>
+ <td> 4</td>
+ <td> 5</td>
+ </tr>
+ <tr>
+ <th>1</th>
+ <th>1</th>
+ <td> 6</td>
+ <td> 7</td>
+ </tr>
+ </tbody>
+</table>"""
+ self.assertEquals(result, expected)
+
+ df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]],
+ columns=index[::2], index=index)
+
+ result = df.to_html()
+ expected = """\
+<table border="1" class="dataframe">
+ <thead>
+ <tr>
+ <th></th>
+ <th>foo</th>
+ <th>0</th>
+ <th>1</th>
+ </tr>
+ <tr>
+ <th></th>
+ <th></th>
+ <th>0</th>
+ <th>0</th>
+ </tr>
+ <tr>
+ <th>foo</th>
+ <th></th>
+ <th></th>
+ <th></th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th>0</th>
+ <th>0</th>
+ <td> 0</td>
+ <td> 1</td>
+ </tr>
+ <tr>
+ <th>0</th>
+ <th>1</th>
+ <td> 2</td>
+ <td> 3</td>
+ </tr>
+ <tr>
+ <th>1</th>
+ <th>0</th>
+ <td> 4</td>
+ <td> 5</td>
+ </tr>
+ <tr>
+ <th>1</th>
+ <th>1</th>
+ <td> 6</td>
+ <td> 7</td>
+ </tr>
+ </tbody>
+</table>"""
+ self.assertEquals(result, expected)
+
def test_to_html_multiindex_sparsify(self):
index = pd.MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],
names=['foo', None])
| fixes #3553.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3557 | 2013-05-09T22:25:23Z | 2013-05-10T09:05:54Z | 2013-05-10T09:05:54Z | 2014-07-16T08:08:02Z |
BUG: combine_first not returning the same dtype in cases where it can (GH3552) | diff --git a/RELEASE.rst b/RELEASE.rst
index 1970b00c05add..984dcfa0f1d39 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -38,6 +38,7 @@ pandas 0.11.1
- Fixed various issues with internal pprinting code, the repr() for various objects
including TimeStamp and *Index now produces valid python code strings and
can be used to recreate the object, (GH3038_), (GH3379_), (GH3251_)
+ - ``convert_objects`` now accepts a ``copy`` parameter (defaults to ``True``)
- ``HDFStore``
- will retain index attributes (freq,tz,name) on recreation (GH3499_)
@@ -97,6 +98,7 @@ pandas 0.11.1
- Raise a TypeError on invalid datetime/timedelta operations
e.g. add datetimes, multiple timedelta x datetime
- Fix ``.diff`` on datelike and timedelta operations (GH3100_)
+ - ``combine_first`` not returning the same dtype in cases where it can (GH3552_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -129,6 +131,7 @@ pandas 0.11.1
.. _GH3499: https://github.com/pydata/pandas/issues/3499
.. _GH3495: https://github.com/pydata/pandas/issues/3495
.. _GH3492: https://github.com/pydata/pandas/issues/3492
+.. _GH3552: https://github.com/pydata/pandas/issues/3552
.. _GH3493: https://github.com/pydata/pandas/issues/3493
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3fd95e161b41a..0ffdcb0e036ce 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1627,21 +1627,23 @@ def info(self, verbose=True, buf=None, max_cols=None):
def dtypes(self):
return self.apply(lambda x: x.dtype)
- def convert_objects(self, convert_dates=True, convert_numeric=False):
+ def convert_objects(self, convert_dates=True, convert_numeric=False, copy=True):
"""
Attempt to infer better dtype for object columns
- Always returns a copy (even if no object columns)
Parameters
----------
convert_dates : if True, attempt to soft convert_dates, if 'coerce', force conversion (and non-convertibles get NaT)
convert_numeric : if True attempt to coerce to numerbers (including strings), non-convertibles get NaN
+ copy : boolean, return a copy if True (True by default)
Returns
-------
converted : DataFrame
"""
- return self._constructor(self._data.convert(convert_dates=convert_dates, convert_numeric=convert_numeric))
+ return self._constructor(self._data.convert(convert_dates=convert_dates,
+ convert_numeric=convert_numeric,
+ copy=copy))
#----------------------------------------------------------------------
# properties for index and columns
@@ -3735,7 +3737,10 @@ def combine(self, other, func, fill_value=None, overwrite=True):
result[col] = arr
- return self._constructor(result, index=new_index, columns=new_columns)
+ # convert_objects just in case
+ return self._constructor(result,
+ index=new_index,
+ columns=new_columns).convert_objects(copy=False)
def combine_first(self, other):
"""
@@ -4210,7 +4215,7 @@ def _apply_standard(self, func, axis, ignore_failures=False):
if axis == 1:
result = result.T
- result = result.convert_objects()
+ result = result.convert_objects(copy=False)
return result
else:
diff --git a/pandas/core/series.py b/pandas/core/series.py
index d8e7ea74e0560..3509e226d46fb 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -933,10 +933,9 @@ def astype(self, dtype):
return self._constructor(casted, index=self.index, name=self.name,
dtype=casted.dtype)
- def convert_objects(self, convert_dates=True, convert_numeric=True):
+ def convert_objects(self, convert_dates=True, convert_numeric=True, copy=True):
"""
Attempt to infer better dtype
- Always return a copy
Parameters
----------
@@ -946,6 +945,8 @@ def convert_objects(self, convert_dates=True, convert_numeric=True):
convert_numeric : boolean, default True
if True attempt to coerce to numbers (including strings),
non-convertibles get NaN
+ copy : boolean, default True
+ if True return a copy even if not object dtype
Returns
-------
@@ -955,7 +956,7 @@ def convert_objects(self, convert_dates=True, convert_numeric=True):
return Series(com._possibly_convert_objects(self.values,
convert_dates=convert_dates, convert_numeric=convert_numeric),
index=self.index, name=self.name)
- return self.copy()
+ return self.copy() if copy else self
def repeat(self, reps):
"""
diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index ed33be33ac02a..e893f83f6d640 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -125,7 +125,7 @@ def _consolidate_inplace(self):
# do nothing when DataFrame calls this method
pass
- def convert_objects(self, convert_dates=True):
+ def convert_objects(self, convert_dates=True, convert_numeric=False, copy=True):
# XXX
return self
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 69225c40e36df..0c9dd21d2f645 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -7889,6 +7889,14 @@ def test_combine_first_mixed_bug(self):
expected = DataFrame({ 'A' : [1,2,3,5,3,7.], 'B' : [np.nan,2,3,4,6,8] })
assert_frame_equal(result,expected)
+ # GH3552, return object dtype with bools
+ df1 = DataFrame([[np.nan, 3.,True], [-4.6, np.nan, True], [np.nan, 7., False]])
+ df2 = DataFrame([[-42.6, np.nan, True], [-5., 1.6, False]], index=[1, 2])
+
+ result = df1.combine_first(df2)[2]
+ expected = Series([True,True,False])
+ assert_series_equal(result,expected)
+
def test_update(self):
df = DataFrame([[1.5, nan, 3.],
[1.5, nan, 3.],
| closes #3552
ENH: `convert_objects` now accepts a `copy` parameter (defaults to `True`)
| https://api.github.com/repos/pandas-dev/pandas/pulls/3555 | 2013-05-09T18:45:14Z | 2013-05-09T19:16:14Z | 2013-05-09T19:16:14Z | 2014-06-16T08:28:16Z |
BUG: Fix .diff() on datelike and timedelta operations (GH3100_) | diff --git a/RELEASE.rst b/RELEASE.rst
index ece8de259021f..1970b00c05add 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -96,6 +96,7 @@ pandas 0.11.1
- Properly convert np.datetime64 objects in a Series (GH3416_)
- Raise a TypeError on invalid datetime/timedelta operations
e.g. add datetimes, multiple timedelta x datetime
+ - Fix ``.diff`` on datelike and timedelta operations (GH3100_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -108,6 +109,7 @@ pandas 0.11.1
.. _GH3379: https://github.com/pydata/pandas/issues/3379
.. _GH3480: https://github.com/pydata/pandas/issues/3480
.. _GH2852: https://github.com/pydata/pandas/issues/2852
+.. _GH3100: https://github.com/pydata/pandas/issues/3100
.. _GH3454: https://github.com/pydata/pandas/issues/3454
.. _GH3457: https://github.com/pydata/pandas/issues/3457
.. _GH3491: https://github.com/pydata/pandas/issues/3491
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 893d912dcece8..4aefa73ae8ee8 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -617,9 +617,18 @@ def func(arr, indexer, out, fill_value=np.nan):
def diff(arr, n, axis=0):
+ """ difference of n between self,
+ analagoust to s-s.shift(n) """
+
n = int(n)
dtype = arr.dtype
- if issubclass(dtype.type, np.integer):
+ na = np.nan
+
+ if is_timedelta64_dtype(arr) or is_datetime64_dtype(arr):
+ dtype = 'timedelta64[ns]'
+ arr = arr.view('i8')
+ na = tslib.iNaT
+ elif issubclass(dtype.type, np.integer):
dtype = np.float64
elif issubclass(dtype.type, np.bool_):
dtype = np.object_
@@ -628,7 +637,7 @@ def diff(arr, n, axis=0):
na_indexer = [slice(None)] * arr.ndim
na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
- out_arr[tuple(na_indexer)] = np.nan
+ out_arr[tuple(na_indexer)] = na
if arr.ndim == 2 and arr.dtype.name in _diff_special:
f = _diff_special[arr.dtype.name]
@@ -642,7 +651,24 @@ def diff(arr, n, axis=0):
lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
lag_indexer = tuple(lag_indexer)
- out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]
+ # need to make sure that we account for na for datelike/timedelta
+ # we don't actually want to subtract these i8 numbers
+ if dtype == 'timedelta64[ns]':
+ res = arr[res_indexer]
+ lag = arr[lag_indexer]
+
+ mask = (arr[res_indexer] == na) | (arr[lag_indexer] == na)
+ if mask.any():
+ res = res.copy()
+ res[mask] = 0
+ lag = lag.copy()
+ lag[mask] = 0
+
+ result = res-lag
+ result[mask] = na
+ out_arr[res_indexer] = result
+ else:
+ out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]
return out_arr
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 981d74d8ba94b..6fbce9df753d8 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -4039,6 +4039,17 @@ def test_diff(self):
xp = self.ts - self.ts
assert_series_equal(rs, xp)
+ # datetime diff (GH3100)
+ s = Series(date_range('20130102',periods=5))
+ rs = s-s.shift(1)
+ xp = s.diff()
+ assert_series_equal(rs, xp)
+
+ # timedelta diff
+ nrs = rs-rs.shift(1)
+ nxp = xp.diff()
+ assert_series_equal(nrs, nxp)
+
def test_pct_change(self):
rs = self.ts.pct_change(fill_method=None)
assert_series_equal(rs, self.ts / self.ts.shift(1) - 1)
| close #3100
| https://api.github.com/repos/pandas-dev/pandas/pulls/3554 | 2013-05-09T18:11:21Z | 2013-05-09T18:52:53Z | 2013-05-09T18:52:53Z | 2014-06-12T13:40:05Z |
BUG: raise on invalid operations for timedelta/datetime | diff --git a/RELEASE.rst b/RELEASE.rst
index 7c9982961c01f..ece8de259021f 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -94,6 +94,8 @@ pandas 0.11.1
- Fixed bug in selecting month/quarter/year from a series would not select the time element
on the last day (GH3546_)
- Properly convert np.datetime64 objects in a Series (GH3416_)
+ - Raise a TypeError on invalid datetime/timedelta operations
+ e.g. add datetimes, multiple timedelta x datetime
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 7c0c12c11e177..d8e7ea74e0560 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -75,7 +75,7 @@ def na_op(x, y):
return result
- def wrapper(self, other):
+ def wrapper(self, other, name=name):
from pandas.core.frame import DataFrame
dtype = None
wrap_results = lambda x: x
@@ -123,6 +123,13 @@ def convert_to_array(values):
# 2 datetimes or 2 timedeltas
if (is_timedelta_lhs and is_timedelta_rhs) or (is_datetime_lhs and is_datetime_rhs):
+ if is_datetime_lhs and name not in ['__sub__']:
+ raise TypeError("can only operate on a datetimes for subtraction, "
+ "but the operator [%s] was passed" % name)
+ elif is_timedelta_lhs and name not in ['__add__','__sub__']:
+ raise TypeError("can only operate on a timedeltas for "
+ "addition and subtraction, but the operator [%s] was passed" % name)
+
dtype = 'timedelta64[ns]'
# we may have to convert to object unfortunately here
@@ -135,6 +142,10 @@ def wrap_results(x):
# datetime and timedelta
elif (is_timedelta_lhs and is_datetime_rhs) or (is_timedelta_rhs and is_datetime_lhs):
+
+ if name not in ['__add__','__sub__']:
+ raise TypeError("can only operate on a timedelta and a datetime for "
+ "addition and subtraction, but the operator [%s] was passed" % name)
dtype = 'M8[ns]'
else:
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 78e2cef230e24..981d74d8ba94b 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1889,26 +1889,14 @@ def test_operators_timedelta64(self):
assert_series_equal(result,expected)
self.assert_(result.dtype=='m8[ns]')
- result = df['A'] + datetime(2001,1,1)
- expected = Series([timedelta(days=26663+i) for i in range(3)])
- assert_series_equal(result,expected)
- self.assert_(result.dtype=='m8[ns]')
-
d = datetime(2001,1,1,3,4)
resulta = df['A'] - d
self.assert_(resulta.dtype=='m8[ns]')
- resultb = df['A'] + d
- self.assert_(resultb.dtype=='m8[ns]')
-
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'],resultb)
- # timedelta on lhs
- result = resultb + d
- self.assert_(result.dtype=='m8[ns]')
-
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
@@ -1931,6 +1919,42 @@ def test_operators_timedelta64(self):
self.assert_(result.dtype=='m8[ns]')
assert_series_equal(result,expected)
+ def test_operators_datetimelike(self):
+
+ ### timedelta64 ###
+ td1 = Series([timedelta(minutes=5,seconds=3)]*3)
+ td2 = timedelta(minutes=5,seconds=4)
+ for op in ['__mul__','__floordiv__','__truediv__','__div__','__pow__']:
+ op = getattr(td1,op,None)
+ if op is not None:
+ self.assertRaises(TypeError, op, td2)
+ td1 + td2
+ td1 - td2
+
+ ### datetime64 ###
+ dt1 = Series([Timestamp('20111230'),Timestamp('20120101'),Timestamp('20120103')])
+ dt2 = Series([Timestamp('20111231'),Timestamp('20120102'),Timestamp('20120104')])
+ for op in ['__add__','__mul__','__floordiv__','__truediv__','__div__','__pow__']:
+ op = getattr(dt1,op,None)
+ if op is not None:
+ self.assertRaises(TypeError, op, dt2)
+ dt1 - dt2
+
+ ### datetime64 with timetimedelta ###
+ for op in ['__mul__','__floordiv__','__truediv__','__div__','__pow__']:
+ op = getattr(dt1,op,None)
+ if op is not None:
+ self.assertRaises(TypeError, op, td1)
+ dt1 + td1
+ dt1 - td1
+
+ ### timetimedelta with datetime64 ###
+ for op in ['__mul__','__floordiv__','__truediv__','__div__','__pow__']:
+ op = getattr(td1,op,None)
+ if op is not None:
+ self.assertRaises(TypeError, op, dt1)
+ td1 + dt1
+ td1 - dt1
def test_timedelta64_functions(self):
| e.g. can't add 2 datetimes, nor multiply timedelta \* datetime
| https://api.github.com/repos/pandas-dev/pandas/pulls/3550 | 2013-05-09T14:38:32Z | 2013-05-09T15:25:46Z | 2013-05-09T15:25:46Z | 2014-06-21T11:10:14Z |
BUG/TST: fixed up retaining of index names in the table .info (like freq/tz) | diff --git a/RELEASE.rst b/RELEASE.rst
index a69ace7c51890..7c9982961c01f 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -41,8 +41,9 @@ pandas 0.11.1
- ``HDFStore``
- will retain index attributes (freq,tz,name) on recreation (GH3499_)
- - will warn with a FrequencyWarning if you are attempting to append
- an index with a different frequency than the existing
+ - will warn with a AttributeConflictWarning if you are attempting to append
+ an index with a different frequency than the existing, or attempting
+ to append an index with a different name than the existing
- support datelike columns with a timezone as data_columns (GH2852_)
- table writing performance improvements.
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 2e3a67ead65e0..76565df8f593c 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -17,8 +17,9 @@ Enhancements
- ``HDFStore``
- will retain index attributes (freq,tz,name) on recreation (GH3499_)
- - will warn with a FrequencyWarning if you are attempting to append
- an index with a different frequency than the existing
+ - will warn with a AttributeConflictWarning if you are attempting to append
+ an index with a different frequency than the existing, or attempting
+ to append an index with a different name than the existing
- support datelike columns with a timezone as data_columns (GH2852_)
See the `full release notes
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 834a94a139ee5..d3b7533840a86 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -42,10 +42,10 @@ class IncompatibilityWarning(Warning): pass
where criteria is being ignored as this version [%s] is too old (or not-defined),
read the file in and write it out to a new file to upgrade (with the copy_to method)
"""
-class FrequencyWarning(Warning): pass
-frequency_doc = """
-the frequency of the existing index is [%s] which conflicts with the new freq [%s],
-resetting the frequency to None
+class AttributeConflictWarning(Warning): pass
+attribute_conflict_doc = """
+the [%s] attribute of the existing index is [%s] which conflicts with the new [%s],
+resetting the attribute to None
"""
class PerformanceWarning(Warning): pass
performance_doc = """
@@ -873,9 +873,9 @@ def _write_to_group(self, key, value, index=True, table=False, append=False, com
if not s.is_table or (s.is_table and table is None and s.is_exists):
raise ValueError('Can only append to Tables')
if not s.is_exists:
- s.set_info()
+ s.set_object_info()
else:
- s.set_info()
+ s.set_object_info()
if not s.is_table and complib:
raise ValueError('Compression not supported on non-table')
@@ -949,7 +949,7 @@ class IndexCol(object):
is_an_indexable = True
is_data_indexable = True
is_searchable = False
- _info_fields = ['freq','tz','name']
+ _info_fields = ['freq','tz','index_name']
def __init__(self, values=None, kind=None, typ=None, cname=None, itemsize=None,
name=None, axis=None, kind_attr=None, pos=None, freq=None, tz=None,
@@ -965,7 +965,7 @@ def __init__(self, values=None, kind=None, typ=None, cname=None, itemsize=None,
self.pos = pos
self.freq = freq
self.tz = tz
- self.index_name = None
+ self.index_name = index_name
self.table = None
if name is not None:
@@ -1042,7 +1042,7 @@ def convert(self, values, nan_rep):
kwargs['freq'] = self.freq
if self.tz is not None:
kwargs['tz'] = self.tz
- if self.name is not None:
+ if self.index_name is not None:
kwargs['name'] = self.index_name
try:
self.values = Index(_maybe_convert(values, self.kind), **kwargs)
@@ -1128,7 +1128,7 @@ def validate_attr(self, append):
def update_info(self, info):
""" set/update the info for this indexable with the key/value
- if validate is True, then raise if an existing value does not match the value """
+ if there is a conflict raise/warn as needed """
for key in self._info_fields:
@@ -1140,15 +1140,16 @@ def update_info(self, info):
idx = info[self.name] = dict()
existing_value = idx.get(key)
- if key in idx and existing_value != value:
+ if key in idx and value is not None and existing_value != value:
- # frequency just warn
- if key == 'freq':
- ws = frequency_doc % (existing_value,value)
- warnings.warn(ws, FrequencyWarning)
+ # frequency/name just warn
+ if key in ['freq','index_name']:
+ ws = attribute_conflict_doc % (key,existing_value,value)
+ warnings.warn(ws, AttributeConflictWarning)
# reset
idx[key] = None
+ setattr(self,key,None)
else:
raise ValueError("invalid info for [%s] for [%s]"""
@@ -1554,7 +1555,7 @@ def __repr__(self):
def __str__(self):
return self.__repr__()
- def set_info(self):
+ def set_object_info(self):
""" set my pandas type & version """
self.attrs.pandas_type = self.pandas_kind
self.attrs.pandas_version = _version
@@ -2275,6 +2276,10 @@ def values_cols(self):
""" return a list of my values cols """
return [i.cname for i in self.values_axes]
+ def set_info(self):
+ """ update our table index info """
+ self.attrs.info = self.info
+
def set_attrs(self):
""" set our table type & indexables """
self.attrs.table_type = self.table_type
@@ -2282,9 +2287,9 @@ def set_attrs(self):
self.attrs.values_cols = self.values_cols()
self.attrs.non_index_axes = self.non_index_axes
self.attrs.data_columns = self.data_columns
- self.attrs.info = self.info
self.attrs.nan_rep = self.nan_rep
self.attrs.levels = self.levels
+ self.set_info()
def get_attrs(self):
""" retrieve our attributes """
@@ -2487,7 +2492,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
axes = [ a.axis for a in existing_table.index_axes]
data_columns = existing_table.data_columns
nan_rep = existing_table.nan_rep
- self.info = existing_table.info
+ self.info = copy.copy(existing_table.info)
else:
existing_table = None
@@ -2879,6 +2884,9 @@ def write(self, obj, axes=None, append=False, complib=None,
else:
table = self.table
+ # update my info
+ self.set_info()
+
# validate the axes and set the kinds
for a in self.axes:
a.validate_and_set(table, append)
@@ -3036,10 +3044,10 @@ def read(self, where=None, columns=None, **kwargs):
if self.is_transposed:
values = a.cvalues
index_ = cols
- cols_ = Index(index)
+ cols_ = Index(index,name=getattr(index,'name',None))
else:
values = a.cvalues.T
- index_ = Index(index)
+ index_ = Index(index,name=getattr(index,'name',None))
cols_ = cols
# if we have a DataIndexableCol, its shape will only be 1 dim
@@ -3157,12 +3165,17 @@ class AppendableNDimTable(AppendablePanelTable):
obj_type = Panel4D
def _convert_index(index):
+ index_name = getattr(index,'name',None)
+
if isinstance(index, DatetimeIndex):
converted = index.asi8
- return IndexCol(converted, 'datetime64', _tables().Int64Col(), freq=getattr(index,'freq',None), tz=getattr(index,'tz',None))
+ return IndexCol(converted, 'datetime64', _tables().Int64Col(),
+ freq=getattr(index,'freq',None), tz=getattr(index,'tz',None),
+ index_name=index_name)
elif isinstance(index, (Int64Index, PeriodIndex)):
atom = _tables().Int64Col()
- return IndexCol(index.values, 'integer', atom, freq=getattr(index,'freq',None))
+ return IndexCol(index.values, 'integer', atom, freq=getattr(index,'freq',None),
+ index_name=index_name)
if isinstance(index, MultiIndex):
raise Exception('MultiIndex not supported here!')
@@ -3173,36 +3186,45 @@ def _convert_index(index):
if inferred_type == 'datetime64':
converted = values.view('i8')
- return IndexCol(converted, 'datetime64', _tables().Int64Col())
+ return IndexCol(converted, 'datetime64', _tables().Int64Col(),
+ freq=getattr(index,'freq',None), tz=getattr(index,'tz',None),
+ index_name=index_name)
elif inferred_type == 'datetime':
converted = np.array([(time.mktime(v.timetuple()) +
v.microsecond / 1E6) for v in values],
dtype=np.float64)
- return IndexCol(converted, 'datetime', _tables().Time64Col())
+ return IndexCol(converted, 'datetime', _tables().Time64Col(),
+ index_name=index_name)
elif inferred_type == 'date':
converted = np.array([time.mktime(v.timetuple()) for v in values],
dtype=np.int32)
- return IndexCol(converted, 'date', _tables().Time32Col())
+ return IndexCol(converted, 'date', _tables().Time32Col(),
+ index_name=index_name)
elif inferred_type == 'string':
# atom = _tables().ObjectAtom()
# return np.asarray(values, dtype='O'), 'object', atom
converted = np.array(list(values), dtype=np.str_)
itemsize = converted.dtype.itemsize
- return IndexCol(converted, 'string', _tables().StringCol(itemsize), itemsize=itemsize)
+ return IndexCol(converted, 'string', _tables().StringCol(itemsize), itemsize=itemsize,
+ index_name=index_name)
elif inferred_type == 'unicode':
atom = _tables().ObjectAtom()
- return IndexCol(np.asarray(values, dtype='O'), 'object', atom)
+ return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
+ index_name=index_name)
elif inferred_type == 'integer':
# take a guess for now, hope the values fit
atom = _tables().Int64Col()
- return IndexCol(np.asarray(values, dtype=np.int64), 'integer', atom)
+ return IndexCol(np.asarray(values, dtype=np.int64), 'integer', atom,
+ index_name=index_name)
elif inferred_type == 'floating':
atom = _tables().Float64Col()
- return IndexCol(np.asarray(values, dtype=np.float64), 'float', atom)
+ return IndexCol(np.asarray(values, dtype=np.float64), 'float', atom,
+ index_name=index_name)
else: # pragma: no cover
atom = _tables().ObjectAtom()
- return IndexCol(np.asarray(values, dtype='O'), 'object', atom)
+ return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
+ index_name=index_name)
def _unconvert_index(data, kind):
if kind == 'datetime64':
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 3daa08a0d591a..7f1803a4e7aa3 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -10,9 +10,9 @@
import pandas
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, Index)
-from pandas.io.pytables import (HDFStore, get_store, Term,
+from pandas.io.pytables import (HDFStore, get_store, Term, read_hdf,
IncompatibilityWarning, PerformanceWarning,
- FrequencyWarning)
+ AttributeConflictWarning)
import pandas.util.testing as tm
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
@@ -109,8 +109,6 @@ def test_conv_read_write(self):
try:
- from pandas import read_hdf
-
def roundtrip(key, obj,**kwargs):
obj.to_hdf(self.path, key,**kwargs)
return read_hdf(self.path, key)
@@ -2089,17 +2087,17 @@ def test_retain_index_attributes(self):
result = store.get('data')
tm.assert_frame_equal(df,result)
- for attr in ['freq','tz']:
+ for attr in ['freq','tz','name']:
for idx in ['index','columns']:
self.assert_(getattr(getattr(df,idx),attr,None) == getattr(getattr(result,idx),attr,None))
# try to append a table with a different frequency
- warnings.filterwarnings('ignore', category=FrequencyWarning)
+ warnings.filterwarnings('ignore', category=AttributeConflictWarning)
df2 = DataFrame(dict(A = Series(xrange(3),
index=date_range('2002-1-1',periods=3,freq='D'))))
store.append('data',df2)
- warnings.filterwarnings('always', category=FrequencyWarning)
+ warnings.filterwarnings('always', category=AttributeConflictWarning)
self.assert_(store.get_storer('data').info['index']['freq'] is None)
@@ -2114,12 +2112,27 @@ def test_retain_index_attributes(self):
def test_retain_index_attributes2(self):
with tm.ensure_clean(self.path) as path:
- warnings.filterwarnings('ignore', category=FrequencyWarning)
+
+ warnings.filterwarnings('ignore', category=AttributeConflictWarning)
+
df = DataFrame(dict(A = Series(xrange(3), index=date_range('2000-1-1',periods=3,freq='H'))))
df.to_hdf(path,'data',mode='w',append=True)
df2 = DataFrame(dict(A = Series(xrange(3), index=date_range('2002-1-1',periods=3,freq='D'))))
df2.to_hdf(path,'data',append=True)
- warnings.filterwarnings('always', category=FrequencyWarning)
+
+ idx = date_range('2000-1-1',periods=3,freq='H')
+ idx.name = 'foo'
+ df = DataFrame(dict(A = Series(xrange(3), index=idx)))
+ df.to_hdf(path,'data',mode='w',append=True)
+ self.assert_(read_hdf(path,'data').index.name == 'foo')
+
+ idx2 = date_range('2001-1-1',periods=3,freq='H')
+ idx2.name = 'bar'
+ df2 = DataFrame(dict(A = Series(xrange(3), index=idx2)))
+ df2.to_hdf(path,'data',append=True)
+ self.assert_(read_hdf(path,'data').index.name is None)
+
+ warnings.filterwarnings('always', category=AttributeConflictWarning)
def test_panel_select(self):
| adjunct to #3531 (retention of index name attributes was not working)
| https://api.github.com/repos/pandas-dev/pandas/pulls/3549 | 2013-05-09T00:16:27Z | 2013-05-09T00:34:59Z | 2013-05-09T00:34:59Z | 2014-07-16T08:07:44Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.