title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
BUG: fix Series constructor for scalar and Categorical dtype | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 8d6a3dc72163e..c963a3f403868 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -709,7 +709,8 @@ Categorical
``self`` but in a different order (:issue:`19551`)
- Bug in :meth:`Index.astype` with a categorical dtype where the resultant index is not converted to a :class:`CategoricalIndex` for all types of index (:issue:`18630`)
- Bug in :meth:`Series.astype` and ``Categorical.astype()`` where an existing categorical data does not get updated (:issue:`10696`, :issue:`18593`)
-- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (:issue:`19032`)
+- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`)
+- Bug in :class:`Series` constructor with scalar and ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19565`)
Datetimelike
^^^^^^^^^^^^
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 55919fb2bea0d..352ce29f5c37b 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1178,7 +1178,7 @@ def construct_1d_arraylike_from_scalar(value, length, dtype):
subarr = DatetimeIndex([value] * length, dtype=dtype)
elif is_categorical_dtype(dtype):
from pandas import Categorical
- subarr = Categorical([value] * length)
+ subarr = Categorical([value] * length, dtype=dtype)
else:
if not isinstance(dtype, (np.dtype, type(np.dtype))):
dtype = dtype.dtype
diff --git a/pandas/tests/dtypes/test_cast.py b/pandas/tests/dtypes/test_cast.py
index d13d781f03117..31bd962b67afb 100644
--- a/pandas/tests/dtypes/test_cast.py
+++ b/pandas/tests/dtypes/test_cast.py
@@ -22,7 +22,8 @@
maybe_convert_string_to_object,
maybe_convert_scalar,
find_common_type,
- construct_1d_object_array_from_listlike)
+ construct_1d_object_array_from_listlike,
+ construct_1d_arraylike_from_scalar)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
@@ -422,3 +423,15 @@ def test_cast_1d_array(self, datum1, datum2):
@pytest.mark.parametrize('val', [1, 2., None])
def test_cast_1d_array_invalid_scalar(self, val):
pytest.raises(TypeError, construct_1d_object_array_from_listlike, val)
+
+ def test_cast_1d_arraylike_from_scalar_categorical(self):
+ # GH 19565 - Categorical result from scalar did not maintain categories
+ # and ordering of the passed dtype
+ cats = ['a', 'b', 'c']
+ cat_type = CategoricalDtype(categories=cats, ordered=False)
+ expected = pd.Categorical(['a', 'a'], categories=cats)
+ result = construct_1d_arraylike_from_scalar('a', len(expected),
+ cat_type)
+ tm.assert_categorical_equal(result, expected,
+ check_category_order=True,
+ check_dtype=True)
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 33737387edffa..77f9dfcce686d 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -270,6 +270,13 @@ def test_constructor_categorical_dtype(self):
tm.assert_index_equal(result.cat.categories, pd.Index(['b', 'a']))
assert result.cat.ordered is False
+ # GH 19565 - Check broadcasting of scalar with Categorical dtype
+ result = Series('a', index=[0, 1],
+ dtype=CategoricalDtype(['a', 'b'], ordered=True))
+ expected = Series(['a', 'a'], index=[0, 1],
+ dtype=CategoricalDtype(['a', 'b'], ordered=True))
+ tm.assert_series_equal(result, expected, check_categorical=True)
+
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
| Categories and ordering of the resulting Series were not the same as those of the passed Categorical dtype. Closes #19565
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19717 | 2018-02-15T18:49:12Z | 2018-02-24T14:58:41Z | 2018-02-24T14:58:41Z | 2018-02-24T14:58:45Z |
BUG: Fix Series constructor for Categorical with index | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index fb22dc40e335f..5330f7e7e998b 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -919,6 +919,7 @@ Reshaping
- Comparisons between :class:`Series` and :class:`Index` would return a ``Series`` with an incorrect name, ignoring the ``Index``'s name attribute (:issue:`19582`)
- Bug in :func:`qcut` where datetime and timedelta data with ``NaT`` present raised a ``ValueError`` (:issue:`19768`)
- Bug in :func:`DataFrame.iterrows`, which would infers strings not compliant to `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_ to datetimes (:issue:`19671`)
+- Bug in :class:`Series` constructor with ``Categorical`` where a ```ValueError`` is not raised when an index of different length is given (:issue:`19342`)
Other
^^^^^
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 26b7fd552b062..8053651a4877a 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -212,7 +212,6 @@ def __init__(self, data=None, index=None, dtype=None, name=None,
'be False.')
elif is_extension_array_dtype(data) and dtype is not None:
- # GH12574: Allow dtype=category only, otherwise error
if not data.dtype.is_dtype(dtype):
raise ValueError("Cannot specify a dtype '{}' with an "
"extension array of a different "
@@ -235,6 +234,18 @@ def __init__(self, data=None, index=None, dtype=None, name=None,
if not is_list_like(data):
data = [data]
index = com._default_index(len(data))
+ elif is_list_like(data):
+
+ # a scalar numpy array is list-like but doesn't
+ # have a proper length
+ try:
+ if len(index) != len(data):
+ raise ValueError(
+ 'Length of passed values is {val}, '
+ 'index implies {ind}'
+ .format(val=len(data), ind=len(index)))
+ except TypeError:
+ pass
# create/copy the manager
if isinstance(data, SingleBlockManager):
diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py
index bedb11d4fc4ae..adf8e14b756c2 100644
--- a/pandas/tests/io/formats/test_style.py
+++ b/pandas/tests/io/formats/test_style.py
@@ -24,7 +24,7 @@ def setup_method(self, method):
def h(x, foo='bar'):
return pd.Series(
- ['color: {foo}'.format(foo=foo)], index=x.index, name=x.name)
+ 'color: {foo}'.format(foo=foo), index=x.index, name=x.name)
self.h = h
self.styler = Styler(self.df)
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 77f9dfcce686d..25f425ffa0021 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -400,6 +400,34 @@ def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
+ @pytest.mark.parametrize('input', [[1, 2, 3],
+ (1, 2, 3),
+ list(range(3)),
+ pd.Categorical(['a', 'b', 'a']),
+ (i for i in range(3)),
+ map(lambda x: x, range(3))])
+ def test_constructor_index_mismatch(self, input):
+ # GH 19342
+ # test that construction of a Series with an index of different length
+ # raises an error
+ msg = 'Length of passed values is 3, index implies 4'
+ with pytest.raises(ValueError, message=msg):
+ Series(input, index=np.arange(4))
+
+ def test_constructor_numpy_scalar(self):
+ # GH 19342
+ # construction with a numpy scalar
+ # should not raise
+ result = Series(np.array(100), index=np.arange(4), dtype='int64')
+ expected = Series(100, index=np.arange(4), dtype='int64')
+ tm.assert_series_equal(result, expected)
+
+ def test_constructor_broadcast_list(self):
+ # GH 19342
+ # construction with single-element container and index
+ # should raise
+ pytest.raises(ValueError, Series, ['foo'], index=['a', 'b', 'c'])
+
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
| Fixes Series constructor so that ValueError is raised when a Categorical and index of incorrect length are given. Closes issue #19342
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19714 | 2018-02-15T14:04:56Z | 2018-02-27T01:13:01Z | 2018-02-27T01:13:01Z | 2018-02-27T01:45:16Z |
Fix the non cython build for cpp extensions | diff --git a/setup.py b/setup.py
index c66979dd19ef0..c7784260d79ca 100755
--- a/setup.py
+++ b/setup.py
@@ -311,7 +311,6 @@ class CheckSDist(sdist_class):
'pandas/_libs/missing.pyx',
'pandas/_libs/reduction.pyx',
'pandas/_libs/testing.pyx',
- 'pandas/_libs/window.pyx',
'pandas/_libs/skiplist.pyx',
'pandas/_libs/sparse.pyx',
'pandas/_libs/parsers.pyx',
@@ -331,6 +330,10 @@ class CheckSDist(sdist_class):
'pandas/_libs/writers.pyx',
'pandas/io/sas/sas.pyx']
+ _cpp_pyxfiles = ['pandas/_libs/window.pyx',
+ 'pandas/io/msgpack/_packer.pyx',
+ 'pandas/io/msgpack/_unpacker.pyx']
+
def initialize_options(self):
sdist_class.initialize_options(self)
@@ -338,12 +341,17 @@ def run(self):
if 'cython' in cmdclass:
self.run_command('cython')
else:
- for pyxfile in self._pyxfiles:
- cfile = pyxfile[:-3] + 'c'
- msg = ("C-source file '{source}' not found.\n"
- "Run 'setup.py cython' before sdist.".format(
- source=cfile))
- assert os.path.isfile(cfile), msg
+ # If we are not running cython then
+ # compile the extensions correctly
+ pyx_files = [(self._pyxfiles, 'c'), (self._cpp_pyxfiles, 'cpp')]
+
+ for pyxfiles, extension in pyx_files:
+ for pyxfile in pyxfiles:
+ sourcefile = pyxfile[:-3] + extension
+ msg = ("{extension}-source file '{source}' not found.\n"
+ "Run 'setup.py cython' before sdist.".format(
+ source=sourcefile, extension=extension))
+ assert os.path.isfile(sourcefile), msg
sdist_class.run(self)
@@ -417,6 +425,11 @@ def get_tag(self):
cmdclass['build_src'] = DummyBuildSrc
cmdclass['build_ext'] = CheckingBuildExt
+if sys.byteorder == 'big':
+ endian_macro = [('__BIG_ENDIAN__', '1')]
+else:
+ endian_macro = [('__LITTLE_ENDIAN__', '1')]
+
lib_depends = ['inference']
@@ -453,6 +466,7 @@ def pxd(name):
'pandas/_libs/src/datetime/np_datetime_strings.h']
np_datetime_sources = ['pandas/_libs/src/datetime/np_datetime.c',
'pandas/_libs/src/datetime/np_datetime_strings.c']
+
tseries_depends = np_datetime_headers + ['pandas/_libs/tslibs/np_datetime.pxd']
# some linux distros require it
@@ -618,17 +632,42 @@ def pxd(name):
'_libs.window': {
'pyxfile': '_libs/window',
'pxdfiles': ['_libs/skiplist', '_libs/src/util'],
- 'language': 'c++'},
+ 'language': 'c++',
+ 'suffix': '.cpp'},
'_libs.writers': {
'pyxfile': '_libs/writers',
'pxdfiles': ['_libs/src/util']},
'io.sas._sas': {
- 'pyxfile': 'io/sas/sas'}}
+ 'pyxfile': 'io/sas/sas'},
+ 'io.msgpack._packer': {
+ 'macros': endian_macro,
+ 'depends': ['pandas/_libs/src/msgpack/pack.h',
+ 'pandas/_libs/src/msgpack/pack_template.h'],
+ 'include': ['pandas/_libs/src/msgpack'] + common_include,
+ 'language': 'c++',
+ 'suffix': '.cpp',
+ 'pyxfile': 'io/msgpack/_packer',
+ 'subdir': 'io/msgpack'},
+ 'io.msgpack._unpacker': {
+ 'depends': ['pandas/_libs/src/msgpack/unpack.h',
+ 'pandas/_libs/src/msgpack/unpack_define.h',
+ 'pandas/_libs/src/msgpack/unpack_template.h'],
+ 'macros': endian_macro,
+ 'include': ['pandas/_libs/src/msgpack'] + common_include,
+ 'language': 'c++',
+ 'suffix': '.cpp',
+ 'pyxfile': 'io/msgpack/_unpacker',
+ 'subdir': 'io/msgpack'
+ }
+}
extensions = []
for name, data in ext_data.items():
- sources = [srcpath(data['pyxfile'], suffix=suffix, subdir='')]
+ source_suffix = suffix if suffix == '.pyx' else data.get('suffix', '.c')
+
+ sources = [srcpath(data['pyxfile'], suffix=source_suffix, subdir='')]
+
pxds = [pxd(x) for x in data.get('pxdfiles', [])]
if suffix == '.pyx' and pxds:
sources.extend(pxds)
@@ -642,46 +681,11 @@ def pxd(name):
depends=data.get('depends', []),
include_dirs=include,
language=data.get('language', 'c'),
+ define_macros=data.get('macros', []),
extra_compile_args=extra_compile_args)
extensions.append(obj)
-# ----------------------------------------------------------------------
-# msgpack
-
-if sys.byteorder == 'big':
- macros = [('__BIG_ENDIAN__', '1')]
-else:
- macros = [('__LITTLE_ENDIAN__', '1')]
-
-msgpack_include = ['pandas/_libs/src/msgpack'] + common_include
-msgpack_suffix = suffix if suffix == '.pyx' else '.cpp'
-unpacker_depends = ['pandas/_libs/src/msgpack/unpack.h',
- 'pandas/_libs/src/msgpack/unpack_define.h',
- 'pandas/_libs/src/msgpack/unpack_template.h']
-
-packer_ext = Extension('pandas.io.msgpack._packer',
- depends=['pandas/_libs/src/msgpack/pack.h',
- 'pandas/_libs/src/msgpack/pack_template.h'],
- sources=[srcpath('_packer',
- suffix=msgpack_suffix,
- subdir='io/msgpack')],
- language='c++',
- include_dirs=msgpack_include,
- define_macros=macros,
- extra_compile_args=extra_compile_args)
-unpacker_ext = Extension('pandas.io.msgpack._unpacker',
- depends=unpacker_depends,
- sources=[srcpath('_unpacker',
- suffix=msgpack_suffix,
- subdir='io/msgpack')],
- language='c++',
- include_dirs=msgpack_include,
- define_macros=macros,
- extra_compile_args=extra_compile_args)
-extensions.append(packer_ext)
-extensions.append(unpacker_ext)
-
# ----------------------------------------------------------------------
# ujson
| - [ ] closes #19668
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
cc @TomAugspurger @jreback
This _should_ fix the problem with building with a non Cython environment. The problem is that we assume extensions are C everywhere and I didn't realize that this was a test path.
Also setup.py might be in need of a rethink, there's a lot of special conditions that I think could be cleaned up with some better formatting. | https://api.github.com/repos/pandas-dev/pandas/pulls/19707 | 2018-02-15T00:57:10Z | 2018-02-19T23:38:43Z | 2018-02-19T23:38:43Z | 2018-02-19T23:38:47Z |
DOC: Adding guide for the pandas documentation sprint | diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 258ab874cafcf..d0c5032079288 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -292,12 +292,9 @@ Some other important things to know about the docs:
overviews per topic together with some other information (what's new,
installation, etc).
-- The docstrings follow the **Numpy Docstring Standard**, which is used widely
- in the Scientific Python community. This standard specifies the format of
- the different sections of the docstring. See `this document
- <https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt>`_
- for a detailed explanation, or look at some of the existing functions to
- extend it in a similar manner.
+- The docstrings follow a pandas convention, based on the **Numpy Docstring
+ Standard**. Follow the :ref:`pandas docstring guide <docstring>` for detailed
+ instructions on how to write a correct docstring.
- The tutorials make heavy use of the `ipython directive
<http://matplotlib.org/sampledoc/ipython_directive.html>`_ sphinx extension.
diff --git a/doc/source/contributing_docstring.rst b/doc/source/contributing_docstring.rst
new file mode 100644
index 0000000000000..cd56b76fa891b
--- /dev/null
+++ b/doc/source/contributing_docstring.rst
@@ -0,0 +1,910 @@
+.. _docstring:
+
+======================
+pandas docstring guide
+======================
+
+.. note::
+ `Video tutorial: Pandas docstring guide
+ <https://www.youtube.com/watch?v=EOA0lUeW4NI>`_ by Frank Akogun.
+
+About docstrings and standards
+------------------------------
+
+A Python docstring is a string used to document a Python module, class,
+function or method, so programmers can understand what it does without having
+to read the details of the implementation.
+
+Also, it is a common practice to generate online (html) documentation
+automatically from docstrings. `Sphinx <http://www.sphinx-doc.org>`_ serves
+this purpose.
+
+Next example gives an idea on how a docstring looks like:
+
+.. code-block:: python
+
+ def add(num1, num2):
+ """
+ Add up two integer numbers.
+
+ This function simply wraps the `+` operator, and does not
+ do anything interesting, except for illustrating what is
+ the docstring of a very simple function.
+
+ Parameters
+ ----------
+ num1 : int
+ First number to add
+ num2 : int
+ Second number to add
+
+ Returns
+ -------
+ int
+ The sum of `num1` and `num2`
+
+ See Also
+ --------
+ subtract : Subtract one integer from another
+
+ Examples
+ --------
+ >>> add(2, 2)
+ 4
+ >>> add(25, 0)
+ 25
+ >>> add(10, -10)
+ 0
+ """
+ return num1 + num2
+
+Some standards exist about docstrings, so they are easier to read, and they can
+be exported to other formats such as html or pdf.
+
+The first conventions every Python docstring should follow are defined in
+`PEP-257 <https://www.python.org/dev/peps/pep-0257/>`_.
+
+As PEP-257 is quite open, and some other standards exist on top of it. In the
+case of pandas, the numpy docstring convention is followed. The conventions is
+explained in this document:
+
+- `numpydoc docstring guide <http://numpydoc.readthedocs.io/en/latest/format.html>`_
+ (which is based in the original `Guide to NumPy/SciPy documentation
+ <https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt>`_)
+
+numpydoc is a Sphinx extension to support the numpy docstring convention.
+
+The standard uses reStructuredText (reST). reStructuredText is a markup
+language that allows encoding styles in plain text files. Documentation
+about reStructuredText can be found in:
+
+- `Sphinx reStructuredText primer <http://www.sphinx-doc.org/en/stable/rest.html>`_
+- `Quick reStructuredText reference <http://docutils.sourceforge.net/docs/user/rst/quickref.html>`_
+- `Full reStructuredText specification <http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html>`_
+
+The rest of this document will summarize all the above guides, and will
+provide additional convention specific to the pandas project.
+
+.. _docstring.tutorial:
+
+Writing a docstring
+-------------------
+
+.. _docstring.general:
+
+General rules
+~~~~~~~~~~~~~
+
+Docstrings must be defined with three double-quotes. No blank lines should be
+left before or after the docstring. The text starts in the next line after the
+opening quotes. The closing quotes have their own line
+(meaning that they are not at the end of the last sentence).
+
+In rare occasions reST styles like bold text or itallics will be used in
+docstrings, but is it common to have inline code, which is presented between
+backticks. It is considered inline code:
+
+- The name of a parameter
+- Python code, a module, function, built-in, type, literal... (e.g. ``os``,
+ ``list``, ``numpy.abs``, ``datetime.date``, ``True``)
+- A pandas class (in the form ``:class:`~pandas.Series```)
+- A pandas method (in the form ``:meth:`pandas.Series.sum```)
+- A pandas function (in the form ``:func:`pandas.to_datetime```)
+
+**Good:**
+
+.. code-block:: python
+
+ def add_values(arr):
+ """
+ Add the values in `arr`.
+
+ This is equivalent to Python `sum` of :meth:`pandas.Series.sum`.
+
+ Some sections are omitted here for simplicity.
+ """
+ return sum(arr)
+
+**Bad:**
+
+.. code-block:: python
+
+ def func():
+
+ """Some function.
+
+ With several mistakes in the docstring.
+
+ It has a blank like after the signature `def func():`.
+
+ The text 'Some function' should go in the line after the
+ opening quotes of the docstring, not in the same line.
+
+ There is a blank line between the docstring and the first line
+ of code `foo = 1`.
+
+ The closing quotes should be in the next line, not in this one."""
+
+ foo = 1
+ bar = 2
+ return foo + bar
+
+.. _docstring.short_summary:
+
+Section 1: Short summary
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The short summary is a single sentence that expresses what the function does in
+a concise way.
+
+The short summary must start with a capital letter, end with a dot, and fit in
+a single line. It needs to express what the object does without providing
+details. For functions and methods, the short summary must start with an
+infinitive verb.
+
+**Good:**
+
+.. code-block:: python
+
+ def astype(dtype):
+ """
+ Cast Series type.
+
+ This section will provide further details.
+ """
+ pass
+
+**Bad:**
+
+.. code-block:: python
+
+ def astype(dtype):
+ """
+ Casts Series type.
+
+ Verb in third-person of the present simple, should be infinitive.
+ """
+ pass
+
+ def astype(dtype):
+ """
+ Method to cast Series type.
+
+ Does not start with verb.
+ """
+ pass
+
+ def astype(dtype):
+ """
+ Cast Series type
+
+ Missing dot at the end.
+ """
+ pass
+
+ def astype(dtype):
+ """
+ Cast Series type from its current type to the new type defined in
+ the parameter dtype.
+
+ Summary is too verbose and doesn't fit in a single line.
+ """
+ pass
+
+.. _docstring.extended_summary:
+
+Section 2: Extended summary
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The extended summary provides details on what the function does. It should not
+go into the details of the parameters, or discuss implementation notes, which
+go in other sections.
+
+A blank line is left between the short summary and the extended summary. And
+every paragraph in the extended summary is finished by a dot.
+
+The extended summary should provide details on why the function is useful and
+their use cases, if it is not too generic.
+
+.. code-block:: python
+
+ def unstack():
+ """
+ Pivot a row index to columns.
+
+ When using a multi-index, a level can be pivoted so each value in
+ the index becomes a column. This is especially useful when a subindex
+ is repeated for the main index, and data is easier to visualize as a
+ pivot table.
+
+ The index level will be automatically removed from the index when added
+ as columns.
+ """
+ pass
+
+.. _docstring.parameters:
+
+Section 3: Parameters
+~~~~~~~~~~~~~~~~~~~~~
+
+The details of the parameters will be added in this section. This section has
+the title "Parameters", followed by a line with a hyphen under each letter of
+the word "Parameters". A blank line is left before the section title, but not
+after, and not between the line with the word "Parameters" and the one with
+the hyphens.
+
+After the title, each parameter in the signature must be documented, including
+`*args` and `**kwargs`, but not `self`.
+
+The parameters are defined by their name, followed by a space, a colon, another
+space, and the type (or types). Note that the space between the name and the
+colon is important. Types are not defined for `*args` and `**kwargs`, but must
+be defined for all other parameters. After the parameter definition, it is
+required to have a line with the parameter description, which is indented, and
+can have multiple lines. The description must start with a capital letter, and
+finish with a dot.
+
+For keyword arguments with a default value, the default will be listed after a
+comma at the end of the type. The exact form of the type in this case will be
+"int, default 0". In some cases it may be useful to explain what the default
+argument means, which can be added after a comma "int, default -1, meaning all
+cpus".
+
+In cases where the default value is `None`, meaning that the value will not be
+used. Instead of "str, default None", it is preferred to write "str, optional".
+When `None` is a value being used, we will keep the form "str, default None".
+For example, in `df.to_csv(compression=None)`, `None` is not a value being used,
+but means that compression is optional, and no compression is being used if not
+provided. In this case we will use `str, optional`. Only in cases like
+`func(value=None)` and `None` is being used in the same way as `0` or `foo`
+would be used, then we will specify "str, int or None, default None".
+
+**Good:**
+
+.. code-block:: python
+
+ class Series:
+ def plot(self, kind, color='blue', **kwargs):
+ """
+ Generate a plot.
+
+ Render the data in the Series as a matplotlib plot of the
+ specified kind.
+
+ Parameters
+ ----------
+ kind : str
+ Kind of matplotlib plot.
+ color : str, default 'blue'
+ Color name or rgb code.
+ **kwargs
+ These parameters will be passed to the matplotlib plotting
+ function.
+ """
+ pass
+
+**Bad:**
+
+.. code-block:: python
+
+ class Series:
+ def plot(self, kind, **kwargs):
+ """
+ Generate a plot.
+
+ Render the data in the Series as a matplotlib plot of the
+ specified kind.
+
+ Note the blank line between the parameters title and the first
+ parameter. Also, note that after the name of the parameter `kind`
+ and before the colon, a space is missing.
+
+ Also, note that the parameter descriptions do not start with a
+ capital letter, and do not finish with a dot.
+
+ Finally, the `**kwargs` parameter is missing.
+
+ Parameters
+ ----------
+
+ kind: str
+ kind of matplotlib plot
+ """
+ pass
+
+.. _docstring.parameter_types:
+
+Parameter types
+^^^^^^^^^^^^^^^
+
+When specifying the parameter types, Python built-in data types can be used
+directly (the Python type is preferred to the more verbose string, integer,
+boolean, etc):
+
+- int
+- float
+- str
+- bool
+
+For complex types, define the subtypes. For `dict` and `tuple`, as more than
+one type is present, we use the brackets to help read the type (curly brackets
+for `dict` and normal brackets for `tuple`):
+
+- list of int
+- dict of {str : int}
+- tuple of (str, int, int)
+- tuple of (str,)
+- set of str
+
+In case where there are just a set of values allowed, list them in curly
+brackets and separated by commas (followed by a space). If the values are
+ordinal and they have an order, list them in this order. Otherwise, list
+the default value first, if there is one:
+
+- {0, 10, 25}
+- {'simple', 'advanced'}
+- {'low', 'medium', 'high'}
+- {'cat', 'dog', 'bird'}
+
+If the type is defined in a Python module, the module must be specified:
+
+- datetime.date
+- datetime.datetime
+- decimal.Decimal
+
+If the type is in a package, the module must be also specified:
+
+- numpy.ndarray
+- scipy.sparse.coo_matrix
+
+If the type is a pandas type, also specify pandas except for Series and
+DataFrame:
+
+- Series
+- DataFrame
+- pandas.Index
+- pandas.Categorical
+- pandas.SparseArray
+
+If the exact type is not relevant, but must be compatible with a numpy
+array, array-like can be specified. If Any type that can be iterated is
+accepted, iterable can be used:
+
+- array-like
+- iterable
+
+If more than one type is accepted, separate them by commas, except the
+last two types, that need to be separated by the word 'or':
+
+- int or float
+- float, decimal.Decimal or None
+- str or list of str
+
+If ``None`` is one of the accepted values, it always needs to be the last in
+the list.
+
+For axis, the convention is to use something like:
+
+- axis : {0 or 'index', 1 or 'columns', None}, default None
+
+.. _docstring.returns:
+
+Section 4: Returns or Yields
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If the method returns a value, it will be documented in this section. Also
+if the method yields its output.
+
+The title of the section will be defined in the same way as the "Parameters".
+With the names "Returns" or "Yields" followed by a line with as many hyphens
+as the letters in the preceding word.
+
+The documentation of the return is also similar to the parameters. But in this
+case, no name will be provided, unless the method returns or yields more than
+one value (a tuple of values).
+
+The types for "Returns" and "Yields" are the same as the ones for the
+"Parameters". Also, the description must finish with a dot.
+
+For example, with a single value:
+
+.. code-block:: python
+
+ def sample():
+ """
+ Generate and return a random number.
+
+ The value is sampled from a continuous uniform distribution between
+ 0 and 1.
+
+ Returns
+ -------
+ float
+ Random number generated.
+ """
+ return random.random()
+
+With more than one value:
+
+.. code-block:: python
+
+ def random_letters():
+ """
+ Generate and return a sequence of random letters.
+
+ The length of the returned string is also random, and is also
+ returned.
+
+ Returns
+ -------
+ length : int
+ Length of the returned string.
+ letters : str
+ String of random letters.
+ """
+ length = random.randint(1, 10)
+ letters = ''.join(random.choice(string.ascii_lowercase)
+ for i in range(length))
+ return length, letters
+
+If the method yields its value:
+
+.. code-block:: python
+
+ def sample_values():
+ """
+ Generate an infinite sequence of random numbers.
+
+ The values are sampled from a continuous uniform distribution between
+ 0 and 1.
+
+ Yields
+ ------
+ float
+ Random number generated.
+ """
+ while True:
+ yield random.random()
+
+.. _docstring.see_also:
+
+Section 5: See Also
+~~~~~~~~~~~~~~~~~~~
+
+This section is used to let users know about pandas functionality
+related to the one being documented. In rare cases, if no related methods
+or functions can be found at all, this section can be skipped.
+
+An obvious example would be the `head()` and `tail()` methods. As `tail()` does
+the equivalent as `head()` but at the end of the `Series` or `DataFrame`
+instead of at the beginning, it is good to let the users know about it.
+
+To give an intuition on what can be considered related, here there are some
+examples:
+
+* ``loc`` and ``iloc``, as they do the same, but in one case providing indices
+ and in the other positions
+* ``max`` and ``min``, as they do the opposite
+* ``iterrows``, ``itertuples`` and ``iteritems``, as it is easy that a user
+ looking for the method to iterate over columns ends up in the method to
+ iterate over rows, and vice-versa
+* ``fillna`` and ``dropna``, as both methods are used to handle missing values
+* ``read_csv`` and ``to_csv``, as they are complementary
+* ``merge`` and ``join``, as one is a generalization of the other
+* ``astype`` and ``pandas.to_datetime``, as users may be reading the
+ documentation of ``astype`` to know how to cast as a date, and the way to do
+ it is with ``pandas.to_datetime``
+* ``where`` is related to ``numpy.where``, as its functionality is based on it
+
+When deciding what is related, you should mainly use your common sense and
+think about what can be useful for the users reading the documentation,
+especially the less experienced ones.
+
+When relating to other libraries (mainly ``numpy``), use the name of the module
+first (not an alias like ``np``). If the function is in a module which is not
+the main one, like ``scipy.sparse``, list the full module (e.g.
+``scipy.sparse.coo_matrix``).
+
+This section, as the previous, also has a header, "See Also" (note the capital
+S and A). Also followed by the line with hyphens, and preceded by a blank line.
+
+After the header, we will add a line for each related method or function,
+followed by a space, a colon, another space, and a short description that
+illustrated what this method or function does, why is it relevant in this
+context, and what are the key differences between the documented function and
+the one referencing. The description must also finish with a dot.
+
+Note that in "Returns" and "Yields", the description is located in the
+following line than the type. But in this section it is located in the same
+line, with a colon in between. If the description does not fit in the same
+line, it can continue in the next ones, but it has to be indented in them.
+
+For example:
+
+.. code-block:: python
+
+ class Series:
+ def head(self):
+ """
+ Return the first 5 elements of the Series.
+
+ This function is mainly useful to preview the values of the
+ Series without displaying the whole of it.
+
+ Returns
+ -------
+ Series
+ Subset of the original series with the 5 first values.
+
+ See Also
+ --------
+ Series.tail : Return the last 5 elements of the Series.
+ Series.iloc : Return a slice of the elements in the Series,
+ which can also be used to return the first or last n.
+ """
+ return self.iloc[:5]
+
+.. _docstring.notes:
+
+Section 6: Notes
+~~~~~~~~~~~~~~~~
+
+This is an optional section used for notes about the implementation of the
+algorithm. Or to document technical aspects of the function behavior.
+
+Feel free to skip it, unless you are familiar with the implementation of the
+algorithm, or you discover some counter-intuitive behavior while writing the
+examples for the function.
+
+This section follows the same format as the extended summary section.
+
+.. _docstring.examples:
+
+Section 7: Examples
+~~~~~~~~~~~~~~~~~~~
+
+This is one of the most important sections of a docstring, even if it is
+placed in the last position. As often, people understand concepts better
+with examples, than with accurate explanations.
+
+Examples in docstrings, besides illustrating the usage of the function or
+method, must be valid Python code, that in a deterministic way returns the
+presented output, and that can be copied and run by users.
+
+They are presented as a session in the Python terminal. `>>>` is used to
+present code. `...` is used for code continuing from the previous line.
+Output is presented immediately after the last line of code generating the
+output (no blank lines in between). Comments describing the examples can
+be added with blank lines before and after them.
+
+The way to present examples is as follows:
+
+1. Import required libraries (except ``numpy`` and ``pandas``)
+
+2. Create the data required for the example
+
+3. Show a very basic example that gives an idea of the most common use case
+
+4. Add examples with explanations that illustrate how the parameters can be
+ used for extended functionality
+
+A simple example could be:
+
+.. code-block:: python
+
+ class Series:
+ def head(self, n=5):
+ """
+ Return the first elements of the Series.
+
+ This function is mainly useful to preview the values of the
+ Series without displaying the whole of it.
+
+ Parameters
+ ----------
+ n : int
+ Number of values to return.
+
+ Return
+ ------
+ pandas.Series
+ Subset of the original series with the n first values.
+
+ See Also
+ --------
+ tail : Return the last n elements of the Series.
+
+ Examples
+ --------
+ >>> s = pd.Series(['Ant', 'Bear', 'Cow', 'Dog', 'Falcon',
+ ... 'Lion', 'Monkey', 'Rabbit', 'Zebra'])
+ >>> s.head()
+ 0 Ant
+ 1 Bear
+ 2 Cow
+ 3 Dog
+ 4 Falcon
+ dtype: object
+
+ With the `n` parameter, we can change the number of returned rows:
+
+ >>> s.head(n=3)
+ 0 Ant
+ 1 Bear
+ 2 Cow
+ dtype: object
+ """
+ return self.iloc[:n]
+
+The examples should be as concise as possible. In cases where the complexity of
+the function requires long examples, is recommended to use blocks with headers
+in bold. Use double star ``**`` to make a text bold, like in ``**this example**``.
+
+.. _docstring.example_conventions:
+
+Conventions for the examples
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Code in examples is assumed to always start with these two lines which are not
+shown:
+
+.. code-block:: python
+
+ import numpy as np
+ import pandas as pd
+
+
+Any other module used in the examples must be explicitly imported, one per line (as
+recommended in `PEP-8 <https://www.python.org/dev/peps/pep-0008/#imports>`_)
+and avoiding aliases. Avoid excessive imports, but if needed, imports from
+the standard library go first, followed by third-party libraries (like
+matplotlib).
+
+When illustrating examples with a single ``Series`` use the name ``s``, and if
+illustrating with a single ``DataFrame`` use the name ``df``. For indices,
+``idx`` is the preferred name. If a set of homogeneous ``Series`` or
+``DataFrame`` is used, name them ``s1``, ``s2``, ``s3``... or ``df1``,
+``df2``, ``df3``... If the data is not homogeneous, and more than one structure
+is needed, name them with something meaningful, for example ``df_main`` and
+``df_to_join``.
+
+Data used in the example should be as compact as possible. The number of rows
+is recommended to be around 4, but make it a number that makes sense for the
+specific example. For example in the ``head`` method, it requires to be higher
+than 5, to show the example with the default values. If doing the ``mean``, we
+could use something like ``[1, 2, 3]``, so it is easy to see that the value
+returned is the mean.
+
+For more complex examples (groupping for example), avoid using data without
+interpretation, like a matrix of random numbers with columns A, B, C, D...
+And instead use a meaningful example, which makes it easier to understand the
+concept. Unless required by the example, use names of animals, to keep examples
+consistent. And numerical properties of them.
+
+When calling the method, keywords arguments ``head(n=3)`` are preferred to
+positional arguments ``head(3)``.
+
+**Good:**
+
+.. code-block:: python
+
+ class Series:
+ def mean(self):
+ """
+ Compute the mean of the input.
+
+ Examples
+ --------
+ >>> s = pd.Series([1, 2, 3])
+ >>> s.mean()
+ 2
+ """
+ pass
+
+
+ def fillna(self, value):
+ """
+ Replace missing values by `value`.
+
+ Examples
+ --------
+ >>> s = pd.Series([1, np.nan, 3])
+ >>> s.fillna(0)
+ [1, 0, 3]
+ """
+ pass
+
+ def groupby_mean(self):
+ """
+ Group by index and return mean.
+
+ Examples
+ --------
+ >>> s = pd.Series([380., 370., 24., 26],
+ ... name='max_speed',
+ ... index=['falcon', 'falcon', 'parrot', 'parrot'])
+ >>> s.groupby_mean()
+ index
+ falcon 375.0
+ parrot 25.0
+ Name: max_speed, dtype: float64
+ """
+ pass
+
+ def contains(self, pattern, case_sensitive=True, na=numpy.nan):
+ """
+ Return whether each value contains `pattern`.
+
+ In this case, we are illustrating how to use sections, even
+ if the example is simple enough and does not require them.
+
+ Examples
+ --------
+ >>> s = pd.Series('Antelope', 'Lion', 'Zebra', numpy.nan)
+ >>> s.contains(pattern='a')
+ 0 False
+ 1 False
+ 2 True
+ 3 NaN
+ dtype: bool
+
+ **Case sensitivity**
+
+ With `case_sensitive` set to `False` we can match `a` with both
+ `a` and `A`:
+
+ >>> s.contains(pattern='a', case_sensitive=False)
+ 0 True
+ 1 False
+ 2 True
+ 3 NaN
+ dtype: bool
+
+ **Missing values**
+
+ We can fill missing values in the output using the `na` parameter:
+
+ >>> s.contains(pattern='a', na=False)
+ 0 False
+ 1 False
+ 2 True
+ 3 False
+ dtype: bool
+ """
+ pass
+
+**Bad:**
+
+.. code-block:: python
+
+ def method(foo=None, bar=None):
+ """
+ A sample DataFrame method.
+
+ Do not import numpy and pandas.
+
+ Try to use meaningful data, when it makes the example easier
+ to understand.
+
+ Try to avoid positional arguments like in `df.method(1)`. They
+ can be all right if previously defined with a meaningful name,
+ like in `present_value(interest_rate)`, but avoid them otherwise.
+
+ When presenting the behavior with different parameters, do not place
+ all the calls one next to the other. Instead, add a short sentence
+ explaining what the example shows.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> import pandas as pd
+ >>> df = pd.DataFrame(numpy.random.randn(3, 3),
+ ... columns=('a', 'b', 'c'))
+ >>> df.method(1)
+ 21
+ >>> df.method(bar=14)
+ 123
+ """
+ pass
+
+
+.. _docstring.doctest_tips:
+
+Tips for getting your examples pass the doctests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Getting the examples pass the doctests in the validation script can sometimes
+be tricky. Here are some attention points:
+
+* Import all needed libraries (except for pandas and numpy, those are already
+ imported as ``import pandas as pd`` and ``import numpy as np``) and define
+ all variables you use in the example.
+
+* Try to avoid using random data. However random data might be OK in some
+ cases, like if the function you are documenting deals with probability
+ distributions, or if the amount of data needed to make the function result
+ meaningful is too much, such that creating it manually is very cumbersome.
+ In those cases, always use a fixed random seed to make the generated examples
+ predictable. Example::
+
+ >>> np.random.seed(42)
+ >>> df = pd.DataFrame({'normal': np.random.normal(100, 5, 20)})
+
+* If you have a code snippet that wraps multiple lines, you need to use '...'
+ on the continued lines: ::
+
+ >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], index=['a', 'b', 'c'],
+ ... columns=['A', 'B'])
+
+* If you want to show a case where an exception is raised, you can do::
+
+ >>> pd.to_datetime(["712-01-01"])
+ Traceback (most recent call last):
+ OutOfBoundsDatetime: Out of bounds nanosecond timestamp: 712-01-01 00:00:00
+
+ It is essential to include the "Traceback (most recent call last):", but for
+ the actual error only the error name is sufficient.
+
+* If there is a small part of the result that can vary (e.g. a hash in an object
+ represenation), you can use ``...`` to represent this part.
+
+ If you want to show that ``s.plot()`` returns a matplotlib AxesSubplot object,
+ this will fail the doctest ::
+
+ >>> s.plot()
+ <matplotlib.axes._subplots.AxesSubplot at 0x7efd0c0b0690>
+
+ However, you can do (notice the comment that needs to be added) ::
+
+ >>> s.plot() # doctest: +ELLIPSIS
+ <matplotlib.axes._subplots.AxesSubplot at ...>
+
+
+.. _docstring.example_plots:
+
+Plots in examples
+^^^^^^^^^^^^^^^^^
+
+There are some methods in pandas returning plots. To render the plots generated
+by the examples in the documentation, the ``.. plot::`` directive exists.
+
+To use it, place the next code after the "Examples" header as shown below. The
+plot will be generated automatically when building the documentation.
+
+.. code-block:: python
+
+ class Series:
+ def plot(self):
+ """
+ Generate a plot with the `Series` data.
+
+ Examples
+ --------
+
+ .. plot::
+ :context: close-figs
+
+ >>> s = pd.Series([1, 2, 3])
+ >>> s.plot()
+ """
+ pass
| This PR is to make it easier to review the proposal guide for the pandas documentation sprint, as discussed in pandas-dev.
| https://api.github.com/repos/pandas-dev/pandas/pulls/19704 | 2018-02-14T22:06:34Z | 2018-03-12T17:32:30Z | 2018-03-12T17:32:30Z | 2018-03-12T17:44:34Z |
GroupBy Rank SegFault Fix - astype instead of view | diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in
index 1d77a373bb7dd..fe4d31516d839 100644
--- a/pandas/_libs/groupby_helper.pxi.in
+++ b/pandas/_libs/groupby_helper.pxi.in
@@ -531,7 +531,7 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
# each label corresponds to a different group value,
# the mask helps you differentiate missing values before
# performing sort on the actual values
- _as = np.lexsort(order).view(dtype=np.int64)
+ _as = np.lexsort(order).astype(np.int64, copy=False)
if not ascending:
_as = _as[::-1]
| xref #19679 and #19481 | https://api.github.com/repos/pandas-dev/pandas/pulls/19701 | 2018-02-14T19:46:29Z | 2018-02-14T23:12:15Z | 2018-02-14T23:12:15Z | 2018-02-14T23:22:37Z |
Move conda build and ASV check to cron job | diff --git a/.travis.yml b/.travis.yml
index 4cbe7f86bd2fa..b1168f18315c3 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -52,7 +52,7 @@ matrix:
# In allow_failures
- dist: trusty
env:
- - JOB="3.5_CONDA_BUILD_TEST" TEST_ARGS="--skip-slow --skip-network" CONDA_BUILD_TEST=true
+ - JOB="3.5" TEST_ARGS="--skip-slow --skip-network"
- dist: trusty
env:
- JOB="3.6" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" CONDA_FORGE=true COVERAGE=true
@@ -73,17 +73,13 @@ matrix:
env:
- JOB="3.6_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate"
# In allow_failures
- - dist: trusty
- env:
- - JOB="3.6_ASV" ASV=true
- # In allow_failures
- dist: trusty
env:
- JOB="3.6_DOC" DOC=true
allow_failures:
- dist: trusty
env:
- - JOB="3.5_CONDA_BUILD_TEST" TEST_ARGS="--skip-slow --skip-network" CONDA_BUILD_TEST=true
+ - JOB="3.5" TEST_ARGS="--skip-slow --skip-network"
- dist: trusty
env:
- JOB="2.7_SLOW" SLOW=true
@@ -97,9 +93,6 @@ matrix:
- dist: trusty
env:
- JOB="3.6_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate"
- - dist: trusty
- env:
- - JOB="3.6_ASV" ASV=true
- dist: trusty
env:
- JOB="3.6_DOC" DOC=true
@@ -135,7 +128,6 @@ script:
- ci/script_single.sh
- ci/script_multi.sh
- ci/lint.sh
- - ci/asv.sh
- echo "checking imports"
- source activate pandas && python ci/check_imports.py
- echo "script done"
diff --git a/ci/asv.sh b/ci/asv.sh
deleted file mode 100755
index 1e9a8d6380eb5..0000000000000
--- a/ci/asv.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-echo "inside $0"
-
-source activate pandas
-
-RET=0
-
-if [ "$ASV" ]; then
- echo "Check for failed asv benchmarks"
-
- cd asv_bench
-
- asv machine --yes
-
- time asv dev | tee failed_asv.txt
-
- echo "The following asvs benchmarks (if any) failed."
-
- cat failed_asv.txt | grep "failed" failed_asv.txt
-
- if [ $? = "0" ]; then
- RET=1
- fi
-
- echo "DONE displaying failed asvs benchmarks."
-
- rm failed_asv.txt
-
- echo "Check for failed asv benchmarks DONE"
-else
- echo "NOT checking for failed asv benchmarks"
-fi
-
-exit $RET
diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index 6e270519e60c3..458ff083b65eb 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -50,12 +50,6 @@ conda config --set ssl_verify false || exit 1
conda config --set quiet true --set always_yes true --set changeps1 false || exit 1
conda update -q conda
-if [ "$CONDA_BUILD_TEST" ]; then
- echo
- echo "[installing conda-build]"
- conda install conda-build
-fi
-
echo
echo "[add channels]"
conda config --remove channels defaults || exit 1
@@ -122,7 +116,7 @@ if [ "$COVERAGE" ]; then
fi
echo
-if [ -z "$PIP_BUILD_TEST" ] && [ -z "$CONDA_BUILD_TEST" ]; then
+if [ -z "$PIP_BUILD_TEST" ] ; then
# build but don't install
echo "[build em]"
@@ -177,15 +171,6 @@ if [ "$PIP_BUILD_TEST" ]; then
conda uninstall -y cython
time pip install dist/*tar.gz || exit 1
-elif [ "$CONDA_BUILD_TEST" ]; then
-
- # build & install testing
- echo "[building conda recipe]"
- time conda build ./conda.recipe --python 3.5 -q --no-test || exit 1
-
- echo "[installing]"
- conda install pandas --use-local || exit 1
-
else
# install our pandas
diff --git a/ci/requirements-3.5_CONDA_BUILD_TEST.build b/ci/requirements-3.5.build
similarity index 100%
rename from ci/requirements-3.5_CONDA_BUILD_TEST.build
rename to ci/requirements-3.5.build
diff --git a/ci/requirements-3.5_CONDA_BUILD_TEST.pip b/ci/requirements-3.5.pip
similarity index 100%
rename from ci/requirements-3.5_CONDA_BUILD_TEST.pip
rename to ci/requirements-3.5.pip
diff --git a/ci/requirements-3.5_CONDA_BUILD_TEST.run b/ci/requirements-3.5.run
similarity index 100%
rename from ci/requirements-3.5_CONDA_BUILD_TEST.run
rename to ci/requirements-3.5.run
diff --git a/ci/requirements-3.5_CONDA_BUILD_TEST.sh b/ci/requirements-3.5.sh
similarity index 86%
rename from ci/requirements-3.5_CONDA_BUILD_TEST.sh
rename to ci/requirements-3.5.sh
index 093fdbcf21d78..529e1e8742722 100644
--- a/ci/requirements-3.5_CONDA_BUILD_TEST.sh
+++ b/ci/requirements-3.5.sh
@@ -2,7 +2,7 @@
source activate pandas
-echo "install 35 CONDA_BUILD_TEST"
+echo "install 35"
# pip install python-dateutil to get latest
conda remove -n pandas python-dateutil --force
diff --git a/ci/requirements-3.6_ASV.build b/ci/requirements-3.6_ASV.build
deleted file mode 100644
index bc72eed2a0d4e..0000000000000
--- a/ci/requirements-3.6_ASV.build
+++ /dev/null
@@ -1,5 +0,0 @@
-python=3.6*
-python-dateutil
-pytz
-numpy=1.13*
-cython
diff --git a/ci/requirements-3.6_ASV.run b/ci/requirements-3.6_ASV.run
deleted file mode 100644
index 6c45e3371e9cf..0000000000000
--- a/ci/requirements-3.6_ASV.run
+++ /dev/null
@@ -1,25 +0,0 @@
-ipython
-ipykernel
-ipywidgets
-sphinx=1.5*
-nbconvert
-nbformat
-notebook
-matplotlib
-seaborn
-scipy
-lxml
-beautifulsoup4
-html5lib
-pytables
-python-snappy
-openpyxl
-xlrd
-xlwt
-xlsxwriter
-sqlalchemy
-numexpr
-bottleneck
-statsmodels
-xarray
-pyqt
diff --git a/ci/requirements-3.6_ASV.sh b/ci/requirements-3.6_ASV.sh
deleted file mode 100755
index 8a46f85dbb6bc..0000000000000
--- a/ci/requirements-3.6_ASV.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-source activate pandas
-
-echo "[install ASV_BUILD deps]"
-
-pip install git+https://github.com/spacetelescope/asv
diff --git a/ci/script_multi.sh b/ci/script_multi.sh
index 766e51625fbe6..6c354fc4cab0b 100755
--- a/ci/script_multi.sh
+++ b/ci/script_multi.sh
@@ -18,7 +18,7 @@ fi
export PYTHONHASHSEED=$(python -c 'import random; print(random.randint(1, 4294967295))')
echo PYTHONHASHSEED=$PYTHONHASHSEED
-if [ "$PIP_BUILD_TEST" ] || [ "$CONDA_BUILD_TEST" ]; then
+if [ "$PIP_BUILD_TEST" ] ; then
echo "[build-test]"
echo "[env]"
@@ -37,9 +37,6 @@ if [ "$PIP_BUILD_TEST" ] || [ "$CONDA_BUILD_TEST" ]; then
elif [ "$DOC" ]; then
echo "We are not running pytest as this is a doc-build"
-elif [ "$ASV" ]; then
- echo "We are not running pytest as this is an asv-build"
-
elif [ "$COVERAGE" ]; then
echo pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
diff --git a/ci/script_single.sh b/ci/script_single.sh
index 153847ab2e8c9..74b0e897f1d73 100755
--- a/ci/script_single.sh
+++ b/ci/script_single.sh
@@ -16,15 +16,12 @@ if [ "$SLOW" ]; then
TEST_ARGS="--only-slow --skip-network"
fi
-if [ "$PIP_BUILD_TEST" ] || [ "$CONDA_BUILD_TEST" ]; then
+if [ "$PIP_BUILD_TEST" ]; then
echo "We are not running pytest as this is a build test."
elif [ "$DOC" ]; then
echo "We are not running pytest as this is a doc-build"
-elif [ "$ASV" ]; then
- echo "We are not running pytest as this is an asv-build"
-
elif [ "$COVERAGE" ]; then
echo pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
| xref #19695 (not closing until pip build test is moved or we decide to keep that here)
https://travis-ci.org/pandas-dev/pandas-ci and http://github.com/pandas-dev/pandas-ci | https://api.github.com/repos/pandas-dev/pandas/pulls/19698 | 2018-02-14T16:22:16Z | 2018-02-14T19:56:52Z | 2018-02-14T19:56:52Z | 2018-02-14T19:58:11Z |
remove usages of _get_na_value | diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py
index b7af533f96ddc..2e5ec8b554ce7 100644
--- a/pandas/core/indexes/api.py
+++ b/pandas/core/indexes/api.py
@@ -2,7 +2,6 @@
_new_Index,
_ensure_index,
_ensure_index_from_sequences,
- _get_na_value,
InvalidIndexError) # noqa
from pandas.core.indexes.category import CategoricalIndex # noqa
from pandas.core.indexes.multi import MultiIndex # noqa
@@ -25,7 +24,7 @@
'InvalidIndexError', 'TimedeltaIndex',
'PeriodIndex', 'DatetimeIndex',
'_new_Index', 'NaT',
- '_ensure_index', '_ensure_index_from_sequences', '_get_na_value',
+ '_ensure_index', '_ensure_index_from_sequences',
'_get_combined_index',
'_get_objs_combined_axis', '_union_indexes',
'_get_consensus_names',
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index be7c1624936bf..1a74eb28e3f03 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2098,7 +2098,7 @@ def asof(self, label):
try:
loc = self.get_loc(label, method='pad')
except KeyError:
- return _get_na_value(self.dtype)
+ return self._na_value
else:
if isinstance(loc, slice):
loc = loc.indices(len(self))[-1]
@@ -4316,12 +4316,6 @@ def _ensure_index(index_like, copy=False):
return Index(index_like)
-def _get_na_value(dtype):
- if is_datetime64_any_dtype(dtype) or is_timedelta64_dtype(dtype):
- return libts.NaT
- return np.nan
-
-
def _ensure_has_len(seq):
"""If seq is an iterator, put its values into a list."""
try:
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 94dbd8b884e47..73f4aee1c4880 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -34,7 +34,7 @@
from pandas.core.indexes.base import (
Index, _ensure_index,
- _get_na_value, InvalidIndexError,
+ InvalidIndexError,
_index_shared_docs)
from pandas.core.indexes.frozen import (
FrozenNDArray, FrozenList, _ensure_frozen)
@@ -804,7 +804,7 @@ def values(self):
elif box:
taken = algos.take_1d(lev._box_values(lev._ndarray_values),
lab,
- fill_value=_get_na_value(lev.dtype.type))
+ fill_value=lev._na_value)
else:
taken = algos.take_1d(np.asarray(lev._values), lab)
values.append(taken)
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index c8bca476c65f2..3ef152d091b24 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -29,7 +29,7 @@
import pandas.core.algorithms as algos
from pandas._libs import algos as _algos, reshape as _reshape
-from pandas.core.index import Index, MultiIndex, _get_na_value
+from pandas.core.index import Index, MultiIndex
class _Unstacker(object):
@@ -260,7 +260,7 @@ def get_new_columns(self):
return self.removed_level
lev = self.removed_level
- return lev.insert(0, _get_na_value(lev.dtype.type))
+ return lev.insert(0, lev._na_value)
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
@@ -299,7 +299,7 @@ def get_new_index(self):
if len(self.new_index_levels) == 1:
lev, lab = self.new_index_levels[0], result_labels[0]
if (lab == -1).any():
- lev = lev.insert(len(lev), _get_na_value(lev.dtype.type))
+ lev = lev.insert(len(lev), lev._na_value)
return lev.take(lab)
return MultiIndex(levels=self.new_index_levels, labels=result_labels,
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 655eaa5373f5a..90dc14836ab55 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1261,8 +1261,6 @@ def count(self, level=None):
-------
nobs : int or Series (if level specified)
"""
- from pandas.core.index import _get_na_value
-
if level is None:
return notna(com._values_from_object(self)).sum()
@@ -1275,7 +1273,7 @@ def count(self, level=None):
mask = lab == -1
if mask.any():
lab[mask] = cnt = len(lev)
- lev = lev.insert(cnt, _get_na_value(lev.dtype.type))
+ lev = lev.insert(cnt, lev._na_value)
obs = lab[notna(self.values)]
out = np.bincount(obs, minlength=len(lev) or None)
| I think the function might also be incorrectly excluding period_dtype. | https://api.github.com/repos/pandas-dev/pandas/pulls/19692 | 2018-02-14T03:53:07Z | 2018-02-16T18:21:52Z | 2018-02-16T18:21:52Z | 2018-02-18T18:22:02Z |
add missing args, make kwarg explicit | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index be7c1624936bf..81b6b28d3927e 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3943,8 +3943,8 @@ def _evaluate_with_timedelta_like(self, other, op, opstr, reversed=False):
def _evaluate_with_datetime_like(self, other, op, opstr):
raise TypeError("can only perform ops with datetime like values")
- def _evaluate_compare(self, op):
- raise base.AbstractMethodError(self)
+ def _evaluate_compare(self, other, op):
+ raise com.AbstractMethodError(self)
@classmethod
def _add_comparison_methods(cls):
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index f553e1a02c9d6..dd5feefc49fe3 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -3569,8 +3569,8 @@ def reduction(self, f, axis=0, consolidate=True, transposed=False,
placement=np.arange(len(values)))],
axes[0])
- def isna(self, **kwargs):
- return self.apply('apply', **kwargs)
+ def isna(self, func, **kwargs):
+ return self.apply('apply', func=func, **kwargs)
def where(self, **kwargs):
return self.apply('where', **kwargs)
| Index._evaluate_compare is never hit, but if it were it would raise twice: once for not having the right signature and again for referencing a non-existent base.AbstractMethodError
`BlockManager.isna` is entirely non-obvious because there is a required kwarg that isn't in its signature. This makes it explicit. | https://api.github.com/repos/pandas-dev/pandas/pulls/19691 | 2018-02-14T03:10:36Z | 2018-02-15T12:31:12Z | 2018-02-15T12:31:12Z | 2018-06-22T03:32:32Z |
DOC/BLD: unpin sphinx to use sphinx 1.7 | diff --git a/ci/requirements-3.6_DOC.run b/ci/requirements-3.6_DOC.run
index 6c45e3371e9cf..084f38ce17eb2 100644
--- a/ci/requirements-3.6_DOC.run
+++ b/ci/requirements-3.6_DOC.run
@@ -1,7 +1,7 @@
ipython
ipykernel
ipywidgets
-sphinx=1.5*
+sphinx
nbconvert
nbformat
notebook
diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt
index a474658fa2922..82f8de277c57b 100644
--- a/ci/requirements_dev.txt
+++ b/ci/requirements_dev.txt
@@ -7,4 +7,4 @@ pytest>=3.1
python-dateutil>=2.5.0
pytz
setuptools>=3.3
-sphinx=1.5*
+sphinx
diff --git a/doc/sphinxext/numpydoc/numpydoc.py b/doc/sphinxext/numpydoc/numpydoc.py
index 2bc2d1e91ed3f..4861aa90edce1 100755
--- a/doc/sphinxext/numpydoc/numpydoc.py
+++ b/doc/sphinxext/numpydoc/numpydoc.py
@@ -26,7 +26,6 @@
raise RuntimeError("Sphinx 1.0.1 or newer is required")
from .docscrape_sphinx import get_doc_object, SphinxDocString
-from sphinx.util.compat import Directive
if sys.version_info[0] >= 3:
sixu = lambda s: s
| xref https://github.com/pandas-dev/pandas/issues/18147, closes https://github.com/pandas-dev/pandas/issues/16705
Sphinx 1.7 is released, so let's try out if we can now remove the pinning of sphinx to 1.5.x | https://api.github.com/repos/pandas-dev/pandas/pulls/19687 | 2018-02-13T21:53:27Z | 2018-02-26T08:46:26Z | 2018-02-26T08:46:26Z | 2018-02-26T08:46:29Z |
API: Validate keyword arguments to fillna | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index a4b943f995a33..54fbbb12c2917 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -556,6 +556,7 @@ Datetimelike API Changes
- Subtracting ``NaT`` from a :class:`Series` with ``dtype='datetime64[ns]'`` returns a ``Series`` with ``dtype='timedelta64[ns]'`` instead of ``dtype='datetime64[ns]'`` (:issue:`18808`)
- Operations between a :class:`Series` with dtype ``dtype='datetime64[ns]'`` and a :class:`PeriodIndex` will correctly raises ``TypeError`` (:issue:`18850`)
- Subtraction of :class:`Series` with timezone-aware ``dtype='datetime64[ns]'`` with mis-matched timezones will raise ``TypeError`` instead of ``ValueError`` (:issue:`18817`)
+- :func:`pandas.merge` provides a more informative error message when trying to merge on timezone-aware and timezone-naive columns (:issue:`15800`)
.. _whatsnew_0230.api.other:
@@ -575,7 +576,6 @@ Other API Changes
- :func:`Series.fillna` now raises a ``TypeError`` instead of a ``ValueError`` when passed a list, tuple or DataFrame as a ``value`` (:issue:`18293`)
- :func:`pandas.DataFrame.merge` no longer casts a ``float`` column to ``object`` when merging on ``int`` and ``float`` columns (:issue:`16572`)
- :func:`pandas.merge` now raises a ``ValueError`` when trying to merge on incompatible data types (:issue:`9780`)
-- :func:`pandas.merge` provides a more informative error message when trying to merge on timezone-aware and timezone-naive columns (:issue:`15800`)
- The default NA value for :class:`UInt64Index` has changed from 0 to ``NaN``, which impacts methods that mask with NA, such as ``UInt64Index.where()`` (:issue:`18398`)
- Refactored ``setup.py`` to use ``find_packages`` instead of explicitly listing out all subpackages (:issue:`18535`)
- Rearranged the order of keyword arguments in :func:`read_excel()` to align with :func:`read_csv()` (:issue:`16672`)
@@ -589,6 +589,7 @@ Other API Changes
- :func:`Series.to_csv` now accepts a ``compression`` argument that works in the same way as the ``compression`` argument in :func:`DataFrame.to_csv` (:issue:`18958`)
- Set operations (union, difference...) on :class:`IntervalIndex` with incompatible index types will now raise a ``TypeError`` rather than a ``ValueError`` (:issue:`19329`)
- :class:`DateOffset` objects render more simply, e.g. ``<DateOffset: days=1>`` instead of ``<DateOffset: kwds={'days': 1}>`` (:issue:`19403`)
+- ``Categorical.fillna`` now validates its ``value`` and ``method`` keyword arguments. It now raises when both or none are specified, matching the behavior of :meth:`Series.fillna` (:issue:`19682`)
.. _whatsnew_0230.deprecations:
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 7354115f8295e..493b2e5bd899b 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -40,7 +40,7 @@
Appender, cache_readonly, deprecate_kwarg, Substitution)
from pandas.io.formats.terminal import get_terminal_size
-from pandas.util._validators import validate_bool_kwarg
+from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
@@ -1610,6 +1610,9 @@ def fillna(self, value=None, method=None, limit=None):
-------
filled : Categorical with NA/NaN filled
"""
+ value, method = validate_fillna_kwargs(
+ value, method, validate_scalar_dict_value=False
+ )
if value is None:
value = np.nan
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 297450417e3cf..8034cf89cf8b7 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -54,7 +54,7 @@
import pandas.core.nanops as nanops
from pandas.util._decorators import (Appender, Substitution,
deprecate_kwarg)
-from pandas.util._validators import validate_bool_kwarg
+from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core import config
# goal is to be able to define the docs close to function, while still being
@@ -4697,10 +4697,8 @@ def infer_objects(self):
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None):
inplace = validate_bool_kwarg(inplace, 'inplace')
+ value, method = validate_fillna_kwargs(value, method)
- if isinstance(value, (list, tuple)):
- raise TypeError('"value" parameter must be a scalar or dict, but '
- 'you passed a "{0}"'.format(type(value).__name__))
self._consolidate_inplace()
# set the default here, so functions examining the signaure
@@ -4711,8 +4709,7 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
method = missing.clean_fill_method(method)
from pandas import DataFrame
if value is None:
- if method is None:
- raise ValueError('must specify a fill method or value')
+
if self._is_mixed_type and axis == 1:
if inplace:
raise NotImplementedError()
@@ -4746,9 +4743,6 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
coerce=True,
downcast=downcast)
else:
- if method is not None:
- raise ValueError('cannot specify both a fill method and value')
-
if len(self._get_axis(axis)) == 0:
return self
diff --git a/pandas/tests/categorical/test_missing.py b/pandas/tests/categorical/test_missing.py
index 79758dee5cfda..fca5573547071 100644
--- a/pandas/tests/categorical/test_missing.py
+++ b/pandas/tests/categorical/test_missing.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import numpy as np
+import pytest
import pandas.util.testing as tm
from pandas import (Categorical, Index, isna)
@@ -53,3 +54,18 @@ def test_set_item_nan(self):
exp = Categorical([1, np.nan, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(cat, exp)
+
+ @pytest.mark.parametrize('fillna_kwargs, msg', [
+ (dict(value=1, method='ffill'),
+ "Cannot specify both 'value' and 'method'."),
+ (dict(),
+ "Must specify a fill 'value' or 'method'."),
+ (dict(method='bad'),
+ "Invalid fill method. Expecting .* bad"),
+ ])
+ def test_fillna_raises(self, fillna_kwargs, msg):
+ # https://github.com/pandas-dev/pandas/issues/19682
+ cat = Categorical([1, 2, 3])
+
+ with tm.assert_raises_regex(ValueError, msg):
+ cat.fillna(**fillna_kwargs)
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py
index b30ffc7416f92..a96563051e7de 100644
--- a/pandas/util/_validators.py
+++ b/pandas/util/_validators.py
@@ -320,3 +320,39 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
msg = "Cannot specify all of '{}', 'index', 'columns'."
raise TypeError(msg.format(arg_name))
return out
+
+
+def validate_fillna_kwargs(value, method, validate_scalar_dict_value=True):
+ """Validate the keyword arguments to 'fillna'.
+
+ This checks that exactly one of 'value' and 'method' is specified.
+ If 'method' is specified, this validates that it's a valid method.
+
+ Parameters
+ ----------
+ value, method : object
+ The 'value' and 'method' keyword arguments for 'fillna'.
+ validate_scalar_dict_value : bool, default True
+ Whether to validate that 'value' is a scalar or dict. Specifically,
+ validate that it is not a list or tuple.
+
+ Returns
+ -------
+ value, method : object
+ """
+ from pandas.core.missing import clean_fill_method
+
+ if value is None and method is None:
+ raise ValueError("Must specify a fill 'value' or 'method'.")
+ elif value is None and method is not None:
+ method = clean_fill_method(method)
+
+ elif value is not None and method is None:
+ if validate_scalar_dict_value and isinstance(value, (list, tuple)):
+ raise TypeError('"value" parameter must be a scalar or dict, but '
+ 'you passed a "{0}"'.format(type(value).__name__))
+
+ elif value is not None and method is not None:
+ raise ValueError("Cannot specify both 'value' and 'method'.")
+
+ return value, method
| Closes https://github.com/pandas-dev/pandas/issues/19682 | https://api.github.com/repos/pandas-dev/pandas/pulls/19684 | 2018-02-13T20:13:08Z | 2018-02-22T00:20:58Z | 2018-02-22T00:20:58Z | 2018-05-02T13:09:54Z |
DOC: fix IPython spelling. | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 2782ee7b9d201..bc045d74cee52 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1059,7 +1059,7 @@ def to_gbq(self, destination_table, project_id, chunksize=10000,
private_key : str (optional)
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
- authentication (eg. jupyter iPython notebook on remote host)
+ authentication (eg. Jupyter/IPython notebook on remote host)
"""
from pandas.io import gbq
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index b452b0cf5ddd4..f9bc6ae1a5451 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -65,7 +65,7 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None,
private_key : str (optional)
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
- authentication (eg. jupyter iPython notebook on remote host)
+ authentication (eg. Jupyter/IPython notebook on remote host)
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
| It's upper case I and P (or full lower case),
| https://api.github.com/repos/pandas-dev/pandas/pulls/19683 | 2018-02-13T19:53:57Z | 2018-02-13T21:41:15Z | 2018-02-13T21:41:15Z | 2018-02-13T21:41:59Z |
Explicitly set dtype of np.lexsort in group_rank | diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in
index 48dac7bf10362..1d77a373bb7dd 100644
--- a/pandas/_libs/groupby_helper.pxi.in
+++ b/pandas/_libs/groupby_helper.pxi.in
@@ -531,7 +531,7 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out,
# each label corresponds to a different group value,
# the mask helps you differentiate missing values before
# performing sort on the actual values
- _as = np.lexsort(order)
+ _as = np.lexsort(order).view(dtype=np.int64)
if not ascending:
_as = _as[::-1]
| xref #19481
I didn't see any other instance in `group_rank` where the dtype was open to interpretation, so I'm wondering if `np.lexsort` is returning a plain `int` on 32 bit systems. Hoping that explicitly getting a view of that `np.lexsort` result to match the dtype of `_as` will resolve the test issues | https://api.github.com/repos/pandas-dev/pandas/pulls/19679 | 2018-02-13T18:19:44Z | 2018-02-13T23:56:32Z | 2018-02-13T23:56:32Z | 2018-02-14T19:23:22Z |
DOC: Clarify and add fill_value example in arithmetic ops | diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 4c234ccb4dd47..5a715f3dceb76 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -255,8 +255,10 @@ def _get_frame_op_default_axis(name):
----------
other : Series or scalar value
fill_value : None or float value, default None (NaN)
- Fill missing (NaN) values with this value. If both Series are
- missing, the result will be missing
+ Fill existing missing (NaN) values, and any new element needed for
+ successful Series alignment, with this value before computation.
+ If data in both corresponding Series locations is missing
+ the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
@@ -265,6 +267,30 @@ def _get_frame_op_default_axis(name):
-------
result : Series
+Examples
+--------
+>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
+>>> a
+a 1.0
+b 1.0
+c 1.0
+d NaN
+dtype: float64
+>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
+>>> b
+a 1.0
+b NaN
+d 1.0
+e NaN
+dtype: float64
+>>> a.add(b, fill_value=0)
+a 2.0
+b 1.0
+c 1.0
+d 1.0
+e NaN
+dtype: float64
+
See also
--------
Series.{reverse}
@@ -280,8 +306,10 @@ def _get_frame_op_default_axis(name):
axis : {0, 1, 'index', 'columns'}
For Series input, axis to match Series index on
fill_value : None or float value, default None
- Fill missing (NaN) values with this value. If both DataFrame locations are
- missing, the result will be missing
+ Fill existing missing (NaN) values, and any new element needed for
+ successful DataFrame alignment, with this value before computation.
+ If data in both corresponding DataFrame locations is missing
+ the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
@@ -293,6 +321,33 @@ def _get_frame_op_default_axis(name):
Returns
-------
result : DataFrame
+
+Examples
+--------
+>>> a = pd.DataFrame([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'],
+ columns=['one'])
+>>> a
+ one
+a 1.0
+b 1.0
+c 1.0
+d NaN
+>>> b = pd.DataFrame(dict(one=[1, np.nan, 1, np.nan],
+ two=[np.nan, 2, np.nan, 2]),
+ index=['a', 'b', 'd', 'e'])
+>>> b
+ one two
+a 1.0 NaN
+b NaN 2.0
+d 1.0 NaN
+e NaN 2.0
+>>> a.add(b, fill_value=0)
+ one two
+a 2.0 NaN
+b 1.0 2.0
+c 1.0 NaN
+d 1.0 NaN
+e NaN 2.0
"""
_flex_doc_FRAME = """
@@ -307,8 +362,10 @@ def _get_frame_op_default_axis(name):
axis : {{0, 1, 'index', 'columns'}}
For Series input, axis to match Series index on
fill_value : None or float value, default None
- Fill missing (NaN) values with this value. If both DataFrame
- locations are missing, the result will be missing
+ Fill existing missing (NaN) values, and any new element needed for
+ successful DataFrame alignment, with this value before computation.
+ If data in both corresponding DataFrame locations is missing
+ the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
@@ -321,6 +378,33 @@ def _get_frame_op_default_axis(name):
-------
result : DataFrame
+Examples
+--------
+>>> a = pd.DataFrame([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'],
+ columns=['one'])
+>>> a
+ one
+a 1.0
+b 1.0
+c 1.0
+d NaN
+>>> b = pd.DataFrame(dict(one=[1, np.nan, 1, np.nan],
+ two=[np.nan, 2, np.nan, 2]),
+ index=['a', 'b', 'd', 'e'])
+>>> b
+ one two
+a 1.0 NaN
+b NaN 2.0
+d 1.0 NaN
+e NaN 2.0
+>>> a.add(b, fill_value=0)
+ one two
+a 2.0 NaN
+b 1.0 2.0
+c 1.0 NaN
+d 1.0 NaN
+e NaN 2.0
+
See also
--------
DataFrame.{reverse}
@@ -392,7 +476,6 @@ def _make_flex_doc(op_name, typ):
base_doc = _flex_doc_PANEL
else:
raise AssertionError('Invalid typ argument.')
-
doc = base_doc.format(desc=op_desc['desc'], op_name=op_name,
equiv=equiv, reverse=op_desc['reverse'])
return doc
| - [ ] closes #19653
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/19675 | 2018-02-13T14:32:36Z | 2018-02-22T00:16:22Z | 2018-02-22T00:16:22Z | 2018-02-22T00:16:25Z |
Cythonized GroupBy Fill | diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index 61db39528a5fb..c347442784d41 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -370,11 +370,11 @@ class GroupByMethods(object):
param_names = ['dtype', 'method']
params = [['int', 'float'],
- ['all', 'any', 'count', 'cumcount', 'cummax', 'cummin',
- 'cumprod', 'cumsum', 'describe', 'first', 'head', 'last', 'mad',
- 'max', 'min', 'median', 'mean', 'nunique', 'pct_change', 'prod',
- 'rank', 'sem', 'shift', 'size', 'skew', 'std', 'sum', 'tail',
- 'unique', 'value_counts', 'var']]
+ ['all', 'any', 'bfill', 'count', 'cumcount', 'cummax', 'cummin',
+ 'cumprod', 'cumsum', 'describe', 'ffill', 'first', 'head',
+ 'last', 'mad', 'max', 'min', 'median', 'mean', 'nunique',
+ 'pct_change', 'prod', 'rank', 'sem', 'shift', 'size', 'skew',
+ 'std', 'sum', 'tail', 'unique', 'value_counts', 'var']]
def setup(self, dtype, method):
ngroups = 1000
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index fd3c3a5a7a301..fcaf46b1c3d71 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -689,6 +689,7 @@ Performance Improvements
- Improved performance of pairwise ``.rolling()`` and ``.expanding()`` with ``.cov()`` and ``.corr()`` operations (:issue:`17917`)
- Improved performance of :func:`DataFrameGroupBy.rank` (:issue:`15779`)
- Improved performance of variable ``.rolling()`` on ``.min()`` and ``.max()`` (:issue:`19521`)
+- Improved performance of ``GroupBy.ffill`` and ``GroupBy.bfill`` (:issue:`11296`)
.. _whatsnew_0230.docs:
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 866683ce378ab..e3d208a915225 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -94,5 +94,221 @@ cdef inline float64_t kth_smallest_c(float64_t* a,
return a[k]
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def group_median_float64(ndarray[float64_t, ndim=2] out,
+ ndarray[int64_t] counts,
+ ndarray[float64_t, ndim=2] values,
+ ndarray[int64_t] labels,
+ Py_ssize_t min_count=-1):
+ """
+ Only aggregates on axis=0
+ """
+ cdef:
+ Py_ssize_t i, j, N, K, ngroups, size
+ ndarray[int64_t] _counts
+ ndarray data
+ float64_t* ptr
+
+ assert min_count == -1, "'min_count' only used in add and prod"
+
+ ngroups = len(counts)
+ N, K = (<object> values).shape
+
+ indexer, _counts = groupsort_indexer(labels, ngroups)
+ counts[:] = _counts[1:]
+
+ data = np.empty((K, N), dtype=np.float64)
+ ptr = <float64_t*> data.data
+
+ take_2d_axis1_float64_float64(values.T, indexer, out=data)
+
+ with nogil:
+
+ for i in range(K):
+ # exclude NA group
+ ptr += _counts[0]
+ for j in range(ngroups):
+ size = _counts[j + 1]
+ out[j, i] = median_linear(ptr, size)
+ ptr += size
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def group_cumprod_float64(float64_t[:, :] out,
+ float64_t[:, :] values,
+ int64_t[:] labels,
+ bint is_datetimelike):
+ """
+ Only transforms on axis=0
+ """
+ cdef:
+ Py_ssize_t i, j, N, K, size
+ float64_t val
+ float64_t[:, :] accum
+ int64_t lab
+
+ N, K = (<object> values).shape
+ accum = np.ones_like(values)
+
+ with nogil:
+ for i in range(N):
+ lab = labels[i]
+
+ if lab < 0:
+ continue
+ for j in range(K):
+ val = values[i, j]
+ if val == val:
+ accum[lab, j] *= val
+ out[i, j] = accum[lab, j]
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def group_cumsum(numeric[:, :] out,
+ numeric[:, :] values,
+ int64_t[:] labels,
+ is_datetimelike):
+ """
+ Only transforms on axis=0
+ """
+ cdef:
+ Py_ssize_t i, j, N, K, size
+ numeric val
+ numeric[:, :] accum
+ int64_t lab
+
+ N, K = (<object> values).shape
+ accum = np.zeros_like(values)
+
+ with nogil:
+ for i in range(N):
+ lab = labels[i]
+
+ if lab < 0:
+ continue
+ for j in range(K):
+ val = values[i, j]
+
+ if numeric == float32_t or numeric == float64_t:
+ if val == val:
+ accum[lab, j] += val
+ out[i, j] = accum[lab, j]
+ else:
+ accum[lab, j] += val
+ out[i, j] = accum[lab, j]
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def group_shift_indexer(ndarray[int64_t] out, ndarray[int64_t] labels,
+ int ngroups, int periods):
+ cdef:
+ Py_ssize_t N, i, j, ii
+ int offset, sign
+ int64_t lab, idxer, idxer_slot
+ int64_t[:] label_seen = np.zeros(ngroups, dtype=np.int64)
+ int64_t[:, :] label_indexer
+
+ N, = (<object> labels).shape
+
+ if periods < 0:
+ periods = -periods
+ offset = N - 1
+ sign = -1
+ elif periods > 0:
+ offset = 0
+ sign = 1
+
+ if periods == 0:
+ with nogil:
+ for i in range(N):
+ out[i] = i
+ else:
+ # array of each previous indexer seen
+ label_indexer = np.zeros((ngroups, periods), dtype=np.int64)
+ with nogil:
+ for i in range(N):
+ ## reverse iterator if shifting backwards
+ ii = offset + sign * i
+ lab = labels[ii]
+
+ # Skip null keys
+ if lab == -1:
+ out[ii] = -1
+ continue
+
+ label_seen[lab] += 1
+
+ idxer_slot = label_seen[lab] % periods
+ idxer = label_indexer[lab, idxer_slot]
+
+ if label_seen[lab] > periods:
+ out[ii] = idxer
+ else:
+ out[ii] = -1
+
+ label_indexer[lab, idxer_slot] = ii
+
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+def group_fillna_indexer(ndarray[int64_t] out, ndarray[int64_t] labels,
+ ndarray[uint8_t] mask, object direction,
+ int64_t limit):
+ """Indexes how to fill values forwards or backwards within a group
+
+ Parameters
+ ----------
+ out : array of int64_t values which this method will write its results to
+ Missing values will be written to with a value of -1
+ labels : array containing unique label for each group, with its ordering
+ matching up to the corresponding record in `values`
+ mask : array of int64_t values where a 1 indicates a missing value
+ direction : {'ffill', 'bfill'}
+ Direction for fill to be applied (forwards or backwards, respectively)
+ limit : Consecutive values to fill before stopping, or -1 for no limit
+
+ Notes
+ -----
+ This method modifies the `out` parameter rather than returning an object
+ """
+ cdef:
+ Py_ssize_t i, N
+ ndarray[int64_t] sorted_labels
+ int64_t idx, curr_fill_idx=-1, filled_vals=0
+
+ N = len(out)
+
+ # Make sure all arrays are the same size
+ assert N == len(labels) == len(mask)
+
+ sorted_labels = np.argsort(labels).astype(np.int64, copy=False)
+ if direction == 'bfill':
+ sorted_labels = sorted_labels[::-1]
+
+ with nogil:
+ for i in range(N):
+ idx = sorted_labels[i]
+ if mask[idx] == 1: # is missing
+ # Stop filling once we've hit the limit
+ if filled_vals >= limit and limit != -1:
+ curr_fill_idx = -1
+ filled_vals += 1
+ else: # reset items when not missing
+ filled_vals = 0
+ curr_fill_idx = idx
+
+ out[idx] = curr_fill_idx
+
+ # If we move to the next group, reset
+ # the fill_idx and counter
+ if i == N - 1 or labels[idx] != labels[sorted_labels[i+1]]:
+ curr_fill_idx = -1
+ filled_vals = 0
+
+
# generated from template
include "groupby_helper.pxi"
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in
index e03e3af65755b..de802f4a72277 100644
--- a/pandas/_libs/groupby_helper.pxi.in
+++ b/pandas/_libs/groupby_helper.pxi.in
@@ -791,166 +791,3 @@ def group_cummax_{{name}}(ndarray[{{dest_type2}}, ndim=2] out,
out[i, j] = mval
{{endfor}}
-
-#----------------------------------------------------------------------
-# other grouping functions not needing a template
-#----------------------------------------------------------------------
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_median_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] labels,
- Py_ssize_t min_count=-1):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, ngroups, size
- ndarray[int64_t] _counts
- ndarray data
- float64_t* ptr
-
- assert min_count == -1, "'min_count' only used in add and prod"
-
- ngroups = len(counts)
- N, K = (<object> values).shape
-
- indexer, _counts = groupsort_indexer(labels, ngroups)
- counts[:] = _counts[1:]
-
- data = np.empty((K, N), dtype=np.float64)
- ptr = <float64_t*> data.data
-
- take_2d_axis1_float64_float64(values.T, indexer, out=data)
-
- with nogil:
-
- for i in range(K):
- # exclude NA group
- ptr += _counts[0]
- for j in range(ngroups):
- size = _counts[j + 1]
- out[j, i] = median_linear(ptr, size)
- ptr += size
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_cumprod_float64(float64_t[:, :] out,
- float64_t[:, :] values,
- int64_t[:] labels,
- bint is_datetimelike):
- """
- Only transforms on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, size
- float64_t val
- float64_t[:, :] accum
- int64_t lab
-
- N, K = (<object> values).shape
- accum = np.ones_like(values)
-
- with nogil:
- for i in range(N):
- lab = labels[i]
-
- if lab < 0:
- continue
- for j in range(K):
- val = values[i, j]
- if val == val:
- accum[lab, j] *= val
- out[i, j] = accum[lab, j]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_cumsum(numeric[:, :] out,
- numeric[:, :] values,
- int64_t[:] labels,
- is_datetimelike):
- """
- Only transforms on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, size
- numeric val
- numeric[:, :] accum
- int64_t lab
-
- N, K = (<object> values).shape
- accum = np.zeros_like(values)
-
- with nogil:
- for i in range(N):
- lab = labels[i]
-
- if lab < 0:
- continue
- for j in range(K):
- val = values[i, j]
-
- if numeric == float32_t or numeric == float64_t:
- if val == val:
- accum[lab, j] += val
- out[i, j] = accum[lab, j]
- else:
- accum[lab, j] += val
- out[i, j] = accum[lab, j]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_shift_indexer(int64_t[:] out, int64_t[:] labels,
- int ngroups, int periods):
- cdef:
- Py_ssize_t N, i, j, ii
- int offset, sign
- int64_t lab, idxer, idxer_slot
- int64_t[:] label_seen = np.zeros(ngroups, dtype=np.int64)
- int64_t[:, :] label_indexer
-
- N, = (<object> labels).shape
-
- if periods < 0:
- periods = -periods
- offset = N - 1
- sign = -1
- elif periods > 0:
- offset = 0
- sign = 1
-
- if periods == 0:
- with nogil:
- for i in range(N):
- out[i] = i
- else:
- # array of each previous indexer seen
- label_indexer = np.zeros((ngroups, periods), dtype=np.int64)
- with nogil:
- for i in range(N):
- ## reverse iterator if shifting backwards
- ii = offset + sign * i
- lab = labels[ii]
-
- # Skip null keys
- if lab == -1:
- out[ii] = -1
- continue
-
- label_seen[lab] += 1
-
- idxer_slot = label_seen[lab] % periods
- idxer = label_indexer[lab, idxer_slot]
-
- if label_seen[lab] > periods:
- out[ii] = idxer
- else:
- out[ii] = -1
-
- label_indexer[lab, idxer_slot] = ii
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index b1615f720368d..852ad04cd8a2e 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1,5 +1,5 @@
import types
-from functools import wraps
+from functools import wraps, partial
import numpy as np
import datetime
import collections
@@ -38,7 +38,7 @@
_ensure_float)
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.generic import ABCSeries
-from pandas.core.dtypes.missing import isna, notna, _maybe_fill
+from pandas.core.dtypes.missing import isna, isnull, notna, _maybe_fill
from pandas.core.base import (PandasObject, SelectionMixin, GroupByError,
DataError, SpecificationError)
@@ -1457,6 +1457,36 @@ def expanding(self, *args, **kwargs):
from pandas.core.window import ExpandingGroupby
return ExpandingGroupby(self, *args, **kwargs)
+ def _fill(self, direction, limit=None):
+ """Shared function for `pad` and `backfill` to call Cython method
+
+ Parameters
+ ----------
+ direction : {'ffill', 'bfill'}
+ Direction passed to underlying Cython function. `bfill` will cause
+ values to be filled backwards. `ffill` and any other values will
+ default to a forward fill
+ limit : int, default None
+ Maximum number of consecutive values to fill. If `None`, this
+ method will convert to -1 prior to passing to Cython
+
+ Returns
+ -------
+ `Series` or `DataFrame` with filled values
+
+ See Also
+ --------
+ pad
+ backfill
+ """
+ # Need int value for Cython
+ if limit is None:
+ limit = -1
+
+ return self._get_cythonized_result('group_fillna_indexer',
+ self.grouper, needs_mask=True,
+ direction=direction, limit=limit)
+
@Substitution(name='groupby')
def pad(self, limit=None):
"""
@@ -1474,7 +1504,7 @@ def pad(self, limit=None):
Series.fillna
DataFrame.fillna
"""
- return self.apply(lambda x: x.ffill(limit=limit))
+ return self._fill('ffill', limit=limit)
ffill = pad
@Substitution(name='groupby')
@@ -1494,7 +1524,7 @@ def backfill(self, limit=None):
Series.fillna
DataFrame.fillna
"""
- return self.apply(lambda x: x.bfill(limit=limit))
+ return self._fill('bfill', limit=limit)
bfill = backfill
@Substitution(name='groupby')
@@ -1843,6 +1873,45 @@ def cummax(self, axis=0, **kwargs):
return self._cython_transform('cummax', numeric_only=False)
+ def _get_cythonized_result(self, how, grouper, needs_mask=False,
+ needs_ngroups=False, **kwargs):
+ """Get result for Cythonized functions
+
+ Parameters
+ ----------
+ how : str, Cythonized function name to be called
+ grouper : Grouper object containing pertinent group info
+ needs_mask : bool, default False
+ Whether boolean mask needs to be part of the Cython call signature
+ needs_ngroups : bool, default False
+ Whether number of groups part of the Cython call signature
+ **kwargs : dict
+ Extra arguments to be passed back to Cython funcs
+
+ Returns
+ -------
+ `Series` or `DataFrame` with filled values
+ """
+
+ labels, _, ngroups = grouper.group_info
+ output = collections.OrderedDict()
+ base_func = getattr(libgroupby, how)
+
+ for name, obj in self._iterate_slices():
+ indexer = np.zeros_like(labels, dtype=np.int64)
+ func = partial(base_func, indexer, labels)
+ if needs_mask:
+ mask = isnull(obj.values).view(np.uint8)
+ func = partial(func, mask)
+
+ if needs_ngroups:
+ func = partial(func, ngroups)
+
+ func(**kwargs) # Call func to modify indexer values in place
+ output[name] = algorithms.take_nd(obj.values, indexer)
+
+ return self._wrap_transformed_output(output)
+
@Substitution(name='groupby')
@Appender(_doc_template)
def shift(self, periods=1, freq=None, axis=0):
@@ -1860,17 +1929,9 @@ def shift(self, periods=1, freq=None, axis=0):
if freq is not None or axis != 0:
return self.apply(lambda x: x.shift(periods, freq, axis))
- labels, _, ngroups = self.grouper.group_info
-
- # filled in by Cython
- indexer = np.zeros_like(labels)
- libgroupby.group_shift_indexer(indexer, labels, ngroups, periods)
-
- output = {}
- for name, obj in self._iterate_slices():
- output[name] = algorithms.take_nd(obj.values, indexer)
-
- return self._wrap_transformed_output(output)
+ return self._get_cythonized_result('group_shift_indexer',
+ self.grouper, needs_ngroups=True,
+ periods=periods)
@Substitution(name='groupby')
@Appender(_doc_template)
@@ -3577,7 +3638,6 @@ def describe(self, **kwargs):
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
- from functools import partial
from pandas.core.reshape.tile import cut
from pandas.core.reshape.merge import _get_join_indexers
@@ -4585,9 +4645,17 @@ def _apply_to_column_groupbys(self, func):
in self._iterate_column_groupbys()),
keys=self._selected_obj.columns, axis=1)
+ def _fill(self, direction, limit=None):
+ """Overriden method to join grouped columns in output"""
+ res = super(DataFrameGroupBy, self)._fill(direction, limit=limit)
+ output = collections.OrderedDict(
+ (grp.name, grp.grouper) for grp in self.grouper.groupings)
+
+ from pandas import concat
+ return concat((self._wrap_transformed_output(output), res), axis=1)
+
def count(self):
""" Compute count of group, excluding missing values """
- from functools import partial
from pandas.core.dtypes.missing import _isna_ndarraylike as isna
data, _ = self._get_data_to_aggregate()
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 129ac6b06205c..2429e9975fc8e 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -2061,6 +2061,61 @@ def test_rank_object_raises(self, ties_method, ascending, na_option,
ascending=ascending,
na_option=na_option, pct=pct)
+ @pytest.mark.parametrize("mix_groupings", [True, False])
+ @pytest.mark.parametrize("as_series", [True, False])
+ @pytest.mark.parametrize("val1,val2", [
+ ('foo', 'bar'), (1, 2), (1., 2.)])
+ @pytest.mark.parametrize("fill_method,limit,exp_vals", [
+ ("ffill", None,
+ [np.nan, np.nan, 'val1', 'val1', 'val1', 'val2', 'val2', 'val2']),
+ ("ffill", 1,
+ [np.nan, np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan]),
+ ("bfill", None,
+ ['val1', 'val1', 'val1', 'val2', 'val2', 'val2', np.nan, np.nan]),
+ ("bfill", 1,
+ [np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan, np.nan])
+ ])
+ def test_group_fill_methods(self, mix_groupings, as_series, val1, val2,
+ fill_method, limit, exp_vals):
+ vals = [np.nan, np.nan, val1, np.nan, np.nan, val2, np.nan, np.nan]
+ _exp_vals = list(exp_vals)
+ # Overwrite placeholder values
+ for index, exp_val in enumerate(_exp_vals):
+ if exp_val == 'val1':
+ _exp_vals[index] = val1
+ elif exp_val == 'val2':
+ _exp_vals[index] = val2
+
+ # Need to modify values and expectations depending on the
+ # Series / DataFrame that we ultimately want to generate
+ if mix_groupings: # ['a', 'b', 'a, 'b', ...]
+ keys = ['a', 'b'] * len(vals)
+
+ def interweave(list_obj):
+ temp = list()
+ for x in list_obj:
+ temp.extend([x, x])
+
+ return temp
+
+ _exp_vals = interweave(_exp_vals)
+ vals = interweave(vals)
+ else: # ['a', 'a', 'a', ... 'b', 'b', 'b']
+ keys = ['a'] * len(vals) + ['b'] * len(vals)
+ _exp_vals = _exp_vals * 2
+ vals = vals * 2
+
+ df = DataFrame({'key': keys, 'val': vals})
+ if as_series:
+ result = getattr(
+ df.groupby('key')['val'], fill_method)(limit=limit)
+ exp = Series(_exp_vals, name='val')
+ assert_series_equal(result, exp)
+ else:
+ result = getattr(df.groupby('key'), fill_method)(limit=limit)
+ exp = DataFrame({'key': keys, 'val': _exp_vals})
+ assert_frame_equal(result, exp)
+
def test_dont_clobber_name_column(self):
df = DataFrame({'key': ['a', 'a', 'a', 'b', 'b', 'b'],
'name': ['foo', 'bar', 'baz'] * 2})
| - [X] closes #11296
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I am not a fan of how I've implemented this in `groupby.py` but I think this change highlights even more the need for some refactoring of that module, specifically in how Cython transformations are getting dispatched. This still works in the meantime but open to any feedback on how the methods are getting wired back to the Cython layer.
Below are ASVs for the change
```bash
before after ratio
[d9551c8e] [5e007f86]
+ 81.6±0.3μs 96.7±2μs 1.19 groupby.GroupByMethods.time_method('float', 'count')
+ 455±3μs 516±20μs 1.13 groupby.GroupByMethods.time_method('float', 'cummin')
+ 306±0.6μs 340±5μs 1.11 groupby.GroupByMethods.time_method('float', 'prod')
- 142±0.4ms 280±6μs 0.00 groupby.GroupByMethods.time_method('int', 'bfill')
- 230±10ms 327±7μs 0.00 groupby.GroupByMethods.time_method('float', 'bfill')
- 135±0.8ms 189±0.3μs 0.00 groupby.GroupByMethods.time_method('int', 'ffill')
- 227±2ms 184±0.6μs 0.00 groupby.GroupByMethods.time_method('float', 'ffill')
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/19673 | 2018-02-13T07:36:11Z | 2018-02-25T16:05:27Z | 2018-02-25T16:05:27Z | 2018-02-25T20:38:08Z |
DOC: Ambiguous description in to_parquet engine documentation | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 2782ee7b9d201..02d9cc5cbdf30 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1678,9 +1678,10 @@ def to_parquet(self, fname, engine='auto', compression='snappy',
fname : str
string file path
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
- Parquet reader library to use. If 'auto', then the option
- 'io.parquet.engine' is used. If 'auto', then the first
- library to be installed is used.
+ Parquet library to use. If 'auto', then the option
+ ``io.parquet.engine`` is used. The default ``io.parquet.engine``
+ behavior is to try 'pyarrow', falling back to 'fastparquet' if
+ 'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
kwargs
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 1c22a305c089d..a99014f07a6b3 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -244,9 +244,10 @@ def to_parquet(df, path, engine='auto', compression='snappy', **kwargs):
path : string
File path
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
- Parquet reader library to use. If 'auto', then the option
- 'io.parquet.engine' is used. If 'auto', then the first
- library to be installed is used.
+ Parquet library to use. If 'auto', then the option
+ ``io.parquet.engine`` is used. The default ``io.parquet.engine``
+ behavior is to try 'pyarrow', falling back to 'fastparquet' if
+ 'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
kwargs
@@ -271,9 +272,10 @@ def read_parquet(path, engine='auto', columns=None, **kwargs):
.. versionadded 0.21.1
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
- Parquet reader library to use. If 'auto', then the option
- 'io.parquet.engine' is used. If 'auto', then the first
- library to be installed is used.
+ Parquet library to use. If 'auto', then the option
+ ``io.parquet.engine`` is used. The default ``io.parquet.engine``
+ behavior is to try 'pyarrow', falling back to 'fastparquet' if
+ 'pyarrow' is unavailable.
kwargs are passed to the engine
Returns
| Ambiguity correction in documentation on pandas.DataFrame.to_parquet
- [x] closes #19662
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] tests added / passed
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/19669 | 2018-02-12T23:17:59Z | 2018-02-15T08:17:32Z | 2018-02-15T08:17:32Z | 2018-02-15T08:17:32Z |
enable multivalues insert | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 0b9a610b50d7d..93f5c5bea53b4 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -4711,6 +4711,12 @@ writes ``data`` to the database in batches of 1000 rows at a time:
data.to_sql('data_chunked', engine, chunksize=1000)
+.. note::
+
+ The function :func:`~pandas.DataFrame.to_sql` will perform a multivalue
+ insert if the engine dialect ``supports_multivalues_insert``. This will
+ greatly speed up the insert in some cases.
+
SQL data types
++++++++++++++
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index feca90aae6237..233816600ec0f 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -341,6 +341,8 @@ Other Enhancements
- :func:`DataFrame.replace` now supports the ``method`` parameter, which can be used to specify the replacement method when ``to_replace`` is a scalar, list or tuple and ``value`` is ``None`` (:issue:`19632`)
- :meth:`Timestamp.month_name`, :meth:`DatetimeIndex.month_name`, and :meth:`Series.dt.month_name` are now available (:issue:`12805`)
- :meth:`Timestamp.day_name` and :meth:`DatetimeIndex.day_name` are now available to return day names with a specified locale (:issue:`12806`)
+- :meth:`DataFrame.to_sql` now performs a multivalue insert if the underlying connection supports itk rather than inserting row by row.
+ ``SQLAlchemy`` dialects supporting multivalue inserts include: ``mysql``, ``postgresql``, ``sqlite`` and any dialect with ``supports_multivalues_insert``. (:issue:`14315`, :issue:`8953`)
.. _whatsnew_0230.api_breaking:
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index a582d32741ae9..ccb8d2d99d734 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -572,8 +572,29 @@ def create(self):
else:
self._execute_create()
- def insert_statement(self):
- return self.table.insert()
+ def insert_statement(self, data, conn):
+ """
+ Generate tuple of SQLAlchemy insert statement and any arguments
+ to be executed by connection (via `_execute_insert`).
+
+ Parameters
+ ----------
+ conn : SQLAlchemy connectable(engine/connection)
+ Connection to recieve the data
+ data : list of dict
+ The data to be inserted
+
+ Returns
+ -------
+ SQLAlchemy statement
+ insert statement
+ *, optional
+ Additional parameters to be passed when executing insert statement
+ """
+ dialect = getattr(conn, 'dialect', None)
+ if dialect and getattr(dialect, 'supports_multivalues_insert', False):
+ return self.table.insert(data),
+ return self.table.insert(), data
def insert_data(self):
if self.index is not None:
@@ -612,8 +633,9 @@ def insert_data(self):
return column_names, data_list
def _execute_insert(self, conn, keys, data_iter):
+ """Insert data into this table with database connection"""
data = [{k: v for k, v in zip(keys, row)} for row in data_iter]
- conn.execute(self.insert_statement(), data)
+ conn.execute(*self.insert_statement(data, conn))
def insert(self, chunksize=None):
keys, data_list = self.insert_data()
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index f3ab74d37a2bc..4530cc9d2fba9 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -1665,6 +1665,29 @@ class Temporary(Base):
tm.assert_frame_equal(df, expected)
+ def test_insert_multivalues(self):
+ # issues addressed
+ # https://github.com/pandas-dev/pandas/issues/14315
+ # https://github.com/pandas-dev/pandas/issues/8953
+
+ db = sql.SQLDatabase(self.conn)
+ df = DataFrame({'A': [1, 0, 0], 'B': [1.1, 0.2, 4.3]})
+ table = sql.SQLTable("test_table", db, frame=df)
+ data = [
+ {'A': 1, 'B': 0.46},
+ {'A': 0, 'B': -2.06}
+ ]
+ statement = table.insert_statement(data, conn=self.conn)[0]
+
+ if self.supports_multivalues_insert:
+ assert statement.parameters == data, (
+ 'insert statement should be multivalues'
+ )
+ else:
+ assert statement.parameters is None, (
+ 'insert statement should not be multivalues'
+ )
+
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
@@ -1679,6 +1702,7 @@ class _TestSQLiteAlchemy(object):
"""
flavor = 'sqlite'
+ supports_multivalues_insert = True
@classmethod
def connect(cls):
@@ -1727,6 +1751,7 @@ class _TestMySQLAlchemy(object):
"""
flavor = 'mysql'
+ supports_multivalues_insert = True
@classmethod
def connect(cls):
@@ -1796,6 +1821,7 @@ class _TestPostgreSQLAlchemy(object):
"""
flavor = 'postgresql'
+ supports_multivalues_insert = True
@classmethod
def connect(cls):
| ### Summary
Currently when pushing a dataframe to a database, lines are inserted one by one. This change enables multivalues inserts.
TODO
- [x] release note
- [ ] address chunksize behavior
### Reference
http://docs.sqlalchemy.org/en/rel_0_9/core/dml.html?highlight=insert%20values#sqlalchemy.sql.expression.Insert.values | https://api.github.com/repos/pandas-dev/pandas/pulls/19664 | 2018-02-12T19:47:31Z | 2018-03-07T21:54:46Z | 2018-03-07T21:54:46Z | 2018-05-17T11:28:07Z |
DOC: ignore Panel deprecation warnings during doc build | diff --git a/doc/source/conf.py b/doc/source/conf.py
index c188f83f80250..7c4edd0486636 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -15,6 +15,8 @@
import re
import inspect
import importlib
+import warnings
+
from pandas.compat import u, PY3
try:
@@ -375,6 +377,13 @@
'wiki': ('https://github.com/pandas-dev/pandas/wiki/%s',
'wiki ')}
+
+# ignore all deprecation warnings from Panel during doc build
+# (to avoid the need to add :okwarning: in many places)
+warnings.filterwarnings("ignore", message="\nPanel is deprecated",
+ category=FutureWarning)
+
+
ipython_exec_lines = [
'import numpy as np',
'import pandas as pd',
| The doc build log is currently full of Panel deprecation warnings. To avoid having to put `:okwarning:` on all code-blocks that use Panel, I put a general filterwarnings for this warning in the conf.py file which hopefully works (alternative would be to put this inside the .rst file itself at the top in a suppressed ipython code block) | https://api.github.com/repos/pandas-dev/pandas/pulls/19663 | 2018-02-12T19:26:48Z | 2018-02-13T19:01:43Z | 2018-02-13T19:01:43Z | 2018-02-13T19:01:46Z |
collect index formatting tests | diff --git a/pandas/tests/indexes/datetimes/test_formats.py b/pandas/tests/indexes/datetimes/test_formats.py
index ea2731f66f0ef..0d1a9e65ce6c6 100644
--- a/pandas/tests/indexes/datetimes/test_formats.py
+++ b/pandas/tests/indexes/datetimes/test_formats.py
@@ -1,6 +1,10 @@
-from pandas import DatetimeIndex
+from datetime import datetime
+from pandas import DatetimeIndex, Series
import numpy as np
+import dateutil.tz
+import pytz
+import pytest
import pandas.util.testing as tm
import pandas as pd
@@ -45,3 +49,172 @@ def test_to_native_types():
result = index.to_native_types(na_rep='pandas')
tm.assert_numpy_array_equal(result, expected)
+
+
+class TestDatetimeIndexRendering(object):
+ def test_dti_repr_short(self):
+ dr = pd.date_range(start='1/1/2012', periods=1)
+ repr(dr)
+
+ dr = pd.date_range(start='1/1/2012', periods=2)
+ repr(dr)
+
+ dr = pd.date_range(start='1/1/2012', periods=3)
+ repr(dr)
+
+ @pytest.mark.parametrize('method', ['__repr__', '__unicode__', '__str__'])
+ def test_dti_representation(self, method):
+ idxs = []
+ idxs.append(DatetimeIndex([], freq='D'))
+ idxs.append(DatetimeIndex(['2011-01-01'], freq='D'))
+ idxs.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
+ idxs.append(DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
+ freq='D'))
+ idxs.append(DatetimeIndex(
+ ['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
+ ], freq='H', tz='Asia/Tokyo'))
+ idxs.append(DatetimeIndex(
+ ['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
+ idxs.append(DatetimeIndex(
+ ['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
+
+ exp = []
+ exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
+ exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
+ "freq='D')")
+ exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
+ "dtype='datetime64[ns]', freq='D')")
+ exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
+ "dtype='datetime64[ns]', freq='D')")
+ exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
+ "'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
+ ", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
+ exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
+ "'2011-01-01 10:00:00-05:00', 'NaT'], "
+ "dtype='datetime64[ns, US/Eastern]', freq=None)")
+ exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
+ "'2011-01-01 10:00:00+00:00', 'NaT'], "
+ "dtype='datetime64[ns, UTC]', freq=None)""")
+
+ with pd.option_context('display.width', 300):
+ for indx, expected in zip(idxs, exp):
+ result = getattr(indx, method)()
+ assert result == expected
+
+ def test_dti_representation_to_series(self):
+ idx1 = DatetimeIndex([], freq='D')
+ idx2 = DatetimeIndex(['2011-01-01'], freq='D')
+ idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
+ idx4 = DatetimeIndex(
+ ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
+ idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
+ '2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
+ idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
+ tz='US/Eastern')
+ idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
+
+ exp1 = """Series([], dtype: datetime64[ns])"""
+
+ exp2 = ("0 2011-01-01\n"
+ "dtype: datetime64[ns]")
+
+ exp3 = ("0 2011-01-01\n"
+ "1 2011-01-02\n"
+ "dtype: datetime64[ns]")
+
+ exp4 = ("0 2011-01-01\n"
+ "1 2011-01-02\n"
+ "2 2011-01-03\n"
+ "dtype: datetime64[ns]")
+
+ exp5 = ("0 2011-01-01 09:00:00+09:00\n"
+ "1 2011-01-01 10:00:00+09:00\n"
+ "2 2011-01-01 11:00:00+09:00\n"
+ "dtype: datetime64[ns, Asia/Tokyo]")
+
+ exp6 = ("0 2011-01-01 09:00:00-05:00\n"
+ "1 2011-01-01 10:00:00-05:00\n"
+ "2 NaT\n"
+ "dtype: datetime64[ns, US/Eastern]")
+
+ exp7 = ("0 2011-01-01 09:00:00\n"
+ "1 2011-01-02 10:15:00\n"
+ "dtype: datetime64[ns]")
+
+ with pd.option_context('display.width', 300):
+ for idx, expected in zip([idx1, idx2, idx3, idx4,
+ idx5, idx6, idx7],
+ [exp1, exp2, exp3, exp4,
+ exp5, exp6, exp7]):
+ result = repr(Series(idx))
+ assert result == expected
+
+ def test_dti_summary(self):
+ # GH#9116
+ idx1 = DatetimeIndex([], freq='D')
+ idx2 = DatetimeIndex(['2011-01-01'], freq='D')
+ idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
+ idx4 = DatetimeIndex(
+ ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
+ idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
+ '2011-01-01 11:00'],
+ freq='H', tz='Asia/Tokyo')
+ idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
+ tz='US/Eastern')
+
+ exp1 = ("DatetimeIndex: 0 entries\n"
+ "Freq: D")
+
+ exp2 = ("DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01\n"
+ "Freq: D")
+
+ exp3 = ("DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02\n"
+ "Freq: D")
+
+ exp4 = ("DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03\n"
+ "Freq: D")
+
+ exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
+ "to 2011-01-01 11:00:00+09:00\n"
+ "Freq: H")
+
+ exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
+
+ for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
+ [exp1, exp2, exp3, exp4, exp5, exp6]):
+ result = idx.summary()
+ assert result == expected
+
+ def test_dti_business_repr(self):
+ # only really care that it works
+ repr(pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1)))
+
+ def test_dti_business_summary(self):
+ rng = pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1))
+ rng.summary()
+ rng[2:2].summary()
+
+ def test_dti_business_summary_pytz(self):
+ pd.bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc).summary()
+
+ def test_dti_business_summary_dateutil(self):
+ pd.bdate_range('1/1/2005', '1/1/2009',
+ tz=dateutil.tz.tzutc()).summary()
+
+ def test_dti_custom_business_repr(self):
+ # only really care that it works
+ repr(pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1),
+ freq='C'))
+
+ def test_dti_custom_business_summary(self):
+ rng = pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1),
+ freq='C')
+ rng.summary()
+ rng[2:2].summary()
+
+ def test_dti_custom_business_summary_pytz(self):
+ pd.bdate_range('1/1/2005', '1/1/2009', freq='C', tz=pytz.utc).summary()
+
+ def test_dti_custom_business_summary_dateutil(self):
+ pd.bdate_range('1/1/2005', '1/1/2009', freq='C',
+ tz=dateutil.tz.tzutc()).summary()
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index 4a46c3b04bbad..2013b5e6cd6dd 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -83,16 +83,6 @@ def test_range_edges(self):
'1970-01-03', '1970-01-04'])
tm.assert_index_equal(idx, exp)
- def test_datetimeindex_repr_short(self):
- dr = date_range(start='1/1/2012', periods=1)
- repr(dr)
-
- dr = date_range(start='1/1/2012', periods=2)
- repr(dr)
-
- dr = date_range(start='1/1/2012', periods=3)
- repr(dr)
-
class TestDatetime64(object):
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index bc43b427fe0aa..b42cd454803b8 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -1,6 +1,4 @@
-import pytz
import pytest
-import dateutil
import warnings
import numpy as np
from datetime import datetime
@@ -153,130 +151,6 @@ def test_repeat(self):
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
- def test_representation(self):
-
- idx = []
- idx.append(DatetimeIndex([], freq='D'))
- idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
- idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
- idx.append(DatetimeIndex(
- ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
- idx.append(DatetimeIndex(
- ['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
- ], freq='H', tz='Asia/Tokyo'))
- idx.append(DatetimeIndex(
- ['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
- idx.append(DatetimeIndex(
- ['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
-
- exp = []
- exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
- exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
- "freq='D')")
- exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
- "dtype='datetime64[ns]', freq='D')")
- exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
- "dtype='datetime64[ns]', freq='D')")
- exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
- "'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
- ", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
- exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
- "'2011-01-01 10:00:00-05:00', 'NaT'], "
- "dtype='datetime64[ns, US/Eastern]', freq=None)")
- exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
- "'2011-01-01 10:00:00+00:00', 'NaT'], "
- "dtype='datetime64[ns, UTC]', freq=None)""")
-
- with pd.option_context('display.width', 300):
- for indx, expected in zip(idx, exp):
- for func in ['__repr__', '__unicode__', '__str__']:
- result = getattr(indx, func)()
- assert result == expected
-
- def test_representation_to_series(self):
- idx1 = DatetimeIndex([], freq='D')
- idx2 = DatetimeIndex(['2011-01-01'], freq='D')
- idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
- idx4 = DatetimeIndex(
- ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
- idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
- '2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
- idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
- tz='US/Eastern')
- idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
-
- exp1 = """Series([], dtype: datetime64[ns])"""
-
- exp2 = ("0 2011-01-01\n"
- "dtype: datetime64[ns]")
-
- exp3 = ("0 2011-01-01\n"
- "1 2011-01-02\n"
- "dtype: datetime64[ns]")
-
- exp4 = ("0 2011-01-01\n"
- "1 2011-01-02\n"
- "2 2011-01-03\n"
- "dtype: datetime64[ns]")
-
- exp5 = ("0 2011-01-01 09:00:00+09:00\n"
- "1 2011-01-01 10:00:00+09:00\n"
- "2 2011-01-01 11:00:00+09:00\n"
- "dtype: datetime64[ns, Asia/Tokyo]")
-
- exp6 = ("0 2011-01-01 09:00:00-05:00\n"
- "1 2011-01-01 10:00:00-05:00\n"
- "2 NaT\n"
- "dtype: datetime64[ns, US/Eastern]")
-
- exp7 = ("0 2011-01-01 09:00:00\n"
- "1 2011-01-02 10:15:00\n"
- "dtype: datetime64[ns]")
-
- with pd.option_context('display.width', 300):
- for idx, expected in zip([idx1, idx2, idx3, idx4,
- idx5, idx6, idx7],
- [exp1, exp2, exp3, exp4,
- exp5, exp6, exp7]):
- result = repr(Series(idx))
- assert result == expected
-
- def test_summary(self):
- # GH9116
- idx1 = DatetimeIndex([], freq='D')
- idx2 = DatetimeIndex(['2011-01-01'], freq='D')
- idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
- idx4 = DatetimeIndex(
- ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
- idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
- '2011-01-01 11:00'],
- freq='H', tz='Asia/Tokyo')
- idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
- tz='US/Eastern')
-
- exp1 = ("DatetimeIndex: 0 entries\n"
- "Freq: D")
-
- exp2 = ("DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01\n"
- "Freq: D")
-
- exp3 = ("DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02\n"
- "Freq: D")
-
- exp4 = ("DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03\n"
- "Freq: D")
-
- exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
- "to 2011-01-01 11:00:00+09:00\n"
- "Freq: H")
-
- exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
-
- for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
- [exp1, exp2, exp3, exp4, exp5, exp6]):
- result = idx.summary()
- assert result == expected
-
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
@@ -544,10 +418,6 @@ def test_copy(self):
repr(cp)
tm.assert_index_equal(cp, self.rng)
- def test_repr(self):
- # only really care that it works
- repr(self.rng)
-
def test_shift(self):
shifted = self.rng.shift(5)
assert shifted[0] == self.rng[5]
@@ -565,16 +435,6 @@ def test_shift(self):
shifted = rng.shift(1, freq=BDay())
assert shifted[0] == rng[0] + BDay()
- def test_summary(self):
- self.rng.summary()
- self.rng[2:2].summary()
-
- def test_summary_pytz(self):
- bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc).summary()
-
- def test_summary_dateutil(self):
- bdate_range('1/1/2005', '1/1/2009', tz=dateutil.tz.tzutc()).summary()
-
def test_equals(self):
assert not self.rng.equals(list(self.rng))
@@ -612,10 +472,6 @@ def test_copy(self):
repr(cp)
tm.assert_index_equal(cp, self.rng)
- def test_repr(self):
- # only really care that it works
- repr(self.rng)
-
def test_shift(self):
shifted = self.rng.shift(5)
@@ -640,16 +496,5 @@ def test_pickle_unpickle(self):
unpickled = tm.round_trip_pickle(self.rng)
assert unpickled.offset is not None
- def test_summary(self):
- self.rng.summary()
- self.rng[2:2].summary()
-
- def test_summary_pytz(self):
- bdate_range('1/1/2005', '1/1/2009', freq='C', tz=pytz.utc).summary()
-
- def test_summary_dateutil(self):
- bdate_range('1/1/2005', '1/1/2009', freq='C',
- tz=dateutil.tz.tzutc()).summary()
-
def test_equals(self):
assert not self.rng.equals(list(self.rng))
diff --git a/pandas/tests/indexes/period/test_formats.py b/pandas/tests/indexes/period/test_formats.py
index 533481ce051f7..b1a1060bf86c4 100644
--- a/pandas/tests/indexes/period/test_formats.py
+++ b/pandas/tests/indexes/period/test_formats.py
@@ -1,6 +1,7 @@
from pandas import PeriodIndex
import numpy as np
+import pytest
import pandas.util.testing as tm
import pandas as pd
@@ -46,3 +47,163 @@ def test_to_native_types():
result = index.to_native_types(na_rep='pandas')
tm.assert_numpy_array_equal(result, expected)
+
+
+class TestPeriodIndexRendering(object):
+ @pytest.mark.parametrize('method', ['__repr__', '__unicode__', '__str__'])
+ def test_representation(self, method):
+ # GH#7601
+ idx1 = PeriodIndex([], freq='D')
+ idx2 = PeriodIndex(['2011-01-01'], freq='D')
+ idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
+ idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
+ freq='D')
+ idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
+ idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'],
+ freq='H')
+ idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
+ idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
+ idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
+ idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
+
+ exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
+
+ exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
+
+ exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
+ "freq='D')")
+
+ exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
+ "dtype='period[D]', freq='D')")
+
+ exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
+ "freq='A-DEC')")
+
+ exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
+ "dtype='period[H]', freq='H')")
+
+ exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
+ "freq='Q-DEC')")
+
+ exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
+ "freq='Q-DEC')")
+
+ exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
+ "dtype='period[Q-DEC]', freq='Q-DEC')")
+
+ exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
+ "dtype='period[3D]', freq='3D')")
+
+ for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
+ idx6, idx7, idx8, idx9, idx10],
+ [exp1, exp2, exp3, exp4, exp5,
+ exp6, exp7, exp8, exp9, exp10]):
+ result = getattr(idx, method)()
+ assert result == expected
+
+ def test_representation_to_series(self):
+ # GH#10971
+ idx1 = PeriodIndex([], freq='D')
+ idx2 = PeriodIndex(['2011-01-01'], freq='D')
+ idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
+ idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
+ freq='D')
+ idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
+ idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'],
+ freq='H')
+
+ idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
+ idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
+ idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
+
+ exp1 = """Series([], dtype: object)"""
+
+ exp2 = """0 2011-01-01
+dtype: object"""
+
+ exp3 = """0 2011-01-01
+1 2011-01-02
+dtype: object"""
+
+ exp4 = """0 2011-01-01
+1 2011-01-02
+2 2011-01-03
+dtype: object"""
+
+ exp5 = """0 2011
+1 2012
+2 2013
+dtype: object"""
+
+ exp6 = """0 2011-01-01 09:00
+1 2012-02-01 10:00
+2 NaT
+dtype: object"""
+
+ exp7 = """0 2013Q1
+dtype: object"""
+
+ exp8 = """0 2013Q1
+1 2013Q2
+dtype: object"""
+
+ exp9 = """0 2013Q1
+1 2013Q2
+2 2013Q3
+dtype: object"""
+
+ for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
+ idx6, idx7, idx8, idx9],
+ [exp1, exp2, exp3, exp4, exp5,
+ exp6, exp7, exp8, exp9]):
+ result = repr(pd.Series(idx))
+ assert result == expected
+
+ def test_summary(self):
+ # GH#9116
+ idx1 = PeriodIndex([], freq='D')
+ idx2 = PeriodIndex(['2011-01-01'], freq='D')
+ idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
+ idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
+ freq='D')
+ idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
+ idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'],
+ freq='H')
+
+ idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
+ idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
+ idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
+
+ exp1 = """PeriodIndex: 0 entries
+Freq: D"""
+
+ exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
+Freq: D"""
+
+ exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
+Freq: D"""
+
+ exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
+Freq: D"""
+
+ exp5 = """PeriodIndex: 3 entries, 2011 to 2013
+Freq: A-DEC"""
+
+ exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
+Freq: H"""
+
+ exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
+Freq: Q-DEC"""
+
+ exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
+Freq: Q-DEC"""
+
+ exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
+Freq: Q-DEC"""
+
+ for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
+ idx6, idx7, idx8, idx9],
+ [exp1, exp2, exp3, exp4, exp5,
+ exp6, exp7, exp8, exp9]):
+ result = idx.summary()
+ assert result == expected
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index 6cb4226dffc5a..b913934195260 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -14,9 +14,6 @@
class TestGetItem(object):
- def setup_method(self, method):
- pass
-
def test_getitem(self):
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py
index 8745de0c2a7aa..f3cc554c192a8 100644
--- a/pandas/tests/indexes/period/test_ops.py
+++ b/pandas/tests/indexes/period/test_ops.py
@@ -115,164 +115,6 @@ def test_numpy_minmax(self):
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, pr, out=0)
- def test_representation(self):
- # GH 7601
- idx1 = PeriodIndex([], freq='D')
- idx2 = PeriodIndex(['2011-01-01'], freq='D')
- idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
- idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
- freq='D')
- idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
- idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
- 'NaT'], freq='H')
- idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
- idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
- idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
- idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
-
- exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
-
- exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
-
- exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
- "freq='D')")
-
- exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
- "dtype='period[D]', freq='D')")
-
- exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
- "freq='A-DEC')")
-
- exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
- "dtype='period[H]', freq='H')")
-
- exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
- "freq='Q-DEC')")
-
- exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
- "freq='Q-DEC')")
-
- exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
- "dtype='period[Q-DEC]', freq='Q-DEC')")
-
- exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
- "dtype='period[3D]', freq='3D')")
-
- for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
- idx6, idx7, idx8, idx9, idx10],
- [exp1, exp2, exp3, exp4, exp5,
- exp6, exp7, exp8, exp9, exp10]):
- for func in ['__repr__', '__unicode__', '__str__']:
- result = getattr(idx, func)()
- assert result == expected
-
- def test_representation_to_series(self):
- # GH 10971
- idx1 = PeriodIndex([], freq='D')
- idx2 = PeriodIndex(['2011-01-01'], freq='D')
- idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
- idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
- '2011-01-03'], freq='D')
- idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
- idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
- 'NaT'], freq='H')
-
- idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
- idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
- idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
-
- exp1 = """Series([], dtype: object)"""
-
- exp2 = """0 2011-01-01
-dtype: object"""
-
- exp3 = """0 2011-01-01
-1 2011-01-02
-dtype: object"""
-
- exp4 = """0 2011-01-01
-1 2011-01-02
-2 2011-01-03
-dtype: object"""
-
- exp5 = """0 2011
-1 2012
-2 2013
-dtype: object"""
-
- exp6 = """0 2011-01-01 09:00
-1 2012-02-01 10:00
-2 NaT
-dtype: object"""
-
- exp7 = """0 2013Q1
-dtype: object"""
-
- exp8 = """0 2013Q1
-1 2013Q2
-dtype: object"""
-
- exp9 = """0 2013Q1
-1 2013Q2
-2 2013Q3
-dtype: object"""
-
- for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
- idx6, idx7, idx8, idx9],
- [exp1, exp2, exp3, exp4, exp5,
- exp6, exp7, exp8, exp9]):
- result = repr(pd.Series(idx))
- assert result == expected
-
- def test_summary(self):
- # GH9116
- idx1 = PeriodIndex([], freq='D')
- idx2 = PeriodIndex(['2011-01-01'], freq='D')
- idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
- idx4 = PeriodIndex(
- ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
- idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
- idx6 = PeriodIndex(
- ['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
-
- idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
- idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
- idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
-
- exp1 = """PeriodIndex: 0 entries
-Freq: D"""
-
- exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
-Freq: D"""
-
- exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
-Freq: D"""
-
- exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
-Freq: D"""
-
- exp5 = """PeriodIndex: 3 entries, 2011 to 2013
-Freq: A-DEC"""
-
- exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
-Freq: H"""
-
- exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
-Freq: Q-DEC"""
-
- exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
-Freq: Q-DEC"""
-
- exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
-Freq: Q-DEC"""
-
- for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
- idx6, idx7, idx8, idx9],
- [exp1, exp2, exp3, exp4, exp5,
- exp6, exp7, exp8, exp9]):
- result = idx.summary()
- assert result == expected
-
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
diff --git a/pandas/tests/indexes/timedeltas/test_formats.py b/pandas/tests/indexes/timedeltas/test_formats.py
new file mode 100644
index 0000000000000..a8375459d74e4
--- /dev/null
+++ b/pandas/tests/indexes/timedeltas/test_formats.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+
+import pytest
+
+import pandas as pd
+from pandas import TimedeltaIndex
+
+
+class TestTimedeltaIndexRendering(object):
+ @pytest.mark.parametrize('method', ['__repr__', '__unicode__', '__str__'])
+ def test_representation(self, method):
+ idx1 = TimedeltaIndex([], freq='D')
+ idx2 = TimedeltaIndex(['1 days'], freq='D')
+ idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
+ idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
+ idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
+
+ exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
+
+ exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
+ "freq='D')")
+
+ exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
+ "dtype='timedelta64[ns]', freq='D')")
+
+ exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
+ "dtype='timedelta64[ns]', freq='D')")
+
+ exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
+ "'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
+
+ with pd.option_context('display.width', 300):
+ for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
+ [exp1, exp2, exp3, exp4, exp5]):
+ result = getattr(idx, method)()
+ assert result == expected
+
+ def test_representation_to_series(self):
+ idx1 = TimedeltaIndex([], freq='D')
+ idx2 = TimedeltaIndex(['1 days'], freq='D')
+ idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
+ idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
+ idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
+
+ exp1 = """Series([], dtype: timedelta64[ns])"""
+
+ exp2 = ("0 1 days\n"
+ "dtype: timedelta64[ns]")
+
+ exp3 = ("0 1 days\n"
+ "1 2 days\n"
+ "dtype: timedelta64[ns]")
+
+ exp4 = ("0 1 days\n"
+ "1 2 days\n"
+ "2 3 days\n"
+ "dtype: timedelta64[ns]")
+
+ exp5 = ("0 1 days 00:00:01\n"
+ "1 2 days 00:00:00\n"
+ "2 3 days 00:00:00\n"
+ "dtype: timedelta64[ns]")
+
+ with pd.option_context('display.width', 300):
+ for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
+ [exp1, exp2, exp3, exp4, exp5]):
+ result = repr(pd.Series(idx))
+ assert result == expected
+
+ def test_summary(self):
+ # GH#9116
+ idx1 = TimedeltaIndex([], freq='D')
+ idx2 = TimedeltaIndex(['1 days'], freq='D')
+ idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
+ idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
+ idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
+
+ exp1 = ("TimedeltaIndex: 0 entries\n"
+ "Freq: D")
+
+ exp2 = ("TimedeltaIndex: 1 entries, 1 days to 1 days\n"
+ "Freq: D")
+
+ exp3 = ("TimedeltaIndex: 2 entries, 1 days to 2 days\n"
+ "Freq: D")
+
+ exp4 = ("TimedeltaIndex: 3 entries, 1 days to 3 days\n"
+ "Freq: D")
+
+ exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
+ "00:00:00")
+
+ for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
+ [exp1, exp2, exp3, exp4, exp5]):
+ result = idx.summary()
+ assert result == expected
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index d154aa2172ef7..690ba66b6f5ef 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -73,94 +73,6 @@ def test_numpy_minmax(self):
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, td, out=0)
- def test_representation(self):
- idx1 = TimedeltaIndex([], freq='D')
- idx2 = TimedeltaIndex(['1 days'], freq='D')
- idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
- idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
- idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
-
- exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
-
- exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
- "freq='D')")
-
- exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
- "dtype='timedelta64[ns]', freq='D')")
-
- exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
- "dtype='timedelta64[ns]', freq='D')")
-
- exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
- "'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
-
- with pd.option_context('display.width', 300):
- for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
- [exp1, exp2, exp3, exp4, exp5]):
- for func in ['__repr__', '__unicode__', '__str__']:
- result = getattr(idx, func)()
- assert result == expected
-
- def test_representation_to_series(self):
- idx1 = TimedeltaIndex([], freq='D')
- idx2 = TimedeltaIndex(['1 days'], freq='D')
- idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
- idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
- idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
-
- exp1 = """Series([], dtype: timedelta64[ns])"""
-
- exp2 = """0 1 days
-dtype: timedelta64[ns]"""
-
- exp3 = """0 1 days
-1 2 days
-dtype: timedelta64[ns]"""
-
- exp4 = """0 1 days
-1 2 days
-2 3 days
-dtype: timedelta64[ns]"""
-
- exp5 = """0 1 days 00:00:01
-1 2 days 00:00:00
-2 3 days 00:00:00
-dtype: timedelta64[ns]"""
-
- with pd.option_context('display.width', 300):
- for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
- [exp1, exp2, exp3, exp4, exp5]):
- result = repr(pd.Series(idx))
- assert result == expected
-
- def test_summary(self):
- # GH9116
- idx1 = TimedeltaIndex([], freq='D')
- idx2 = TimedeltaIndex(['1 days'], freq='D')
- idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
- idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
- idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
-
- exp1 = ("TimedeltaIndex: 0 entries\n"
- "Freq: D")
-
- exp2 = ("TimedeltaIndex: 1 entries, 1 days to 1 days\n"
- "Freq: D")
-
- exp3 = ("TimedeltaIndex: 2 entries, 1 days to 2 days\n"
- "Freq: D")
-
- exp4 = ("TimedeltaIndex: 3 entries, 1 days to 3 days\n"
- "Freq: D")
-
- exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
- "00:00:00")
-
- for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
- [exp1, exp2, exp3, exp4, exp5]):
- result = idx.summary()
- assert result == expected
-
def test_value_counts_unique(self):
# GH 7735
|
It looks like PeriodIndex/TDI/DTI tests are using different rules for what constitutes test_indexing/test_setops. Double-checking before making a PR:
test_get_loc_* --> test_indexing
test_get_indexer_* --> test_indexing
test_insert_* --> test_indexing
test_where_* --> test_indexing
test_join_* --> test_setops
test_append_* --> test_setops
test_difference_* --> test_setops
Many of these at the moment are in e.g. test_ops or test_period/test_datetime. | https://api.github.com/repos/pandas-dev/pandas/pulls/19661 | 2018-02-12T17:51:03Z | 2018-02-18T16:09:45Z | 2018-02-18T16:09:45Z | 2018-02-18T18:21:44Z |
DOC: spellchecked io.rst | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 1785de54b7dd6..7bb34e4d232dd 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -28,8 +28,11 @@
IO Tools (Text, CSV, HDF5, ...)
===============================
-The pandas I/O API is a set of top level ``reader`` functions accessed like ``pd.read_csv()`` that generally return a ``pandas``
-object. The corresponding ``writer`` functions are object methods that are accessed like ``df.to_csv()``
+The pandas I/O API is a set of top level ``reader`` functions accessed like
+:func:`pandas.read_csv` that generally return a pandas object. The corresponding
+``writer`` functions are object methods that are accessed like
+:meth:`DataFrame.to_csv`. Below is a table containing available ``readers`` and
+``writers``.
.. csv-table::
:header: "Format Type", "Data Description", "Reader", "Writer"
@@ -65,13 +68,14 @@ CSV & Text files
The two workhorse functions for reading text files (a.k.a. flat files) are
:func:`read_csv` and :func:`read_table`. They both use the same parsing code to
-intelligently convert tabular data into a DataFrame object. See the
+intelligently convert tabular data into a ``DataFrame`` object. See the
:ref:`cookbook<cookbook.csv>` for some advanced strategies.
Parsing options
'''''''''''''''
-:func:`read_csv` and :func:`read_table` accept the following arguments:
+The functions :func:`read_csv` and :func:`read_table` accept the following
+common arguments:
Basic
+++++
@@ -94,7 +98,7 @@ delimiter : str, default ``None``
delim_whitespace : boolean, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``)
will be used as the delimiter. Equivalent to setting ``sep='\s+'``.
- If this option is set to True, nothing should be passed in for the
+ If this option is set to ``True``, nothing should be passed in for the
``delimiter`` parameter.
.. versionadded:: 0.18.1 support for the Python parser.
@@ -122,7 +126,7 @@ names : array-like, default ``None``
explicitly pass ``header=None``. Duplicates in this list will cause
a ``UserWarning`` to be issued.
index_col : int or sequence or ``False``, default ``None``
- Column to use as the row labels of the DataFrame. If a sequence is given, a
+ Column to use as the row labels of the ``DataFrame``. If a sequence is given, a
MultiIndex is used. If you have a malformed file with delimiters at the end of
each line, you might consider ``index_col=False`` to force pandas to *not* use
the first column as the index (row names).
@@ -131,8 +135,8 @@ usecols : array-like or callable, default ``None``
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header row(s). For example, a valid array-like
- `usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz']. Element
- order is ignored, so usecols=[0,1] is the same as [1, 0].
+ `usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
+ Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
If callable, the callable function will be evaluated against the column names,
returning names where the callable function evaluates to True:
@@ -145,12 +149,12 @@ usecols : array-like or callable, default ``None``
Using this parameter results in much faster parsing time and lower memory usage.
squeeze : boolean, default ``False``
- If the parsed data only contains one column then return a Series.
+ If the parsed data only contains one column then return a ``Series``.
prefix : str, default ``None``
Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : boolean, default ``True``
Duplicate columns will be specified as 'X', 'X.1'...'X.N', rather than 'X'...'X'.
- Passing in False will cause data to be overwritten if there are duplicate
+ Passing in ``False`` will cause data to be overwritten if there are duplicate
names in the columns.
General Parsing Configuration
@@ -197,7 +201,7 @@ low_memory : boolean, default ``True``
Internally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set ``False``, or specify the type with the ``dtype`` parameter.
- Note that the entire file is read into a single DataFrame regardless,
+ Note that the entire file is read into a single ``DataFrame`` regardless,
use the ``chunksize`` or ``iterator`` parameter to return the data in chunks.
(Only valid with C parser)
memory_map : boolean, default False
@@ -217,16 +221,16 @@ keep_default_na : boolean, default ``True``
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
- * If `keep_default_na` is True, and `na_values` are specified, `na_values`
+ * If `keep_default_na` is ``True``, and `na_values` are specified, `na_values`
is appended to the default NaN values used for parsing.
- * If `keep_default_na` is True, and `na_values` are not specified, only
+ * If `keep_default_na` is ``True``, and `na_values` are not specified, only
the default NaN values are used for parsing.
- * If `keep_default_na` is False, and `na_values` are specified, only
+ * If `keep_default_na` is ``False``, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
- * If `keep_default_na` is False, and `na_values` are not specified, no
+ * If `keep_default_na` is ``False``, and `na_values` are not specified, no
strings will be parsed as NaN.
- Note that if `na_filter` is passed in as False, the `keep_default_na` and
+ Note that if `na_filter` is passed in as ``False``, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : boolean, default ``True``
Detect missing value markers (empty strings and the value of na_values). In
@@ -341,9 +345,9 @@ Error Handling
error_bad_lines : boolean, default ``True``
Lines with too many fields (e.g. a csv line with too many commas) will by
- default cause an exception to be raised, and no DataFrame will be returned. If
- ``False``, then these "bad lines" will dropped from the DataFrame that is
- returned. See :ref:`bad lines <io.bad_lines>`
+ default cause an exception to be raised, and no ``DataFrame`` will be
+ returned. If ``False``, then these "bad lines" will dropped from the
+ ``DataFrame`` that is returned. See :ref:`bad lines <io.bad_lines>`
below.
warn_bad_lines : boolean, default ``True``
If error_bad_lines is ``False``, and warn_bad_lines is ``True``, a warning for
@@ -354,8 +358,8 @@ warn_bad_lines : boolean, default ``True``
Specifying column data types
''''''''''''''''''''''''''''
-You can indicate the data type for the whole DataFrame or
-individual columns:
+You can indicate the data type for the whole ``DataFrame`` or individual
+columns:
.. ipython:: python
@@ -368,11 +372,11 @@ individual columns:
df = pd.read_csv(StringIO(data), dtype={'b': object, 'c': np.float64})
df.dtypes
-Fortunately, ``pandas`` offers more than one way to ensure that your column(s)
+Fortunately, pandas offers more than one way to ensure that your column(s)
contain only one ``dtype``. If you're unfamiliar with these concepts, you can
see :ref:`here<basics.dtypes>` to learn more about dtypes, and
:ref:`here<basics.object_conversion>` to learn more about ``object`` conversion in
-``pandas``.
+pandas.
For instance, you can use the ``converters`` argument
@@ -395,7 +399,7 @@ dtypes after reading in the data,
df2
df2['col_1'].apply(type).value_counts()
-which would convert all valid parsing to floats, leaving the invalid parsing
+which will convert all valid parsing to floats, leaving the invalid parsing
as ``NaN``.
Ultimately, how you deal with reading in columns containing mixed dtypes
@@ -407,7 +411,7 @@ worth trying.
.. versionadded:: 0.20.0 support for the Python parser.
- The ``dtype`` option is supported by the 'python' engine
+ The ``dtype`` option is supported by the 'python' engine.
.. note::
In some cases, reading in abnormal data with columns containing mixed dtypes
@@ -453,7 +457,8 @@ Specifying Categorical dtype
pd.read_csv(StringIO(data)).dtypes
pd.read_csv(StringIO(data), dtype='category').dtypes
-Individual columns can be parsed as a ``Categorical`` using a dict specification
+Individual columns can be parsed as a ``Categorical`` using a dict
+specification:
.. ipython:: python
@@ -551,17 +556,18 @@ If the header is in a row other than the first, pass the row number to
Duplicate names parsing
'''''''''''''''''''''''
-If the file or header contains duplicate names, pandas by default will deduplicate
-these names so as to prevent data overwrite:
+If the file or header contains duplicate names, pandas will by default
+distinguish between them so as to prevent overwriting data:
.. ipython :: python
data = 'a,b,a\n0,1,2\n3,4,5'
pd.read_csv(StringIO(data))
-There is no more duplicate data because ``mangle_dupe_cols=True`` by default, which modifies
-a series of duplicate columns 'X'...'X' to become 'X', 'X.1',...'X.N'. If ``mangle_dupe_cols
-=False``, duplicate data can arise:
+There is no more duplicate data because ``mangle_dupe_cols=True`` by default,
+which modifies a series of duplicate columns 'X', ..., 'X' to become
+'X', 'X.1', ..., 'X.N'. If ``mangle_dupe_cols=False``, duplicate data can
+arise:
.. code-block :: python
@@ -716,7 +722,7 @@ result in byte strings being decoded to unicode in the result:
Some formats which encode all characters as multiple bytes, like UTF-16, won't
parse correctly at all without specifying the encoding. `Full list of Python
standard encodings
-<https://docs.python.org/3/library/codecs.html#standard-encodings>`_
+<https://docs.python.org/3/library/codecs.html#standard-encodings>`_.
.. _io.index_col:
@@ -724,7 +730,7 @@ Index columns and trailing delimiters
'''''''''''''''''''''''''''''''''''''
If a file has one more column of data than the number of column names, the
-first column will be used as the DataFrame's row names:
+first column will be used as the ``DataFrame``'s row names:
.. ipython:: python
@@ -894,30 +900,31 @@ Pandas will try to call the ``date_parser`` function in three different ways. If
an exception is raised, the next one is tried:
1. ``date_parser`` is first called with one or more arrays as arguments,
- as defined using `parse_dates` (e.g., ``date_parser(['2013', '2013'], ['1', '2'])``)
+ as defined using `parse_dates` (e.g., ``date_parser(['2013', '2013'], ['1', '2'])``).
2. If #1 fails, ``date_parser`` is called with all the columns
- concatenated row-wise into a single array (e.g., ``date_parser(['2013 1', '2013 2'])``)
+ concatenated row-wise into a single array (e.g., ``date_parser(['2013 1', '2013 2'])``).
3. If #2 fails, ``date_parser`` is called once for every row with one or more
string arguments from the columns indicated with `parse_dates`
(e.g., ``date_parser('2013', '1')`` for the first row, ``date_parser('2013', '2')``
- for the second, etc.)
+ for the second, etc.).
Note that performance-wise, you should try these methods of parsing dates in order:
-1. Try to infer the format using ``infer_datetime_format=True`` (see section below)
+1. Try to infer the format using ``infer_datetime_format=True`` (see section below).
2. If you know the format, use ``pd.to_datetime()``:
- ``date_parser=lambda x: pd.to_datetime(x, format=...)``
+ ``date_parser=lambda x: pd.to_datetime(x, format=...)``.
3. If you have a really non-standard format, use a custom ``date_parser`` function.
For optimal performance, this should be vectorized, i.e., it should accept arrays
as arguments.
-You can explore the date parsing functionality in ``date_converters.py`` and
-add your own. We would love to turn this module into a community supported set
-of date/time parsers. To get you started, ``date_converters.py`` contains
+You can explore the date parsing functionality in
+`date_converters.py <https://github.com/pandas-dev/pandas/blob/master/pandas/io/date_converters.py>`__
+and add your own. We would love to turn this module into a community supported
+set of date/time parsers. To get you started, ``date_converters.py`` contains
functions to parse dual date and time columns, year/month/day columns,
and year/month/day/hour/minute/second columns. It also contains a
``generic_parser`` function so you can curry it with a function that deals with
@@ -945,7 +952,7 @@ of strings. So in general, ``infer_datetime_format`` should not have any
negative consequences if enabled.
Here are some examples of datetime strings that can be guessed (All
-representing December 30th, 2011 at 00:00:00)
+representing December 30th, 2011 at 00:00:00):
- "20111230"
- "2011/12/30"
@@ -954,7 +961,7 @@ representing December 30th, 2011 at 00:00:00)
- "30/Dec/2011 00:00:00"
- "30/December/2011 00:00:00"
-``infer_datetime_format`` is sensitive to ``dayfirst``. With
+Note that ``infer_datetime_format`` is sensitive to ``dayfirst``. With
``dayfirst=True``, it will guess "01/12/2011" to be December 1st. With
``dayfirst=False`` (default) it will guess "01/12/2011" to be January 12th.
@@ -1030,7 +1037,7 @@ correctly:
with open('tmp.csv', 'w') as fh:
fh.write(data)
-By default, numbers with a thousands separator will be parsed as strings
+By default, numbers with a thousands separator will be parsed as strings:
.. ipython:: python
@@ -1040,7 +1047,7 @@ By default, numbers with a thousands separator will be parsed as strings
df.level.dtype
-The ``thousands`` keyword allows integers to be parsed correctly
+The ``thousands`` keyword allows integers to be parsed correctly:
.. ipython:: python
@@ -1060,11 +1067,12 @@ The ``thousands`` keyword allows integers to be parsed correctly
NA Values
'''''''''
-To control which values are parsed as missing values (which are signified by ``NaN``), specify a
-string in ``na_values``. If you specify a list of strings, then all values in
-it are considered to be missing values. If you specify a number (a ``float``, like ``5.0`` or an ``integer`` like ``5``),
-the corresponding equivalent values will also imply a missing value (in this case effectively
-``[5.0,5]`` are recognized as ``NaN``.
+To control which values are parsed as missing values (which are signified by
+``NaN``), specify a string in ``na_values``. If you specify a list of strings,
+then all values in it are considered to be missing values. If you specify a
+number (a ``float``, like ``5.0`` or an ``integer`` like ``5``), the
+corresponding equivalent values will also imply a missing value (in this case
+effectively ``[5.0, 5]`` are recognized as ``NaN``).
To completely override the default values that are recognized as missing, specify ``keep_default_na=False``.
@@ -1073,29 +1081,34 @@ To completely override the default values that are recognized as missing, specif
The default ``NaN`` recognized values are ``['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A N/A', '#N/A', 'N/A',
'n/a', 'NA', '#NA', 'NULL', 'null', 'NaN', '-NaN', 'nan', '-nan', '']``.
+Let us consider some examples:
+
.. code-block:: python
read_csv(path, na_values=[5])
-the default values, in addition to ``5`` , ``5.0`` when interpreted as numbers are recognized as ``NaN``
+In the example above ``5`` and ``5.0`` will be recognized as ``NaN``, in
+addition to the defaults. A string will first be interpreted as a numerical
+``5``, then as a ``NaN``.
.. code-block:: python
read_csv(path, keep_default_na=False, na_values=[""])
-only an empty field will be ``NaN``
+Above, only an empty field will be recognized as ``NaN``.
.. code-block:: python
read_csv(path, keep_default_na=False, na_values=["NA", "0"])
-only ``NA`` and ``0`` as strings are ``NaN``
+Above, both ``NA`` and ``0`` as strings are ``NaN``.
.. code-block:: python
read_csv(path, na_values=["Nope"])
-the default values, in addition to the string ``"Nope"`` are recognized as ``NaN``
+The default values, in addition to the string ``"Nope"`` are recognized as
+``NaN``.
.. _io.infinity:
@@ -1143,9 +1156,9 @@ Boolean values
''''''''''''''
The common values ``True``, ``False``, ``TRUE``, and ``FALSE`` are all
-recognized as boolean. Sometime you would want to recognize some other values
-as being boolean. To do this use the ``true_values`` and ``false_values``
-options:
+recognized as boolean. Occasionally you might want to recognize other values
+as being boolean. To do this, use the ``true_values`` and ``false_values``
+options as follows:
.. ipython:: python
@@ -1161,7 +1174,7 @@ Handling "bad" lines
Some files may have malformed lines with too few fields or too many. Lines with
too few fields will have NA values filled in the trailing fields. Lines with
-too many will cause an error by default:
+too many fields will raise an error by default:
.. ipython:: python
:suppress:
@@ -1228,7 +1241,7 @@ By default, ``read_csv`` uses the Excel dialect and treats the double quote as
the quote character, which causes it to fail when it finds a newline before it
finds the closing double quote.
-We can get around this using ``dialect``
+We can get around this using ``dialect``:
.. ipython:: python
:okwarning:
@@ -1253,9 +1266,9 @@ after a delimiter:
print(data)
pd.read_csv(StringIO(data), skipinitialspace=True)
-The parsers make every attempt to "do the right thing" and not be very
-fragile. Type inference is a pretty big deal. So if a column can be coerced to
-integer dtype without altering the contents, it will do so. Any non-numeric
+The parsers make every attempt to "do the right thing" and not be fragile. Type
+inference is a pretty big deal. If a column can be coerced to integer dtype
+without altering the contents, the parser will do so. Any non-numeric
columns will come through as object dtype as with the rest of pandas objects.
.. _io.quoting:
@@ -1278,7 +1291,7 @@ should pass the ``escapechar`` option:
Files with Fixed Width Columns
''''''''''''''''''''''''''''''
-While ``read_csv`` reads delimited data, the :func:`read_fwf` function works
+While :func:`read_csv` reads delimited data, the :func:`read_fwf` function works
with data files that have known and fixed column widths. The function parameters
to ``read_fwf`` are largely the same as `read_csv` with two extra parameters, and
a different usage of the ``delimiter`` parameter:
@@ -1287,7 +1300,7 @@ a different usage of the ``delimiter`` parameter:
fixed-width fields of each line as half-open intervals (i.e., [from, to[ ).
String value 'infer' can be used to instruct the parser to try detecting
the column specifications from the first 100 rows of the data. Default
- behaviour, if not specified, is to infer.
+ behavior, if not specified, is to infer.
- ``widths``: A list of field widths which can be used instead of 'colspecs'
if the intervals are contiguous.
- ``delimiter``: Characters to consider as filler characters in the fixed-width file.
@@ -1312,7 +1325,7 @@ Consider a typical fixed-width data file:
print(open('bar.csv').read())
-In order to parse this file into a DataFrame, we simply need to supply the
+In order to parse this file into a ``DataFrame``, we simply need to supply the
column specifications to the `read_fwf` function along with the file name:
.. ipython:: python
@@ -1383,7 +1396,7 @@ column:
print(open('foo.csv').read())
In this special case, ``read_csv`` assumes that the first column is to be used
-as the index of the DataFrame:
+as the index of the ``DataFrame``:
.. ipython:: python
@@ -1436,10 +1449,10 @@ rows will skip the intervening rows.
.. ipython:: python
from pandas.util.testing import makeCustomDataframe as mkdf
- df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
+ df = mkdf(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
df.to_csv('mi.csv')
print(open('mi.csv').read())
- pd.read_csv('mi.csv',header=[0,1,2,3],index_col=[0,1])
+ pd.read_csv('mi.csv', header=[0, 1, 2, 3], index_col=[0, 1])
``read_csv`` is also able to interpret a more common format
of multi-columns indices.
@@ -1448,17 +1461,17 @@ of multi-columns indices.
:suppress:
data = ",a,a,a,b,c,c\n,q,r,s,t,u,v\none,1,2,3,4,5,6\ntwo,7,8,9,10,11,12"
- fh = open('mi2.csv','w')
+ fh = open('mi2.csv', 'w')
fh.write(data)
fh.close()
.. ipython:: python
print(open('mi2.csv').read())
- pd.read_csv('mi2.csv',header=[0,1],index_col=0)
+ pd.read_csv('mi2.csv', header=[0, 1], index_col=0)
Note: If an ``index_col`` is not specified (e.g. you don't have an index, or wrote it
-with ``df.to_csv(..., index=False``), then any ``names`` on the columns index will be *lost*.
+with ``df.to_csv(..., index=False)``, then any ``names`` on the columns index will be *lost*.
.. ipython:: python
:suppress:
@@ -1578,7 +1591,7 @@ Writing out Data
Writing to CSV format
+++++++++++++++++++++
-The Series and DataFrame objects have an instance method ``to_csv`` which
+The ``Series`` and ``DataFrame`` objects have an instance method ``to_csv`` which
allows storing the contents of the object as a comma-separated-values file. The
function takes a number of arguments. Only the first is required.
@@ -1591,7 +1604,7 @@ function takes a number of arguments. Only the first is required.
- ``index``: whether to write row (index) names (default True)
- ``index_label``: Column label(s) for index column(s) if desired. If None
(default), and `header` and `index` are True, then the index names are
- used. (A sequence should be given if the DataFrame uses MultiIndex).
+ used. (A sequence should be given if the ``DataFrame`` uses MultiIndex).
- ``mode`` : Python write mode, default 'w'
- ``encoding``: a string representing the encoding to use if the contents are
non-ASCII, for Python versions prior to 3
@@ -1611,7 +1624,7 @@ Writing a formatted string
.. _io.formatting:
-The DataFrame object has an instance method ``to_string`` which allows control
+The ``DataFrame`` object has an instance method ``to_string`` which allows control
over the string representation of the object. All arguments are optional:
- ``buf`` default None, for example a StringIO object
@@ -1622,8 +1635,8 @@ over the string representation of the object. All arguments are optional:
which takes a single argument and returns a formatted string
- ``float_format`` default None, a function which takes a single (float)
argument and returns a formatted string; to be applied to floats in the
- DataFrame.
- - ``sparsify`` default True, set to False for a DataFrame with a hierarchical
+ ``DataFrame``.
+ - ``sparsify`` default True, set to False for a ``DataFrame`` with a hierarchical
index to print every multiindex key at each row.
- ``index_names`` default True, will print the names of the indices
- ``index`` default True, will print the index (ie, row labels)
@@ -1631,7 +1644,7 @@ over the string representation of the object. All arguments are optional:
- ``justify`` default ``left``, will print column headers left- or
right-justified
-The Series object also has a ``to_string`` method, but with only the ``buf``,
+The ``Series`` object also has a ``to_string`` method, but with only the ``buf``,
``na_rep``, ``float_format`` arguments. There is also a ``length`` argument
which, if set to ``True``, will additionally output the length of the Series.
@@ -1654,11 +1667,11 @@ with optional parameters:
This can be ``None`` in which case a JSON string is returned
- ``orient`` :
- Series :
+ ``Series``:
- default is ``index``
- allowed values are {``split``, ``records``, ``index``}
- DataFrame
+ ``DataFrame``:
- default is ``columns``
- allowed values are {``split``, ``records``, ``index``, ``columns``, ``values``, ``table``}
@@ -1693,7 +1706,7 @@ Orient Options
++++++++++++++
There are a number of different options for the format of the resulting JSON
-file / string. Consider the following DataFrame and Series:
+file / string. Consider the following ``DataFrame`` and ``Series``:
.. ipython:: python
@@ -1720,8 +1733,8 @@ but the index labels are now primary:
sjo.to_json(orient="index")
**Record oriented** serializes the data to a JSON array of column -> value records,
-index labels are not included. This is useful for passing DataFrame data to plotting
-libraries, for example the JavaScript library d3.js:
+index labels are not included. This is useful for passing ``DataFrame`` data to plotting
+libraries, for example the JavaScript library ``d3.js``:
.. ipython:: python
@@ -1756,7 +1769,7 @@ preservation of metadata including but not limited to dtypes and index names.
Date Handling
+++++++++++++
-Writing in ISO date format
+Writing in ISO date format:
.. ipython:: python
@@ -1766,21 +1779,21 @@ Writing in ISO date format
json = dfd.to_json(date_format='iso')
json
-Writing in ISO date format, with microseconds
+Writing in ISO date format, with microseconds:
.. ipython:: python
json = dfd.to_json(date_format='iso', date_unit='us')
json
-Epoch timestamps, in seconds
+Epoch timestamps, in seconds:
.. ipython:: python
json = dfd.to_json(date_format='epoch', date_unit='s')
json
-Writing to a file, with a date index and a date column
+Writing to a file, with a date index and a date column:
.. ipython:: python
@@ -1795,7 +1808,8 @@ Writing to a file, with a date index and a date column
Fallback Behavior
+++++++++++++++++
-If the JSON serializer cannot handle the container contents directly it will fallback in the following manner:
+If the JSON serializer cannot handle the container contents directly it will
+fall back in the following manner:
- if the dtype is unsupported (e.g. ``np.complex``) then the ``default_handler``, if provided, will be called
for each value, otherwise an exception is raised.
@@ -1864,13 +1878,13 @@ is ``None``. To explicitly force ``Series`` parsing, pass ``typ=series``
``table``; adhering to the JSON `Table Schema`_
-- ``dtype`` : if True, infer dtypes, if a dict of column to dtype, then use those, if False, then don't infer dtypes at all, default is True, apply only to the data
-- ``convert_axes`` : boolean, try to convert the axes to the proper dtypes, default is True
-- ``convert_dates`` : a list of columns to parse for dates; If True, then try to parse date-like columns, default is True
-- ``keep_default_dates`` : boolean, default True. If parsing dates, then parse the default date-like columns
-- ``numpy`` : direct decoding to NumPy arrays. default is False;
- Supports numeric data only, although labels may be non-numeric. Also note that the JSON ordering **MUST** be the same for each term if ``numpy=True``
-- ``precise_float`` : boolean, default ``False``. Set to enable usage of higher precision (strtod) function when decoding string to double values. Default (``False``) is to use fast but less precise builtin functionality
+- ``dtype`` : if True, infer dtypes, if a dict of column to dtype, then use those, if ``False``, then don't infer dtypes at all, default is True, apply only to the data.
+- ``convert_axes`` : boolean, try to convert the axes to the proper dtypes, default is ``True``
+- ``convert_dates`` : a list of columns to parse for dates; If ``True``, then try to parse date-like columns, default is ``True``.
+- ``keep_default_dates`` : boolean, default ``True``. If parsing dates, then parse the default date-like columns.
+- ``numpy`` : direct decoding to NumPy arrays. default is ``False``;
+ Supports numeric data only, although labels may be non-numeric. Also note that the JSON ordering **MUST** be the same for each term if ``numpy=True``.
+- ``precise_float`` : boolean, default ``False``. Set to enable usage of higher precision (strtod) function when decoding string to double values. Default (``False``) is to use fast but less precise builtin functionality.
- ``date_unit`` : string, the timestamp unit to detect if converting dates. Default
None. By default the timestamp precision will be detected, if this is not desired
then pass one of 's', 'ms', 'us' or 'ns' to force timestamp precision to
@@ -1888,9 +1902,11 @@ overview.
Data Conversion
+++++++++++++++
-The default of ``convert_axes=True``, ``dtype=True``, and ``convert_dates=True`` will try to parse the axes, and all of the data
-into appropriate types, including dates. If you need to override specific dtypes, pass a dict to ``dtype``. ``convert_axes`` should only
-be set to ``False`` if you need to preserve string-like numbers (e.g. '1', '2') in an axes.
+The default of ``convert_axes=True``, ``dtype=True``, and ``convert_dates=True``
+will try to parse the axes, and all of the data into appropriate types,
+including dates. If you need to override specific dtypes, pass a dict to
+``dtype``. ``convert_axes`` should only be set to ``False`` if you need to
+preserve string-like numbers (e.g. '1', '2') in an axes.
.. note::
@@ -2175,7 +2191,7 @@ A few notes on the generated table schema:
- Periods are converted to timestamps before serialization, and so have the
same behavior of being converted to UTC. In addition, periods will contain
- and additional field ``freq`` with the period's frequency, e.g. ``'A-DEC'``
+ and additional field ``freq`` with the period's frequency, e.g. ``'A-DEC'``.
.. ipython:: python
@@ -2184,7 +2200,7 @@ A few notes on the generated table schema:
build_table_schema(s_per)
- Categoricals use the ``any`` type and an ``enum`` constraint listing
- the set of possible values. Additionally, an ``ordered`` field is included
+ the set of possible values. Additionally, an ``ordered`` field is included:
.. ipython:: python
@@ -2212,7 +2228,7 @@ A few notes on the generated table schema:
+ For series, the ``object.name`` is used. If that's none, then the
name is ``values``
- + For DataFrames, the stringified version of the column name is used
+ + For ``DataFrames``, the stringified version of the column name is used
+ For ``Index`` (not ``MultiIndex``), ``index.name`` is used, with a
fallback to ``index`` if that is None.
+ For ``MultiIndex``, ``mi.names`` is used. If any level has no name,
@@ -2268,15 +2284,15 @@ Reading HTML Content
below regarding the issues surrounding the BeautifulSoup4/html5lib/lxml parsers.
The top-level :func:`~pandas.io.html.read_html` function can accept an HTML
-string/file/URL and will parse HTML tables into list of pandas DataFrames.
+string/file/URL and will parse HTML tables into list of pandas ``DataFrames``.
Let's look at a few examples.
.. note::
``read_html`` returns a ``list`` of ``DataFrame`` objects, even if there is
- only a single table contained in the HTML content
+ only a single table contained in the HTML content.
-Read a URL with no options
+Read a URL with no options:
.. ipython:: python
@@ -2290,7 +2306,7 @@ Read a URL with no options
and the data below may be slightly different.
Read in the content of the file from the above URL and pass it to ``read_html``
-as a string
+as a string:
.. ipython:: python
:suppress:
@@ -2304,7 +2320,7 @@ as a string
dfs = pd.read_html(f.read())
dfs
-You can even pass in an instance of ``StringIO`` if you so desire
+You can even pass in an instance of ``StringIO`` if you so desire:
.. ipython:: python
@@ -2323,7 +2339,7 @@ You can even pass in an instance of ``StringIO`` if you so desire
<http://www.github.com/pandas-dev/pandas/issues>`__.
-Read a URL and match a table that contains specific text
+Read a URL and match a table that contains specific text:
.. code-block:: python
@@ -2339,26 +2355,26 @@ from the data minus the parsed header elements (``<th>`` elements).
dfs = pd.read_html(url, header=0)
-Specify an index column
+Specify an index column:
.. code-block:: python
dfs = pd.read_html(url, index_col=0)
-Specify a number of rows to skip
+Specify a number of rows to skip:
.. code-block:: python
dfs = pd.read_html(url, skiprows=0)
Specify a number of rows to skip using a list (``xrange`` (Python 2 only) works
-as well)
+as well):
.. code-block:: python
dfs = pd.read_html(url, skiprows=range(2))
-Specify an HTML attribute
+Specify an HTML attribute:
.. code-block:: python
@@ -2366,7 +2382,7 @@ Specify an HTML attribute
dfs2 = pd.read_html(url, attrs={'class': 'sortable'})
print(np.array_equal(dfs1[0], dfs2[0])) # Should be True
-Specify values that should be converted to NaN
+Specify values that should be converted to NaN:
.. code-block:: python
@@ -2374,7 +2390,7 @@ Specify values that should be converted to NaN
.. versionadded:: 0.19
-Specify whether to keep the default set of NaN values
+Specify whether to keep the default set of NaN values:
.. code-block:: python
@@ -2384,7 +2400,7 @@ Specify whether to keep the default set of NaN values
Specify converters for columns. This is useful for numerical text data that has
leading zeros. By default columns that are numerical are cast to numeric
-types and the leading zeros are lost. To avoid this, we can convert these
+types and the leading zeros are lost. To avoid this, we can convert these
columns to strings.
.. code-block:: python
@@ -2395,13 +2411,13 @@ columns to strings.
.. versionadded:: 0.19
-Use some combination of the above
+Use some combination of the above:
.. code-block:: python
dfs = pd.read_html(url, match='Metcalf Bank', index_col=0)
-Read in pandas ``to_html`` output (with some loss of floating point precision)
+Read in pandas ``to_html`` output (with some loss of floating point precision):
.. code-block:: python
@@ -2410,15 +2426,15 @@ Read in pandas ``to_html`` output (with some loss of floating point precision)
dfin = pd.read_html(s, index_col=0)
The ``lxml`` backend will raise an error on a failed parse if that is the only
-parser you provide (if you only have a single parser you can provide just a
+parser you provide. If you only have a single parser you can provide just a
string, but it is considered good practice to pass a list with one string if,
-for example, the function expects a sequence of strings)
+for example, the function expects a sequence of strings. You may use:
.. code-block:: python
dfs = pd.read_html(url, 'Metcalf Bank', index_col=0, flavor=['lxml'])
-or
+Or you could pass ``flavor='lxml'`` without a list:
.. code-block:: python
@@ -2472,7 +2488,7 @@ HTML:
.. raw:: html
:file: _static/basic.html
-The ``columns`` argument will limit the columns shown
+The ``columns`` argument will limit the columns shown:
.. ipython:: python
@@ -2489,7 +2505,7 @@ HTML:
:file: _static/columns.html
``float_format`` takes a Python callable to control the precision of floating
-point values
+point values:
.. ipython:: python
@@ -2506,7 +2522,7 @@ HTML:
:file: _static/float_format.html
``bold_rows`` will make the row labels bold by default, but you can turn that
-off
+off:
.. ipython:: python
@@ -2579,7 +2595,7 @@ parse HTML tables in the top-level pandas io function ``read_html``.
* Benefits
- * |lxml|_ is very fast
+ * |lxml|_ is very fast.
* |lxml|_ requires Cython to install correctly.
@@ -2652,8 +2668,8 @@ The :func:`~pandas.read_excel` method can read Excel 2003 (``.xls``) and
Excel 2007+ (``.xlsx``) files using the ``xlrd`` Python
module. The :meth:`~DataFrame.to_excel` instance method is used for
saving a ``DataFrame`` to Excel. Generally the semantics are
-similar to working with :ref:`csv<io.read_csv_table>` data. See the :ref:`cookbook<cookbook.excel>` for some
-advanced strategies
+similar to working with :ref:`csv<io.read_csv_table>` data.
+See the :ref:`cookbook<cookbook.excel>` for some advanced strategies.
.. _io.excel_reader:
@@ -2696,7 +2712,7 @@ The ``sheet_names`` property will generate
a list of the sheet names in the file.
The primary use-case for an ``ExcelFile`` is parsing multiple sheets with
-different parameters
+different parameters:
.. code-block:: python
@@ -2725,7 +2741,7 @@ of sheet names can simply be passed to ``read_excel`` with no loss in performanc
Specifying Sheets
+++++++++++++++++
-.. note :: The second argument is ``sheet_name``, not to be confused with ``ExcelFile.sheet_names``
+.. note :: The second argument is ``sheet_name``, not to be confused with ``ExcelFile.sheet_names``.
.. note :: An ExcelFile's attribute ``sheet_names`` provides access to a list of sheets.
@@ -2802,12 +2818,12 @@ parameters.
df.index = df.index.set_names(['lvl1', 'lvl2'])
df.to_excel('path_to_file.xlsx')
- df = pd.read_excel('path_to_file.xlsx', index_col=[0,1])
+ df = pd.read_excel('path_to_file.xlsx', index_col=[0, 1])
df
If the source file has both ``MultiIndex`` index and columns, lists specifying each
-should be passed to ``index_col`` and ``header``
+should be passed to ``index_col`` and ``header``:
.. ipython:: python
@@ -2828,10 +2844,10 @@ Parsing Specific Columns
++++++++++++++++++++++++
It is often the case that users will insert columns to do temporary computations
-in Excel and you may not want to read in those columns. `read_excel` takes
-a `usecols` keyword to allow you to specify a subset of columns to parse.
+in Excel and you may not want to read in those columns. ``read_excel`` takes
+a ``usecols`` keyword to allow you to specify a subset of columns to parse.
-If `usecols` is an integer, then it is assumed to indicate the last column
+If ``usecols`` is an integer, then it is assumed to indicate the last column
to be parsed.
.. code-block:: python
@@ -2840,11 +2856,12 @@ to be parsed.
If `usecols` is a list of integers, then it is assumed to be the file column
indices to be parsed.
+
.. code-block:: python
read_excel('path_to_file.xls', 'Sheet1', usecols=[0, 2, 3])
-Element order is ignored, so usecols=[0,1] is the same as [1,0].
+Element order is ignored, so ``usecols=[0,1]`` is the same as ``[1,0]``.
Parsing Dates
+++++++++++++
@@ -2852,7 +2869,7 @@ Parsing Dates
Datetime-like values are normally automatically converted to the appropriate
dtype when reading the excel file. But if you have a column of strings that
*look* like dates (but are not actually formatted as dates in excel), you can
-use the `parse_dates` keyword to parse those strings to datetimes:
+use the ``parse_dates`` keyword to parse those strings to datetimes:
.. code-block:: python
@@ -2862,7 +2879,7 @@ use the `parse_dates` keyword to parse those strings to datetimes:
Cell Converters
+++++++++++++++
-It is possible to transform the contents of Excel cells via the `converters`
+It is possible to transform the contents of Excel cells via the ``converters``
option. For instance, to convert a column to boolean:
.. code-block:: python
@@ -2903,11 +2920,11 @@ Writing Excel Files
Writing Excel Files to Disk
+++++++++++++++++++++++++++
-To write a DataFrame object to a sheet of an Excel file, you can use the
+To write a ``DataFrame`` object to a sheet of an Excel file, you can use the
``to_excel`` instance method. The arguments are largely the same as ``to_csv``
described above, the first argument being the name of the excel file, and the
-optional second argument the name of the sheet to which the DataFrame should be
-written. For example:
+optional second argument the name of the sheet to which the ``DataFrame`` should be
+written. For example:
.. code-block:: python
@@ -2917,7 +2934,7 @@ Files with a ``.xls`` extension will be written using ``xlwt`` and those with a
``.xlsx`` extension will be written using ``xlsxwriter`` (if available) or
``openpyxl``.
-The DataFrame will be written in a way that tries to mimic the REPL output.
+The ``DataFrame`` will be written in a way that tries to mimic the REPL output.
The ``index_label`` will be placed in the second
row instead of the first. You can place it in the first row by setting the
``merge_cells`` option in ``to_excel()`` to ``False``:
@@ -2926,10 +2943,7 @@ row instead of the first. You can place it in the first row by setting the
df.to_excel('path_to_file.xlsx', index_label='label', merge_cells=False)
-The Panel class also has a ``to_excel`` instance method,
-which writes each DataFrame in the Panel to a separate sheet.
-
-In order to write separate DataFrames to separate sheets in a single Excel file,
+In order to write separate ``DataFrames`` to separate sheets in a single Excel file,
one can pass an :class:`~pandas.io.excel.ExcelWriter`.
.. code-block:: python
@@ -2990,13 +3004,13 @@ Pandas supports writing Excel files to buffer-like objects such as ``StringIO``
Excel writer engines
''''''''''''''''''''
-``pandas`` chooses an Excel writer via two methods:
+Pandas chooses an Excel writer via two methods:
1. the ``engine`` keyword argument
2. the filename extension (via the default specified in config options)
-By default, ``pandas`` uses the `XlsxWriter`_ for ``.xlsx`` and `openpyxl`_
-for ``.xlsm`` files and `xlwt`_ for ``.xls`` files. If you have multiple
+By default, pandas uses the `XlsxWriter`_ for ``.xlsx``, `openpyxl`_
+for ``.xlsm``, and `xlwt`_ for ``.xls`` files. If you have multiple
engines installed, you can set the default engine through :ref:`setting the
config options <options>` ``io.excel.xlsx.writer`` and
``io.excel.xls.writer``. pandas will fall back on `openpyxl`_ for ``.xlsx``
@@ -3034,8 +3048,8 @@ Style and Formatting
The look and feel of Excel worksheets created from pandas can be modified using the following parameters on the ``DataFrame``'s ``to_excel`` method.
-- ``float_format`` : Format string for floating point numbers (default None)
-- ``freeze_panes`` : A tuple of two integers representing the bottommost row and rightmost column to freeze. Each of these parameters is one-based, so (1, 1) will freeze the first row and first column (default None)
+- ``float_format`` : Format string for floating point numbers (default ``None``).
+- ``freeze_panes`` : A tuple of two integers representing the bottommost row and rightmost column to freeze. Each of these parameters is one-based, so (1, 1) will freeze the first row and first column (default ``None``).
@@ -3044,10 +3058,10 @@ The look and feel of Excel worksheets created from pandas can be modified using
Clipboard
---------
-A handy way to grab data is to use the ``read_clipboard`` method, which takes
-the contents of the clipboard buffer and passes them to the ``read_table``
-method. For instance, you can copy the following
-text to the clipboard (CTRL-C on many operating systems):
+A handy way to grab data is to use the :meth:`~DataFrame.read_clipboard` method,
+which takes the contents of the clipboard buffer and passes them to the
+``read_table`` method. For instance, you can copy the following text to the
+clipboard (CTRL-C on many operating systems):
.. code-block:: python
@@ -3056,7 +3070,7 @@ text to the clipboard (CTRL-C on many operating systems):
y 2 5 q
z 3 6 r
-And then import the data directly to a DataFrame by calling:
+And then import the data directly to a ``DataFrame`` by calling:
.. code-block:: python
@@ -3066,10 +3080,11 @@ And then import the data directly to a DataFrame by calling:
clipdf
-The ``to_clipboard`` method can be used to write the contents of a DataFrame to
+
+The ``to_clipboard`` method can be used to write the contents of a ``DataFrame`` to
the clipboard. Following which you can paste the clipboard contents into other
applications (CTRL-V on many operating systems). Here we illustrate writing a
-DataFrame into clipboard and reading it back.
+``DataFrame`` into clipboard and reading it back.
.. ipython:: python
@@ -3121,7 +3136,7 @@ any pickled pandas object (or any other pickled object) from file:
Several internal refactorings have been done while still preserving
compatibility with pickles created with older versions of pandas. However,
- for such cases, pickled dataframes, series etc, must be read with
+ for such cases, pickled ``DataFrames``, ``Series`` etc, must be read with
``pd.read_pickle``, rather than ``pickle.load``.
See `here <http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#whatsnew-0130-refactoring>`__
@@ -3139,8 +3154,8 @@ Compressed pickle files
:func:`read_pickle`, :meth:`DataFrame.to_pickle` and :meth:`Series.to_pickle` can read
and write compressed pickle files. The compression types of ``gzip``, ``bz2``, ``xz`` are supported for reading and writing.
-`zip`` file supports read only and must contain only one data file
-to be read in.
+The ``zip`` file format only supports reading and must contain only one data file
+to be read.
The compression type can be an explicit parameter or be inferred from the file extension.
If 'infer', then use ``gzip``, ``bz2``, ``zip``, or ``xz`` if filename ends in ``'.gz'``, ``'.bz2'``, ``'.zip'``, or
@@ -3154,7 +3169,7 @@ If 'infer', then use ``gzip``, ``bz2``, ``zip``, or ``xz`` if filename ends in `
'C': pd.date_range('20130101', periods=1000, freq='s')})
df
-Using an explicit compression type
+Using an explicit compression type:
.. ipython:: python
@@ -3162,7 +3177,7 @@ Using an explicit compression type
rt = pd.read_pickle("data.pkl.compress", compression="gzip")
rt
-Inferring compression type from the extension
+Inferring compression type from the extension:
.. ipython:: python
@@ -3170,7 +3185,7 @@ Inferring compression type from the extension
rt = pd.read_pickle("data.pkl.xz", compression="infer")
rt
-The default is to 'infer
+The default is to 'infer':
.. ipython:: python
@@ -3221,14 +3236,14 @@ You can pass a list of objects and you will receive them back on deserialization
pd.to_msgpack('foo.msg', df, 'foo', np.array([1,2,3]), s)
pd.read_msgpack('foo.msg')
-You can pass ``iterator=True`` to iterate over the unpacked results
+You can pass ``iterator=True`` to iterate over the unpacked results:
.. ipython:: python
for o in pd.read_msgpack('foo.msg',iterator=True):
print(o)
-You can pass ``append=True`` to the writer to append to an existing pack
+You can pass ``append=True`` to the writer to append to an existing pack:
.. ipython:: python
@@ -3331,7 +3346,7 @@ In a current or later Python session, you can retrieve stored objects:
# dotted (attribute) access provides get as well
store.df
-Deletion of the object specified by the key
+Deletion of the object specified by the key:
.. ipython:: python
@@ -3340,7 +3355,7 @@ Deletion of the object specified by the key
store
-Closing a Store, Context Manager
+Closing a Store and using a context manager:
.. ipython:: python
@@ -3348,8 +3363,7 @@ Closing a Store, Context Manager
store
store.is_open
- # Working with, and automatically closing the store with the context
- # manager
+ # Working with, and automatically closing the store using a context manager
with pd.HDFStore('store.h5') as store:
store.keys()
@@ -3449,17 +3463,17 @@ the ``fixed`` format. These types of stores are **not** appendable once written
remove them and rewrite). Nor are they **queryable**; they must be
retrieved in their entirety. They also do not support dataframes with non-unique column names.
The ``fixed`` format stores offer very fast writing and slightly faster reading than ``table`` stores.
-This format is specified by default when using ``put`` or ``to_hdf`` or by ``format='fixed'`` or ``format='f'``
+This format is specified by default when using ``put`` or ``to_hdf`` or by ``format='fixed'`` or ``format='f'``.
.. warning::
- A ``fixed`` format will raise a ``TypeError`` if you try to retrieve using a ``where`` .
+ A ``fixed`` format will raise a ``TypeError`` if you try to retrieve using a ``where``:
.. code-block:: python
- pd.DataFrame(randn(10,2)).to_hdf('test_fixed.h5','df')
+ pd.DataFrame(randn(10, 2)).to_hdf('test_fixed.h5', 'df')
- pd.read_hdf('test_fixed.h5','df',where='index>5')
+ pd.read_hdf('test_fixed.h5', 'df', where='index>5')
TypeError: cannot pass a where specification when reading a fixed format.
this store must be selected in its entirety
@@ -3472,9 +3486,9 @@ Table Format
``HDFStore`` supports another ``PyTables`` format on disk, the ``table``
format. Conceptually a ``table`` is shaped very much like a DataFrame,
with rows and columns. A ``table`` may be appended to in the same or
-other sessions. In addition, delete & query type operations are
+other sessions. In addition, delete and query type operations are
supported. This format is specified by ``format='table'`` or ``format='t'``
-to ``append`` or ``put`` or ``to_hdf``
+to ``append`` or ``put`` or ``to_hdf``.
This format can be set as an option as well ``pd.set_option('io.hdf.default_format','table')`` to
enable ``put/append/to_hdf`` to by default store in the ``table`` format.
@@ -3514,9 +3528,9 @@ Hierarchical Keys
Keys to a store can be specified as a string. These can be in a
hierarchical path-name like format (e.g. ``foo/bar/bah``), which will
generate a hierarchy of sub-stores (or ``Groups`` in PyTables
-parlance). Keys can be specified with out the leading '/' and are ALWAYS
+parlance). Keys can be specified with out the leading '/' and are **always**
absolute (e.g. 'foo' refers to '/foo'). Removal operations can remove
-everything in the sub-store and BELOW, so be *careful*.
+everything in the sub-store and **below**, so be *careful*.
.. ipython:: python
@@ -3547,7 +3561,7 @@ everything in the sub-store and BELOW, so be *careful*.
/foo/bar/bah (Group) ''
children := ['block0_items' (Array), 'block0_values' (Array), 'axis0' (Array), 'axis1' (Array)]
- Instead, use explicit string based keys
+ Instead, use explicit string based keys:
.. ipython:: python
@@ -3596,8 +3610,8 @@ defaults to `nan`.
Storing Multi-Index DataFrames
++++++++++++++++++++++++++++++
-Storing multi-index dataframes as tables is very similar to
-storing/selecting from homogeneous index DataFrames.
+Storing multi-index ``DataFrames`` as tables is very similar to
+storing/selecting from homogeneous index ``DataFrames``.
.. ipython:: python
@@ -3632,10 +3646,10 @@ data.
A query is specified using the ``Term`` class under the hood, as a boolean expression.
-- ``index`` and ``columns`` are supported indexers of a DataFrame
+- ``index`` and ``columns`` are supported indexers of a ``DataFrames``.
- ``major_axis``, ``minor_axis``, and ``items`` are supported indexers of
- the Panel
-- if ``data_columns`` are specified, these can be used as additional indexers
+ the Panel.
+- if ``data_columns`` are specified, these can be used as additional indexers.
Valid comparison operators are:
@@ -3849,7 +3863,7 @@ to perform queries (other than the `indexable` columns, which you can
always query). For instance say you want to perform this common
operation, on-disk, and return just the frame that matches this
query. You can specify ``data_columns = True`` to force all columns to
-be data_columns
+be ``data_columns``.
.. ipython:: python
@@ -3879,7 +3893,7 @@ There is some performance degradation by making lots of columns into
`data columns`, so it is up to the user to designate these. In addition,
you cannot change data columns (nor indexables) after the first
append/put operation (Of course you can simply read in the data and
-create a new table!)
+create a new table!).
Iterator
++++++++
@@ -3912,7 +3926,7 @@ chunks.
.. ipython:: python
- dfeq = pd.DataFrame({'number': np.arange(1,11)})
+ dfeq = pd.DataFrame({'number': np.arange(1, 11)})
dfeq
store.append('dfeq', dfeq, data_columns=['number'])
@@ -3921,9 +3935,9 @@ chunks.
return [l[i:i+n] for i in range(0, len(l), n)]
evens = [2,4,6,8,10]
- coordinates = store.select_as_coordinates('dfeq','number=evens')
+ coordinates = store.select_as_coordinates('dfeq', 'number=evens')
for c in chunks(coordinates, 2):
- print(store.select('dfeq',where=c))
+ print(store.select('dfeq', where=c))
Advanced Queries
++++++++++++++++
@@ -4005,7 +4019,7 @@ table names to a list of 'columns' you want in that table. If `None`
is used in place of a list, that table will have the remaining
unspecified columns of the given DataFrame. The argument ``selector``
defines which table is the selector table (which you can make queries from).
-The argument ``dropna`` will drop rows from the input DataFrame to ensure
+The argument ``dropna`` will drop rows from the input ``DataFrame`` to ensure
tables are synchronized. This means that if a row for one of the tables
being written to is entirely ``np.NaN``, that row will be dropped from all tables.
@@ -4081,7 +4095,7 @@ the table using a ``where`` that selects all but the missing data.
automatically. Thus, repeatedly deleting (or removing nodes) and adding
again, **WILL TEND TO INCREASE THE FILE SIZE**.
- To *repack and clean* the file, use :ref:`ptrepack <io.hdf5-ptrepack>`
+ To *repack and clean* the file, use :ref:`ptrepack <io.hdf5-ptrepack>`.
.. _io.hdf5-notes:
@@ -4464,7 +4478,7 @@ Several caveats.
- Non supported types include ``Period`` and actual Python object types. These will raise a helpful error message
on an attempt at serialization.
-See the `Full Documentation <https://github.com/wesm/feather>`__
+See the `Full Documentation <https://github.com/wesm/feather>`__.
.. ipython:: python
@@ -4522,8 +4536,8 @@ dtypes, including extension dtypes such as datetime with tz.
Several caveats.
-- Duplicate column names and non-string columns names are not supported
-- Index level names, if specified, must be strings
+- Duplicate column names and non-string columns names are not supported.
+- Index level names, if specified, must be strings.
- Categorical dtypes can be serialized to parquet, but will de-serialize as ``object`` dtype.
- Non supported types include ``Period`` and actual Python object types. These will raise a helpful error message
on an attempt at serialization.
@@ -4532,7 +4546,7 @@ You can specify an ``engine`` to direct the serialization. This can be one of ``
If the engine is NOT specified, then the ``pd.options.io.parquet.engine`` option is checked; if this is also ``auto``,
then ``pyarrow`` is tried, and falling back to ``fastparquet``.
-See the documentation for `pyarrow <http://arrow.apache.org/docs/python/>`__ and `fastparquet <https://fastparquet.readthedocs.io/en/latest/>`__
+See the documentation for `pyarrow <http://arrow.apache.org/docs/python/>`__ and `fastparquet <https://fastparquet.readthedocs.io/en/latest/>`__.
.. note::
@@ -4652,7 +4666,7 @@ If you want to manage your own connections you can pass one of those instead:
Writing DataFrames
''''''''''''''''''
-Assuming the following data is in a DataFrame ``data``, we can insert it into
+Assuming the following data is in a ``DataFrame`` ``data``, we can insert it into
the database using :func:`~pandas.DataFrame.to_sql`.
+-----+------------+-------+-------+-------+
@@ -4738,7 +4752,7 @@ table name and optionally a subset of columns to read.
pd.read_sql_table('data', engine)
-You can also specify the name of the column as the DataFrame index,
+You can also specify the name of the column as the ``DataFrame`` index,
and specify a subset of columns to be read.
.. ipython:: python
@@ -4807,7 +4821,7 @@ Specifying this will return an iterator through chunks of the query result:
for chunk in pd.read_sql_query("SELECT * FROM data_chunks", engine, chunksize=5):
print(chunk)
-You can also run a plain query without creating a dataframe with
+You can also run a plain query without creating a ``DataFrame`` with
:func:`~pandas.io.sql.execute`. This is useful for queries that don't return values,
such as INSERT. This is functionally equivalent to calling ``execute`` on the
SQLAlchemy engine or db connection object. Again, you must use the SQL syntax
@@ -4923,7 +4937,7 @@ pandas integrates with this external package. if ``pandas-gbq`` is installed, yo
use the pandas methods ``pd.read_gbq`` and ``DataFrame.to_gbq``, which will call the
respective functions from ``pandas-gbq``.
-Full documentation can be found `here <https://pandas-gbq.readthedocs.io/>`__
+Full documentation can be found `here <https://pandas-gbq.readthedocs.io/>`__.
.. _io.stata:
@@ -4986,7 +5000,7 @@ Reading from Stata format
'''''''''''''''''''''''''
The top-level function ``read_stata`` will read a dta file and return
-either a DataFrame or a :class:`~pandas.io.stata.StataReader` that can
+either a ``DataFrame`` or a :class:`~pandas.io.stata.StataReader` that can
be used to read the file incrementally.
.. ipython:: python
@@ -5084,7 +5098,7 @@ whether imported ``Categorical`` variables are ordered.
.. note::
- *Stata* supports partially labeled series. These series have value labels for
+ *Stata* supports partially labeled series. These series have value labels for
some but not all data values. Importing a partially labeled series will produce
a ``Categorical`` with string categories for the values that are labeled and
numeric categories for values with no label.
@@ -5144,7 +5158,7 @@ into and from pandas, we recommend these packages from the broader community.
netCDF
''''''
-xarray_ provides data structures inspired by the pandas DataFrame for working
+xarray_ provides data structures inspired by the pandas ``DataFrame`` for working
with multi-dimensional datasets, with a focus on the netCDF file format and
easy conversion to and from pandas.
@@ -5173,7 +5187,8 @@ ignored.
dtypes: float64(1), int64(1)
memory usage: 15.3 MB
-Writing
+When writing, the top-three functions in terms of speed are are
+``test_pickle_write``, ``test_feather_write`` and ``test_hdf_fixed_write_compress``.
.. code-block:: ipython
@@ -5204,7 +5219,8 @@ Writing
In [32]: %timeit test_pickle_write_compress(df)
3.33 s ± 55.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
-Reading
+When reading, the top three are ``test_feather_read``, ``test_pickle_read`` and
+``test_hdf_fixed_read``.
.. code-block:: ipython
@@ -5249,7 +5265,7 @@ Space on disk (in bytes)
16000848 Aug 21 18:00 test.pkl
7554108 Aug 21 18:00 test.pkl.compress
-And here's the code
+And here's the code:
.. code-block:: python
| Spellcheck of the docs, specifically of `io.rst`.
I did not read everything thoroughly, but I believe the suggested changes will help enforce a greater level of consistency in the docs.
* Backticks ` `` ` around Series, DataFrame.
* Added some function references, i.e. `` :meth:`~DataFrame.method` ``.
* Consistency of booleans, typsetting as ``False`` instead of False or *False*.
* Minor rephrasing of sentences, spelling, periods, colons etc.
Cheers!
| https://api.github.com/repos/pandas-dev/pandas/pulls/19660 | 2018-02-12T17:07:07Z | 2018-02-14T11:31:20Z | 2018-02-14T11:31:20Z | 2018-02-14T11:31:39Z |
BUG: Fix __truediv__ numexpr error | diff --git a/RELEASE.rst b/RELEASE.rst
index 57cb53c1096f6..ebd88091050f1 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -250,6 +250,8 @@ pandas 0.11.1
not converting dtypes (GH3911_)
- Fixed a bug where ``DataFrame.replace`` with a compiled regular expression
in the ``to_replace`` argument wasn't working (GH3907_)
+ - Fixed ``__truediv__`` in Python 2.7 with ``numexpr`` installed to actually do true division when dividing
+ two integer arrays with at least 10000 cells total (GH3764_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -351,6 +353,7 @@ pandas 0.11.1
.. _GH3907: https://github.com/pydata/pandas/issues/3907
.. _GH3911: https://github.com/pydata/pandas/issues/3911
.. _GH3912: https://github.com/pydata/pandas/issues/3912
+.. _GH3764: https://github.com/pydata/pandas/issues/3764
pandas 0.11.0
=============
diff --git a/pandas/core/expressions.py b/pandas/core/expressions.py
index 34e56fe576a07..abe891b82410c 100644
--- a/pandas/core/expressions.py
+++ b/pandas/core/expressions.py
@@ -51,7 +51,7 @@ def set_numexpr_threads(n = None):
pass
-def _evaluate_standard(op, op_str, a, b, raise_on_error=True):
+def _evaluate_standard(op, op_str, a, b, raise_on_error=True, **eval_kwargs):
""" standard evaluation """
return op(a,b)
@@ -79,7 +79,7 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check):
return False
-def _evaluate_numexpr(op, op_str, a, b, raise_on_error = False):
+def _evaluate_numexpr(op, op_str, a, b, raise_on_error = False, **eval_kwargs):
result = None
if _can_use_numexpr(op, op_str, a, b, 'evaluate'):
@@ -92,7 +92,7 @@ def _evaluate_numexpr(op, op_str, a, b, raise_on_error = False):
result = ne.evaluate('a_value %s b_value' % op_str,
local_dict={ 'a_value' : a_value,
'b_value' : b_value },
- casting='safe')
+ casting='safe', **eval_kwargs)
except (ValueError), detail:
if 'unknown type object' in str(detail):
pass
@@ -142,7 +142,7 @@ def _where_numexpr(cond, a, b, raise_on_error = False):
# turn myself on
set_use_numexpr(True)
-def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True):
+def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True, **eval_kwargs):
""" evaluate and return the expression of the op on a and b
Parameters
@@ -158,7 +158,7 @@ def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True):
"""
if use_numexpr:
- return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error)
+ return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error, **eval_kwargs)
return _evaluate_standard(op, op_str, a, b, raise_on_error=raise_on_error)
def where(cond, a, b, raise_on_error=False, use_numexpr=True):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f0145364363ac..47142daa8b20b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -190,10 +190,10 @@ class DataConflictError(Exception):
# Factory helper methods
-def _arith_method(op, name, str_rep = None, default_axis='columns', fill_zeros=None):
+def _arith_method(op, name, str_rep = None, default_axis='columns', fill_zeros=None, **eval_kwargs):
def na_op(x, y):
try:
- result = expressions.evaluate(op, str_rep, x, y, raise_on_error=True)
+ result = expressions.evaluate(op, str_rep, x, y, raise_on_error=True, **eval_kwargs)
result = com._fill_zeros(result,y,fill_zeros)
except TypeError:
@@ -853,12 +853,17 @@ def __contains__(self, key):
__sub__ = _arith_method(operator.sub, '__sub__', '-', default_axis=None)
__mul__ = _arith_method(operator.mul, '__mul__', '*', default_axis=None)
__truediv__ = _arith_method(operator.truediv, '__truediv__', '/',
- default_axis=None, fill_zeros=np.inf)
+ default_axis=None, fill_zeros=np.inf, truediv=True)
+ # numexpr produces a different value (python/numpy: 0.000, numexpr: inf)
+ # when dividing by zero, so can't use floordiv speed up (yet)
+ # __floordiv__ = _arith_method(operator.floordiv, '__floordiv__', '//',
__floordiv__ = _arith_method(operator.floordiv, '__floordiv__',
default_axis=None, fill_zeros=np.inf)
__pow__ = _arith_method(operator.pow, '__pow__', '**', default_axis=None)
- __mod__ = _arith_method(operator.mod, '__mod__', '*', default_axis=None, fill_zeros=np.nan)
+ # currently causes a floating point exception to occur - so sticking with unaccelerated for now
+ # __mod__ = _arith_method(operator.mod, '__mod__', '%', default_axis=None, fill_zeros=np.nan)
+ __mod__ = _arith_method(operator.mod, '__mod__', default_axis=None, fill_zeros=np.nan)
__radd__ = _arith_method(_radd_compat, '__radd__', default_axis=None)
__rmul__ = _arith_method(operator.mul, '__rmul__', default_axis=None)
@@ -879,7 +884,7 @@ def __contains__(self, key):
# Python 2 division methods
if not py3compat.PY3:
__div__ = _arith_method(operator.div, '__div__', '/',
- default_axis=None, fill_zeros=np.inf)
+ default_axis=None, fill_zeros=np.inf, truediv=False)
__rdiv__ = _arith_method(lambda x, y: y / x, '__rdiv__',
default_axis=None, fill_zeros=np.inf)
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index af7f20a65fa7c..ba0a9926dfa78 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -30,6 +30,7 @@
_frame2 = DataFrame(np.random.randn(100, 4), columns = list('ABCD'), dtype='float64')
_mixed = DataFrame({ 'A' : _frame['A'].copy(), 'B' : _frame['B'].astype('float32'), 'C' : _frame['C'].astype('int64'), 'D' : _frame['D'].astype('int32') })
_mixed2 = DataFrame({ 'A' : _frame2['A'].copy(), 'B' : _frame2['B'].astype('float32'), 'C' : _frame2['C'].astype('int64'), 'D' : _frame2['D'].astype('int32') })
+_integer = DataFrame(np.random.randint(1, 100, size=(10001, 4)), columns = list('ABCD'), dtype='int64')
class TestExpressions(unittest.TestCase):
@@ -41,7 +42,56 @@ def setUp(self):
self.frame2 = _frame2.copy()
self.mixed = _mixed.copy()
self.mixed2 = _mixed2.copy()
-
+ self.integer = _integer.copy()
+ self._MIN_ELEMENTS = expr._MIN_ELEMENTS
+
+ def tearDown(self):
+ expr._MIN_ELEMENTS = self._MIN_ELEMENTS
+
+ #TODO: add test for Panel
+ #TODO: add tests for binary operations
+ @nose.tools.nottest
+ def run_arithmetic_test(self, df, assert_func, check_dtype=False):
+ expr._MIN_ELEMENTS = 0
+ operations = ['add', 'sub', 'mul','mod','truediv','floordiv','pow']
+ if not py3compat.PY3:
+ operations.append('div')
+ for arith in operations:
+ op = getattr(operator, arith)
+ expr.set_use_numexpr(False)
+ expected = op(df, df)
+ expr.set_use_numexpr(True)
+ result = op(df, df)
+ try:
+ if check_dtype:
+ if arith == 'div':
+ assert expected.dtype.kind == df.dtype.kind
+ if arith == 'truediv':
+ assert expected.dtype.kind == 'f'
+ assert_func(expected, result)
+ except Exception:
+ print("Failed test with operator %r" % op.__name__)
+ raise
+
+ def test_integer_arithmetic(self):
+ self.run_arithmetic_test(self.integer, assert_frame_equal)
+ self.run_arithmetic_test(self.integer.icol(0), assert_series_equal,
+ check_dtype=True)
+
+ def test_float_arithemtic(self):
+ self.run_arithmetic_test(self.frame, assert_frame_equal)
+ self.run_arithmetic_test(self.frame.icol(0), assert_series_equal,
+ check_dtype=True)
+
+ def test_mixed_arithmetic(self):
+ self.run_arithmetic_test(self.mixed, assert_frame_equal)
+ for col in self.mixed.columns:
+ self.run_arithmetic_test(self.mixed[col], assert_series_equal)
+
+ def test_integer_with_zeros(self):
+ self.integer *= np.random.randint(0, 2, size=np.shape(self.integer))
+ self.run_arithmetic_test(self.integer, assert_frame_equal)
+ self.run_arithmetic_test(self.integer.icol(0), assert_series_equal)
def test_invalid(self):
| Fixes a number of items relating to accelerated arithmetic in frame.
---
`__truediv__` had been set up to use numexpr, but this happened by passing the division operator as the string. numexpr's evaluate only checked 2 frames up, which meant that it picked up the division setting from `frame.py` and would do floor/integer division when both inputs were integers. You'd only see that issue with a dataframe large enough to trigger numexpr evaluation (>10000 cells)
This adds test cases to `test_expression.py` that [exhibit this failure under the Python 2.7 full deps test](https://travis-ci.org/jtratner/pandas/builds/7822069). The test cases only test `Series` and `DataFrame` (though it looks like neither `Series` nor `Panel` use `numexpr`). It doesn't fail under Python 3 because integer division is totally gone.
Now `evaluate`, `_evaluate_standard` and `_evaluate_numexpr` all accept extra keyword arguments, which are passed to `numexpr.evaluate`.
The test case is currently a separate commit that fails. I wasn't sure whether I should have combined it with the bugfix commit or not. Happy to change it if that's more appropriate.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3764 | 2013-06-05T22:40:00Z | 2013-06-18T13:01:18Z | 2013-06-18T13:01:18Z | 2014-06-13T05:42:35Z |
ENH: support HDFStore in Python3 (via PyTables 3.0.0) | diff --git a/README.rst b/README.rst
index daea702476ebc..85868176722bd 100644
--- a/README.rst
+++ b/README.rst
@@ -85,7 +85,6 @@ Optional dependencies
- `Cython <http://www.cython.org>`__: Only necessary to build development version. Version 0.17.1 or higher.
- `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions
- `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage
- - Not yet supported on python >= 3
- `matplotlib <http://matplotlib.sourceforge.net/>`__: for plotting
- `statsmodels <http://statsmodels.sourceforge.net/>`__
- Needed for parts of :mod:`pandas.stats`
diff --git a/RELEASE.rst b/RELEASE.rst
index 12d2389a8a59b..3a347246be8dd 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -63,6 +63,7 @@ pandas 0.11.1
to append an index with a different name than the existing
- support datelike columns with a timezone as data_columns (GH2852_)
- table writing performance improvements.
+ - support python3 (via ``PyTables 3.0.0``) (GH3750_)
- Add modulo operator to Series, DataFrame
- Add ``date`` method to DatetimeIndex
- Simplified the API and added a describe method to Categorical
@@ -79,10 +80,14 @@ pandas 0.11.1
**API Changes**
- - When removing an object from a ``HDFStore``, ``remove(key)`` raises
- ``KeyError`` if the key is not a valid store object.
- - In an ``HDFStore``, raise a ``TypeError`` on passing ``where`` or ``columns``
- to select with a Storer; these are invalid parameters at this time
+ - ``HDFStore``
+
+ - When removing an object, ``remove(key)`` raises
+ ``KeyError`` if the key is not a valid store object.
+ - raise a ``TypeError`` on passing ``where`` or ``columns``
+ to select with a Storer; these are invalid parameters at this time
+ - can now specify an ``encoding`` option to ``append/put``
+ to enable alternate encodings (GH3750_)
- The repr() for (Multi)Index now obeys display.max_seq_items rather
then numpy threshold print options. (GH3426_, GH3466_)
- Added mangle_dupe_cols option to read_table/csv, allowing users
@@ -288,6 +293,7 @@ pandas 0.11.1
.. _GH3740: https://github.com/pydata/pandas/issues/3740
.. _GH3748: https://github.com/pydata/pandas/issues/3748
.. _GH3741: https://github.com/pydata/pandas/issues/3741
+.. _GH3750: https://github.com/pydata/pandas/issues/3750
pandas 0.11.0
=============
diff --git a/ci/install.sh b/ci/install.sh
index a091834a9570f..b748070db85aa 100755
--- a/ci/install.sh
+++ b/ci/install.sh
@@ -69,13 +69,11 @@ if ( ! $VENV_FILE_AVAILABLE ); then
pip install $PIP_ARGS cython
if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then
- # installed explicitly above, to get the library as well
- # sudo apt-get $APT_ARGS install libhdf5-serial-dev;
- pip install numexpr
- pip install tables
pip install $PIP_ARGS xlwt
fi
+ pip install numexpr
+ pip install tables
pip install $PIP_ARGS matplotlib
pip install $PIP_ARGS openpyxl
pip install $PIP_ARGS xlrd>=0.9.0
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 6868969c1b968..9dc8064da45e3 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -95,7 +95,6 @@ Optional Dependencies
version. Version 0.17.1 or higher.
* `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions
* `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage
- * Not yet supported on python >= 3
* `matplotlib <http://matplotlib.sourceforge.net/>`__: for plotting
* `statsmodels <http://statsmodels.sourceforge.net/>`__
* Needed for parts of :mod:`pandas.stats`
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 802ab08e85932..1c615ca278668 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1300,12 +1300,11 @@ the high performance HDF5 format using the excellent `PyTables
<http://www.pytables.org/>`__ library. See the :ref:`cookbook<cookbook.hdf>`
for some advanced strategies
-.. warning::
+.. note::
- ``PyTables`` 3.0.0 was recently released. This enables support for Python 3,
- however, it has not been integrated into pandas as of yet. (Under Python 2,
- ``PyTables`` version >= 2.3 is supported).
-
+ ``PyTables`` 3.0.0 was recently released to enables support for Python 3.
+ Pandas should be fully compatible (and previously written stores should be
+ backwards compatible) with all ``PyTables`` >= 2.3
.. ipython:: python
:suppress:
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index b2fee1acbc4d6..badb364d214d1 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -237,6 +237,9 @@ Enhancements
pd.get_option('a.b')
pd.get_option('b.c')
+ - Support for ``HDFStore`` (via ``PyTables 3.0.0``) on Python3
+
+
Bug Fixes
~~~~~~~~~
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 5a480e08effba..b1b7b80e5fd23 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -13,12 +13,13 @@
import numpy as np
from pandas import (
- Series, TimeSeries, DataFrame, Panel, Panel4D, Index, MultiIndex, Int64Index
+ Series, TimeSeries, DataFrame, Panel, Panel4D, Index,
+ MultiIndex, Int64Index, Timestamp
)
from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel
from pandas.sparse.array import BlockIndex, IntIndex
from pandas.tseries.api import PeriodIndex, DatetimeIndex
-from pandas.core.common import adjoin, isnull
+from pandas.core.common import adjoin, isnull, is_list_like
from pandas.core.algorithms import match, unique, factorize
from pandas.core.categorical import Categorical
from pandas.core.common import _asarray_tuplesafe, _try_sort
@@ -27,6 +28,7 @@
from pandas.core.index import Int64Index, _ensure_index
import pandas.core.common as com
from pandas.tools.merge import concat
+from pandas.util import py3compat
import pandas.lib as lib
import pandas.algos as algos
@@ -37,6 +39,21 @@
# versioning attribute
_version = '0.10.1'
+# PY3 encoding if we don't specify
+_default_encoding = 'UTF-8'
+
+def _ensure_decoded(s):
+ """ if we have bytes, decode them to unicde """
+ if isinstance(s, np.bytes_):
+ s = s.decode('UTF-8')
+ return s
+def _ensure_encoding(encoding):
+ # set the encoding if we need
+ if encoding is None:
+ if py3compat.PY3:
+ encoding = _default_encoding
+ return encoding
+
class IncompatibilityWarning(Warning): pass
incompatibility_doc = """
where criteria is being ignored as this version [%s] is too old (or not-defined),
@@ -56,40 +73,40 @@ class PerformanceWarning(Warning): pass
# map object types
_TYPE_MAP = {
- Series : 'series',
- SparseSeries : 'sparse_series',
- TimeSeries : 'series',
- DataFrame : 'frame',
- SparseDataFrame : 'sparse_frame',
- Panel : 'wide',
- Panel4D : 'ndim',
- SparsePanel : 'sparse_panel'
+ Series : u'series',
+ SparseSeries : u'sparse_series',
+ TimeSeries : u'series',
+ DataFrame : u'frame',
+ SparseDataFrame : u'sparse_frame',
+ Panel : u'wide',
+ Panel4D : u'ndim',
+ SparsePanel : u'sparse_panel'
}
# storer class map
_STORER_MAP = {
- 'TimeSeries' : 'LegacySeriesStorer',
- 'Series' : 'LegacySeriesStorer',
- 'DataFrame' : 'LegacyFrameStorer',
- 'DataMatrix' : 'LegacyFrameStorer',
- 'series' : 'SeriesStorer',
- 'sparse_series' : 'SparseSeriesStorer',
- 'frame' : 'FrameStorer',
- 'sparse_frame' : 'SparseFrameStorer',
- 'wide' : 'PanelStorer',
- 'sparse_panel' : 'SparsePanelStorer',
+ u'TimeSeries' : 'LegacySeriesStorer',
+ u'Series' : 'LegacySeriesStorer',
+ u'DataFrame' : 'LegacyFrameStorer',
+ u'DataMatrix' : 'LegacyFrameStorer',
+ u'series' : 'SeriesStorer',
+ u'sparse_series' : 'SparseSeriesStorer',
+ u'frame' : 'FrameStorer',
+ u'sparse_frame' : 'SparseFrameStorer',
+ u'wide' : 'PanelStorer',
+ u'sparse_panel' : 'SparsePanelStorer',
}
# table class map
_TABLE_MAP = {
- 'generic_table' : 'GenericTable',
- 'appendable_frame' : 'AppendableFrameTable',
- 'appendable_multiframe' : 'AppendableMultiFrameTable',
- 'appendable_panel' : 'AppendablePanelTable',
- 'appendable_ndim' : 'AppendableNDimTable',
- 'worm' : 'WORMTable',
- 'legacy_frame' : 'LegacyFrameTable',
- 'legacy_panel' : 'LegacyPanelTable',
+ u'generic_table' : 'GenericTable',
+ u'appendable_frame' : 'AppendableFrameTable',
+ u'appendable_multiframe' : 'AppendableMultiFrameTable',
+ u'appendable_panel' : 'AppendablePanelTable',
+ u'appendable_ndim' : 'AppendableNDimTable',
+ u'worm' : 'WORMTable',
+ u'legacy_frame' : 'LegacyFrameTable',
+ u'legacy_panel' : 'LegacyPanelTable',
}
# axes map
@@ -522,15 +539,16 @@ def put(self, key, value, table=None, append=False, **kwargs):
Parameters
----------
- key : object
- value : {Series, DataFrame, Panel}
- table : boolean, default False
+ key : object
+ value : {Series, DataFrame, Panel}
+ table : boolean, default False
Write as a PyTables Table structure which may perform worse but
allow more flexible operations like searching / selecting subsets
of the data
- append : boolean, default False
+ append : boolean, default False
For table data structures, append the input data to the existing
table
+ encoding : default None, provide an encoding for strings
"""
self._write_to_group(key, value, table=table, append=append, **kwargs)
@@ -595,6 +613,7 @@ def append(self, key, value, columns=None, **kwargs):
nan_rep : string to use as string nan represenation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
+ encoding : default None, provide an encoding for strings
Notes
-----
@@ -692,7 +711,8 @@ def create_table_index(self, key, **kwargs):
def groups(self):
""" return a list of all the top-level nodes (that are not themselves a pandas storage object) """
_tables()
- return [ g for g in self._handle.walkNodes() if getattr(g._v_attrs,'pandas_type',None) or getattr(g,'table',None) or (isinstance(g,_table_mod.table.Table) and g._v_name != 'table') ]
+ return [ g for g in self._handle.walkNodes() if getattr(g._v_attrs,'pandas_type',None) or getattr(
+ g,'table',None) or (isinstance(g,_table_mod.table.Table) and g._v_name != u'table') ]
def get_node(self, key):
""" return the node with the key or None if it does not exist """
@@ -712,7 +732,8 @@ def get_storer(self, key):
s.infer_axes()
return s
- def copy(self, file, mode = 'w', propindexes = True, keys = None, complib = None, complevel = None, fletcher32 = False, overwrite = True):
+ def copy(self, file, mode = 'w', propindexes = True, keys = None, complib = None, complevel = None,
+ fletcher32 = False, overwrite = True):
""" copy the existing store to a new file, upgrading in place
Parameters
@@ -746,9 +767,9 @@ def copy(self, file, mode = 'w', propindexes = True, keys = None, complib = None
index = False
if propindexes:
index = [ a.name for a in s.axes if a.is_indexed ]
- new_store.append(k,data, index=index, data_columns=getattr(s,'data_columns',None))
+ new_store.append(k, data, index=index, data_columns=getattr(s,'data_columns',None), encoding=s.encoding)
else:
- new_store.put(k,data)
+ new_store.put(k, data, encoding=s.encoding)
return new_store
@@ -761,8 +782,8 @@ def error(t):
raise TypeError("cannot properly create the storer for: [%s] [group->%s,value->%s,table->%s,append->%s,kwargs->%s]" %
(t,group,type(value),table,append,kwargs))
- pt = getattr(group._v_attrs,'pandas_type',None)
- tt = getattr(group._v_attrs,'table_type',None)
+ pt = _ensure_decoded(getattr(group._v_attrs,'pandas_type',None))
+ tt = _ensure_decoded(getattr(group._v_attrs,'table_type',None))
# infer the pt from the passed value
if pt is None:
@@ -770,8 +791,8 @@ def error(t):
_tables()
if getattr(group,'table',None) or isinstance(group,_table_mod.table.Table):
- pt = 'frame_table'
- tt = 'generic_table'
+ pt = u'frame_table'
+ tt = u'generic_table'
else:
raise TypeError("cannot create a storer if the object is not existing nor a value are passed")
else:
@@ -783,10 +804,10 @@ def error(t):
# we are actually a table
if table or append:
- pt += '_table'
+ pt += u'_table'
# a storer node
- if 'table' not in pt:
+ if u'table' not in pt:
try:
return globals()[_STORER_MAP[pt]](self, group, **kwargs)
except:
@@ -798,26 +819,26 @@ def error(t):
# if we are a writer, determin the tt
if value is not None:
- if pt == 'frame_table':
+ if pt == u'frame_table':
index = getattr(value,'index',None)
if index is not None:
if index.nlevels == 1:
- tt = 'appendable_frame'
+ tt = u'appendable_frame'
elif index.nlevels > 1:
- tt = 'appendable_multiframe'
- elif pt == 'wide_table':
- tt = 'appendable_panel'
- elif pt == 'ndim_table':
- tt = 'appendable_ndim'
+ tt = u'appendable_multiframe'
+ elif pt == u'wide_table':
+ tt = u'appendable_panel'
+ elif pt == u'ndim_table':
+ tt = u'appendable_ndim'
else:
# distiguish between a frame/table
- tt = 'legacy_panel'
+ tt = u'legacy_panel'
try:
fields = group.table._v_attrs.fields
- if len(fields) == 1 and fields[0] == 'value':
- tt = 'legacy_frame'
+ if len(fields) == 1 and fields[0] == u'value':
+ tt = u'legacy_frame'
except:
pass
@@ -826,7 +847,8 @@ def error(t):
except:
error('_TABLE_MAP')
- def _write_to_group(self, key, value, index=True, table=False, append=False, complib=None, **kwargs):
+ def _write_to_group(self, key, value, index=True, table=False, append=False,
+ complib=None, encoding=None, **kwargs):
group = self.get_node(key)
# remove the node if we are not appending
@@ -851,7 +873,8 @@ def _write_to_group(self, key, value, index=True, table=False, append=False, com
group = self._handle.createGroup(path, p)
path = new_path
- s = self._create_storer(group, value, table=table, append=append, **kwargs)
+ s = self._create_storer(group, value, table=table, append=append,
+ encoding=encoding, **kwargs)
if append:
# raise if we are trying to append to a non-table,
# or a table that exists (and we are putting)
@@ -890,7 +913,7 @@ class TableIterator(object):
def __init__(self, func, nrows, start=None, stop=None, chunksize=None):
self.func = func
- self.nrows = nrows
+ self.nrows = nrows or 0
self.start = start or 0
if stop is None:
@@ -1015,7 +1038,7 @@ def infer(self, table):
new_self.get_attr()
return new_self
- def convert(self, values, nan_rep):
+ def convert(self, values, nan_rep, encoding):
""" set the values from this selection: take = take ownership """
try:
values = values[self.cname]
@@ -1024,19 +1047,19 @@ def convert(self, values, nan_rep):
kwargs = dict()
if self.freq is not None:
- kwargs['freq'] = self.freq
+ kwargs['freq'] = _ensure_decoded(self.freq)
if self.tz is not None:
- kwargs['tz'] = self.tz
+ kwargs['tz'] = _ensure_decoded(self.tz)
if self.index_name is not None:
- kwargs['name'] = self.index_name
+ kwargs['name'] = _ensure_decoded(self.index_name)
try:
- self.values = Index(_maybe_convert(values, self.kind), **kwargs)
+ self.values = Index(_maybe_convert(values, self.kind, self.encoding), **kwargs)
except:
# if the output freq is different that what we recorded, then infer it
if 'freq' in kwargs:
kwargs['freq'] = 'infer'
- self.values = Index(_maybe_convert(values, self.kind), **kwargs)
+ self.values = Index(_maybe_convert(values, self.kind, encoding), **kwargs)
return self
def take_data(self):
@@ -1068,7 +1091,7 @@ def __iter__(self):
def maybe_set_size(self, min_itemsize=None, **kwargs):
""" maybe set a string col itemsize:
min_itemsize can be an interger or a dict with this columns name with an integer size """
- if self.kind == 'string':
+ if _ensure_decoded(self.kind) == u'string':
if isinstance(min_itemsize, dict):
min_itemsize = min_itemsize.get(self.name)
@@ -1088,7 +1111,7 @@ def validate_col(self, itemsize=None):
# validate this column for string truncation (or reset to the max size)
dtype = getattr(self, 'dtype', None)
- if self.kind == 'string':
+ if _ensure_decoded(self.kind) == u'string':
c = self.col
if c is not None:
@@ -1167,7 +1190,7 @@ class GenericIndexCol(IndexCol):
def is_indexed(self):
return False
- def convert(self, values, nan_rep):
+ def convert(self, values, nan_rep, encoding):
""" set the values from this selection: take = take ownership """
self.values = Int64Index(np.arange(self.table.nrows))
@@ -1218,7 +1241,7 @@ def __init__(self, values=None, kind=None, typ=None, cname=None, data=None, bloc
super(DataCol, self).__init__(
values=values, kind=kind, typ=typ, cname=cname, **kwargs)
self.dtype = None
- self.dtype_attr = "%s_dtype" % self.name
+ self.dtype_attr = u"%s_dtype" % self.name
self.set_data(data)
def __repr__(self):
@@ -1246,22 +1269,25 @@ def take_data(self):
def set_kind(self):
# set my kind if we can
if self.dtype is not None:
- if self.dtype.startswith('string'):
+ dtype = _ensure_decoded(self.dtype)
+ if dtype.startswith(u'string') or dtype.startswith(u'bytes'):
self.kind = 'string'
- elif self.dtype.startswith('float'):
+ elif dtype.startswith(u'float'):
self.kind = 'float'
- elif self.dtype.startswith('int'):
+ elif dtype.startswith(u'int') or dtype.startswith(u'uint'):
self.kind = 'integer'
- elif self.dtype.startswith('date'):
+ elif dtype.startswith(u'date'):
self.kind = 'datetime'
- elif self.dtype.startswith('bool'):
+ elif dtype.startswith(u'bool'):
self.kind = 'bool'
+ else:
+ raise AssertionError("cannot interpret dtype of [%s] in [%s]" % (dtype,self))
# set my typ if we need
if self.typ is None:
self.typ = getattr(self.description,self.cname,None)
- def set_atom(self, block, existing_col, min_itemsize, nan_rep, info, **kwargs):
+ def set_atom(self, block, existing_col, min_itemsize, nan_rep, info, encoding=None, **kwargs):
""" create and setup my atom from the block b """
self.values = list(block.items)
@@ -1304,7 +1330,7 @@ def set_atom(self, block, existing_col, min_itemsize, nan_rep, info, **kwargs):
# this is basically a catchall; if say a datetime64 has nans then will
# end up here ###
elif inferred_type == 'string' or dtype == 'object':
- self.set_atom_string(block, existing_col, min_itemsize, nan_rep)
+ self.set_atom_string(block, existing_col, min_itemsize, nan_rep, encoding)
else:
self.set_atom_data(block)
@@ -1313,7 +1339,7 @@ def set_atom(self, block, existing_col, min_itemsize, nan_rep, info, **kwargs):
def get_atom_string(self, block, itemsize):
return _tables().StringCol(itemsize=itemsize, shape=block.shape[0])
- def set_atom_string(self, block, existing_col, min_itemsize, nan_rep):
+ def set_atom_string(self, block, existing_col, min_itemsize, nan_rep, encoding):
# fill nan items with myself
block = block.fillna(nan_rep)
data = block.values
@@ -1334,7 +1360,7 @@ def set_atom_string(self, block, existing_col, min_itemsize, nan_rep):
# itemsize is the maximum length of a string (along any dimension)
- itemsize = lib.max_len_string_array(data.ravel())
+ itemsize = lib.max_len_string_array(com._ensure_object(data.ravel()))
# specified min_itemsize?
if isinstance(min_itemsize, dict):
@@ -1351,10 +1377,10 @@ def set_atom_string(self, block, existing_col, min_itemsize, nan_rep):
self.itemsize = itemsize
self.kind = 'string'
self.typ = self.get_atom_string(block, itemsize)
- self.set_data(self.convert_string_data(data, itemsize))
+ self.set_data(self.convert_string_data(data, itemsize, encoding))
- def convert_string_data(self, data, itemsize):
- return data.astype('S%s' % itemsize)
+ def convert_string_data(self, data, itemsize, encoding):
+ return _convert_string_array(data, encoding, itemsize)
def get_atom_coltype(self):
""" return the PyTables column class for this column """
@@ -1407,7 +1433,7 @@ def validate_attr(self, append):
raise ValueError("appended items dtype do not match existing items dtype"
" in table!")
- def convert(self, values, nan_rep):
+ def convert(self, values, nan_rep, encoding):
""" set the data from this selection (and convert to the correct dtype if we can) """
try:
values = values[self.cname]
@@ -1417,9 +1443,10 @@ def convert(self, values, nan_rep):
# convert to the correct dtype
if self.dtype is not None:
+ dtype = _ensure_decoded(self.dtype)
# reverse converts
- if self.dtype == 'datetime64':
+ if dtype == u'datetime64':
# recreate the timezone
if self.tz is not None:
@@ -1432,30 +1459,30 @@ def convert(self, values, nan_rep):
else:
self.data = np.asarray(self.data, dtype='M8[ns]')
- elif self.dtype == 'date':
+ elif dtype == u'date':
self.data = np.array(
[date.fromtimestamp(v) for v in self.data], dtype=object)
- elif self.dtype == 'datetime':
+ elif dtype == u'datetime':
self.data = np.array(
[datetime.fromtimestamp(v) for v in self.data],
dtype=object)
else:
try:
- self.data = self.data.astype(self.dtype)
+ self.data = self.data.astype(dtype)
except:
self.data = self.data.astype('O')
- # convert nans
- if self.kind == 'string':
- self.data = lib.array_replace_from_nan_rep(
- self.data.ravel(), nan_rep).reshape(self.data.shape)
+ # convert nans / decode
+ if _ensure_decoded(self.kind) == u'string':
+ self.data = _unconvert_string_array(self.data, nan_rep=nan_rep, encoding=encoding)
+
return self
def get_attr(self):
""" get the data for this colummn """
self.values = getattr(self.attrs, self.kind_attr, None)
- self.dtype = getattr(self.attrs, self.dtype_attr, None)
+ self.dtype = getattr(self.attrs, self.dtype_attr, None)
self.set_kind()
def set_attr(self):
@@ -1471,7 +1498,7 @@ class DataIndexableCol(DataCol):
@property
def is_searchable(self):
- return self.kind == 'string'
+ return _ensure_decoded(self.kind) == u'string'
def get_atom_string(self, block, itemsize):
return _tables().StringCol(itemsize=itemsize)
@@ -1504,9 +1531,10 @@ class Storer(object):
ndim = None
is_table = False
- def __init__(self, parent, group, **kwargs):
+ def __init__(self, parent, group, encoding=None, **kwargs):
self.parent = parent
self.group = group
+ self.encoding = _ensure_encoding(encoding)
self.set_version()
@property
@@ -1515,7 +1543,7 @@ def is_old_version(self):
def set_version(self):
""" compute and set our version """
- version = getattr(self.group._v_attrs,'pandas_version',None)
+ version = _ensure_decoded(getattr(self.group._v_attrs,'pandas_version',None))
try:
self.version = tuple([int(x) for x in version.split('.')])
if len(self.version) == 2:
@@ -1525,7 +1553,7 @@ def set_version(self):
@property
def pandas_type(self):
- return getattr(self.group._v_attrs, 'pandas_type', None)
+ return _ensure_decoded(getattr(self.group._v_attrs, 'pandas_type', None))
def __repr__(self):
""" return a pretty representatgion of myself """
@@ -1674,10 +1702,18 @@ def validate_read(self, kwargs):
def is_exists(self):
return True
+ def set_attrs(self):
+ """ set our object attributes """
+ self.attrs.encoding = self.encoding
+
def get_attrs(self):
""" retrieve our attributes """
+ self.encoding = _ensure_encoding(getattr(self.attrs,'encoding',None))
for n in self.attributes:
- setattr(self,n,getattr(self.attrs, n, None))
+ setattr(self,n,_ensure_decoded(getattr(self.attrs, n, None)))
+
+ def write(self, obj, **kwargs):
+ self.set_attrs()
def read_array(self, key):
""" read an array for the specified node (off of group """
@@ -1700,7 +1736,7 @@ def read_array(self, key):
else:
ret = data
- if dtype == 'datetime64':
+ if dtype == u'datetime64':
ret = np.array(ret, dtype='M8[ns]')
if transposed:
@@ -1709,15 +1745,15 @@ def read_array(self, key):
return ret
def read_index(self, key):
- variety = getattr(self.attrs, '%s_variety' % key)
+ variety = _ensure_decoded(getattr(self.attrs, '%s_variety' % key))
- if variety == 'multi':
+ if variety == u'multi':
return self.read_multi_index(key)
- elif variety == 'block':
+ elif variety == u'block':
return self.read_block_index(key)
- elif variety == 'sparseint':
+ elif variety == u'sparseint':
return self.read_sparse_intindex(key)
- elif variety == 'regular':
+ elif variety == u'regular':
_, index = self.read_index_node(getattr(self.group, key))
return index
else: # pragma: no cover
@@ -1735,7 +1771,7 @@ def write_index(self, key, index):
self.write_sparse_intindex(key, index)
else:
setattr(self.attrs, '%s_variety' % key, 'regular')
- converted = _convert_index(index).set_name('index')
+ converted = _convert_index(index,self.encoding).set_name('index')
self.write_array(key, converted.values)
node = getattr(self.group, key)
node._v_attrs.kind = converted.kind
@@ -1782,7 +1818,7 @@ def write_multi_index(self, key, index):
index.names)):
# write the level
level_key = '%s_level%d' % (key, i)
- conv_level = _convert_index(lev).set_name(level_key)
+ conv_level = _convert_index(lev, self.encoding).set_name(level_key)
self.write_array(level_key, conv_level.values)
node = getattr(self.group, level_key)
node._v_attrs.kind = conv_level.kind
@@ -1815,7 +1851,7 @@ def read_multi_index(self, key):
def read_index_node(self, node):
data = node[:]
- kind = node._v_attrs.kind
+ kind = _ensure_decoded(node._v_attrs.kind)
name = None
if 'name' in node._v_attrs:
@@ -1826,23 +1862,22 @@ def read_index_node(self, node):
factory = self._get_index_factory(index_class)
kwargs = {}
- if 'freq' in node._v_attrs:
+ if u'freq' in node._v_attrs:
kwargs['freq'] = node._v_attrs['freq']
- if 'tz' in node._v_attrs:
+ if u'tz' in node._v_attrs:
kwargs['tz'] = node._v_attrs['tz']
- if kind in ('date', 'datetime'):
- index = factory(_unconvert_index(data, kind), dtype=object,
+ if kind in (u'date', u'datetime'):
+ index = factory(_unconvert_index(data, kind, encoding=self.encoding), dtype=object,
**kwargs)
else:
- index = factory(_unconvert_index(data, kind), **kwargs)
+ index = factory(_unconvert_index(data, kind, encoding=self.encoding), **kwargs)
index.name = name
return name, index
-
def write_array_empty(self, key, value):
""" write a 0-len array """
@@ -1922,7 +1957,7 @@ def read_index_legacy(self, key):
node = getattr(self.group,key)
data = node[:]
kind = node._v_attrs.kind
- return _unconvert_index_legacy(data, kind)
+ return _unconvert_index_legacy(data, kind, encoding=self.encoding)
class LegacySeriesStorer(LegacyStorer):
@@ -1942,7 +1977,7 @@ def read(self, **kwargs):
return DataFrame(values, index=index, columns=columns)
class SeriesStorer(GenericStorer):
- pandas_kind = 'series'
+ pandas_kind = u'series'
attributes = ['name']
@property
@@ -1963,12 +1998,13 @@ def read(self, **kwargs):
return Series(values, index=index, name=self.name)
def write(self, obj, **kwargs):
+ super(SeriesStorer, self).write(obj, **kwargs)
self.write_index('index', obj.index)
self.write_array('values', obj.values)
self.attrs.name = obj.name
class SparseSeriesStorer(GenericStorer):
- pandas_kind = 'sparse_series'
+ pandas_kind = u'sparse_series'
attributes = ['name','fill_value','kind']
def read(self, **kwargs):
@@ -1977,10 +2013,11 @@ def read(self, **kwargs):
sp_values = self.read_array('sp_values')
sp_index = self.read_index('sp_index')
return SparseSeries(sp_values, index=index, sparse_index=sp_index,
- kind=self.kind or 'block', fill_value=self.fill_value,
+ kind=self.kind or u'block', fill_value=self.fill_value,
name=self.name)
def write(self, obj, **kwargs):
+ super(SparseSeriesStorer, self).write(obj, **kwargs)
self.write_index('index', obj.index)
self.write_index('sp_index', obj.sp_index)
self.write_array('sp_values', obj.sp_values)
@@ -1989,7 +2026,7 @@ def write(self, obj, **kwargs):
self.attrs.kind = obj.kind
class SparseFrameStorer(GenericStorer):
- pandas_kind = 'sparse_frame'
+ pandas_kind = u'sparse_frame'
attributes = ['default_kind','default_fill_value']
def read(self, **kwargs):
@@ -2007,6 +2044,7 @@ def read(self, **kwargs):
def write(self, obj, **kwargs):
""" write it as a collection of individual sparse series """
+ super(SparseFrameStorer, self).write(obj, **kwargs)
for name, ss in obj.iteritems():
key = 'sparse_series_%s' % name
if key not in self.group._v_children:
@@ -2020,7 +2058,7 @@ def write(self, obj, **kwargs):
self.write_index('columns', obj.columns)
class SparsePanelStorer(GenericStorer):
- pandas_kind = 'sparse_panel'
+ pandas_kind = u'sparse_panel'
attributes = ['default_kind','default_fill_value']
def read(self, **kwargs):
@@ -2038,11 +2076,12 @@ def read(self, **kwargs):
default_fill_value=self.default_fill_value)
def write(self, obj, **kwargs):
+ super(SparsePanelStorer, self).write(obj, **kwargs)
self.attrs.default_fill_value = obj.default_fill_value
self.attrs.default_kind = obj.default_kind
self.write_index('items', obj.items)
- for name, sdf in obj.iteritems():
+ for name, sdf in obj.iterkv():
key = 'sparse_frame_%s' % name
if key not in self.group._v_children:
node = self._handle.createGroup(self.group, key)
@@ -2105,6 +2144,7 @@ def read(self, **kwargs):
return self.obj_type(BlockManager(blocks, axes))
def write(self, obj, **kwargs):
+ super(BlockManagerStorer, self).write(obj, **kwargs)
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
@@ -2122,11 +2162,11 @@ def write(self, obj, **kwargs):
self.write_index('block%d_items' % i, blk.items)
class FrameStorer(BlockManagerStorer):
- pandas_kind = 'frame'
+ pandas_kind = u'frame'
obj_type = DataFrame
class PanelStorer(BlockManagerStorer):
- pandas_kind = 'wide'
+ pandas_kind = u'wide'
obj_type = Panel
is_shape_reversed = True
@@ -2151,7 +2191,7 @@ class Table(Storer):
levels : the names of levels
"""
- pandas_kind = 'wide_table'
+ pandas_kind = u'wide_table'
table_type = None
levels = 1
is_table = True
@@ -2225,7 +2265,7 @@ def nrows_expected(self):
@property
def is_exists(self):
""" has this table been created """
- return 'table' in self.group
+ return u'table' in self.group
@property
def storable(self):
@@ -2291,6 +2331,7 @@ def set_attrs(self):
self.attrs.non_index_axes = self.non_index_axes
self.attrs.data_columns = self.data_columns
self.attrs.nan_rep = self.nan_rep
+ self.attrs.encoding = self.encoding
self.attrs.levels = self.levels
self.set_info()
@@ -2300,6 +2341,7 @@ def get_attrs(self):
self.data_columns = getattr(self.attrs,'data_columns',None) or []
self.info = getattr(self.attrs,'info',None) or dict()
self.nan_rep = getattr(self.attrs,'nan_rep',None)
+ self.encoding = _ensure_encoding(getattr(self.attrs,'encoding',None))
self.levels = getattr(self.attrs,'levels',None) or []
t = self.table
self.index_axes = [ a.infer(t) for a in self.indexables if a.is_an_indexable ]
@@ -2430,7 +2472,7 @@ def read_axes(self, where, **kwargs):
# convert the data
for a in self.axes:
a.set_info(self.info)
- a.convert(values, nan_rep=self.nan_rep)
+ a.convert(values, nan_rep=self.nan_rep, encoding=self.encoding)
return True
@@ -2473,6 +2515,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
validate: validate the obj against an existiing object already written
min_itemsize: a dict of the min size for a column in bytes
nan_rep : a values to use for string column nan_rep
+ encoding : the encoding for string values
data_columns : a list of columns that we want to create separate to allow indexing (or True will force all colummns)
"""
@@ -2492,10 +2535,11 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
if self.infer_axes():
existing_table = self.copy()
existing_table.infer_axes()
- axes = [ a.axis for a in existing_table.index_axes]
- data_columns = existing_table.data_columns
- nan_rep = existing_table.nan_rep
- self.info = copy.copy(existing_table.info)
+ axes = [ a.axis for a in existing_table.index_axes]
+ data_columns = existing_table.data_columns
+ nan_rep = existing_table.nan_rep
+ self.encoding = existing_table.encoding
+ self.info = copy.copy(existing_table.info)
else:
existing_table = None
@@ -2510,6 +2554,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
# nan_representation
if nan_rep is None:
nan_rep = 'nan'
+
self.nan_rep = nan_rep
# create axes to index and non_index
@@ -2519,7 +2564,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
if i in axes:
name = obj._AXIS_NAMES[i]
index_axes_map[i] = _convert_index(
- a).set_name(name).set_axis(i)
+ a, self.encoding).set_name(name).set_axis(i)
else:
# we might be able to change the axes on the appending data if
@@ -2595,6 +2640,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None,
existing_col=existing_col,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
+ encoding=self.encoding,
info=self.info,
**kwargs)
col.set_pos(j)
@@ -2716,7 +2762,7 @@ def read_column(self, column, where = None, **kwargs):
# column must be an indexable or a data column
c = getattr(self.table.cols, column)
a.set_info(self.info)
- return Series(a.convert(c[:], nan_rep=self.nan_rep).take_data())
+ return Series(a.convert(c[:], nan_rep=self.nan_rep, encoding=self.encoding).take_data())
raise KeyError("column [%s] not found in the table" % column)
@@ -2725,7 +2771,7 @@ class WORMTable(Table):
table. writing is a one-time operation the data are stored in a format
that allows for searching the data on disk
"""
- table_type = 'worm'
+ table_type = u'worm'
def read(self, **kwargs):
""" read the indicies and the indexing array, calculate offset rows and
@@ -2750,7 +2796,7 @@ class LegacyTable(Table):
IndexCol(name='column', axis=2,
pos=1, index_kind='columns_kind'),
DataCol(name='fields', cname='values', kind_attr='fields', pos=2)]
- table_type = 'legacy'
+ table_type = u'legacy'
ndim = 3
def write(self, **kwargs):
@@ -2840,8 +2886,8 @@ def read(self, where=None, columns=None, **kwargs):
class LegacyFrameTable(LegacyTable):
""" support the legacy frame table """
- pandas_kind = 'frame_table'
- table_type = 'legacy_frame'
+ pandas_kind = u'frame_table'
+ table_type = u'legacy_frame'
obj_type = Panel
def read(self, *args, **kwargs):
@@ -2850,14 +2896,14 @@ def read(self, *args, **kwargs):
class LegacyPanelTable(LegacyTable):
""" support the legacy panel table """
- table_type = 'legacy_panel'
+ table_type = u'legacy_panel'
obj_type = Panel
class AppendableTable(LegacyTable):
""" suppor the new appendable table formats """
_indexables = None
- table_type = 'appendable'
+ table_type = u'appendable'
def write(self, obj, axes=None, append=False, complib=None,
complevel=None, fletcher32=None, min_itemsize=None, chunksize=None,
@@ -2868,7 +2914,8 @@ def write(self, obj, axes=None, append=False, complib=None,
# create the axes
self.create_axes(axes=axes, obj=obj, validate=append,
- min_itemsize=min_itemsize, **kwargs)
+ min_itemsize=min_itemsize,
+ **kwargs)
if not self.is_exists:
@@ -3019,8 +3066,8 @@ def delete(self, where=None, **kwargs):
class AppendableFrameTable(AppendableTable):
""" suppor the new appendable table formats """
- pandas_kind = 'frame_table'
- table_type = 'appendable_frame'
+ pandas_kind = u'frame_table'
+ table_type = u'appendable_frame'
ndim = 2
obj_type = DataFrame
@@ -3074,8 +3121,8 @@ def read(self, where=None, columns=None, **kwargs):
class GenericTable(AppendableFrameTable):
""" a table that read/writes the generic pytables table format """
- pandas_kind = 'frame_table'
- table_type = 'generic_table'
+ pandas_kind = u'frame_table'
+ table_type = u'generic_table'
ndim = 2
obj_type = DataFrame
@@ -3119,13 +3166,13 @@ def write(self, **kwargs):
class AppendableMultiFrameTable(AppendableFrameTable):
""" a frame with a multi-index """
- table_type = 'appendable_multiframe'
+ table_type = u'appendable_multiframe'
obj_type = DataFrame
ndim = 2
@property
def table_type_short(self):
- return 'appendable_multi'
+ return u'appendable_multi'
def write(self, obj, data_columns=None, **kwargs):
if data_columns is None:
@@ -3150,7 +3197,7 @@ def read(self, columns=None, **kwargs):
class AppendablePanelTable(AppendableTable):
""" suppor the new appendable table formats """
- table_type = 'appendable_panel'
+ table_type = u'appendable_panel'
ndim = 3
obj_type = Panel
@@ -3167,11 +3214,11 @@ def is_transposed(self):
class AppendableNDimTable(AppendablePanelTable):
""" suppor the new appendable table formats """
- table_type = 'appendable_ndim'
+ table_type = u'appendable_ndim'
ndim = 4
obj_type = Panel4D
-def _convert_index(index):
+def _convert_index(index, encoding=None):
index_name = getattr(index,'name',None)
if isinstance(index, DatetimeIndex):
@@ -3211,7 +3258,7 @@ def _convert_index(index):
# atom = _tables().ObjectAtom()
# return np.asarray(values, dtype='O'), 'object', atom
- converted = np.array(list(values), dtype=np.str_)
+ converted = _convert_string_array(values, encoding)
itemsize = converted.dtype.itemsize
return IndexCol(converted, 'string', _tables().StringCol(itemsize), itemsize=itemsize,
index_name=index_name)
@@ -3233,48 +3280,90 @@ def _convert_index(index):
return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
index_name=index_name)
-def _unconvert_index(data, kind):
- if kind == 'datetime64':
+def _unconvert_index(data, kind, encoding=None):
+ kind = _ensure_decoded(kind)
+ if kind == u'datetime64':
index = DatetimeIndex(data)
- elif kind == 'datetime':
+ elif kind == u'datetime':
index = np.array([datetime.fromtimestamp(v) for v in data],
dtype=object)
- elif kind == 'date':
+ elif kind == u'date':
index = np.array([date.fromtimestamp(v) for v in data], dtype=object)
- elif kind in ('string', 'integer', 'float'):
+ elif kind in (u'integer', u'float'):
index = np.array(data)
- elif kind == 'object':
+ elif kind in (u'string'):
+ index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)
+ elif kind == u'object':
index = np.array(data[0])
else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
-def _unconvert_index_legacy(data, kind, legacy=False):
- if kind == 'datetime':
+def _unconvert_index_legacy(data, kind, legacy=False, encoding=None):
+ kind = _ensure_decoded(kind)
+ if kind == u'datetime':
index = lib.time64_to_datetime(data)
- elif kind in ('string', 'integer'):
+ elif kind in (u'integer'):
index = np.array(data, dtype=object)
+ elif kind in (u'string'):
+ index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)
else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
-def _maybe_convert(values, val_kind):
+def _convert_string_array(data, encoding, itemsize=None):
+
+ # encode if needed
+ if encoding is not None and len(data):
+ f = np.vectorize(lambda x: x.encode(encoding), otypes=[np.object])
+ data = f(data)
+
+ # create the sized dtype
+ if itemsize is None:
+ itemsize = lib.max_len_string_array(com._ensure_object(data.ravel()))
+
+ data = np.array(data,dtype="S%d" % itemsize)
+ return data
+
+def _unconvert_string_array(data, nan_rep=None, encoding=None):
+ """ deserialize a string array, possibly decoding """
+ shape = data.shape
+ data = np.array(data.ravel(),dtype=object)
+
+ # guard against a None encoding in PY3 (because of a legacy
+ # where the passed encoding is actually None)
+ encoding = _ensure_encoding(encoding)
+ if encoding is not None and len(data):
+ f = np.vectorize(lambda x: x.decode(encoding),otypes=[np.object])
+ data = f(data)
+
+ if nan_rep is None:
+ nan_rep = 'nan'
+
+ data = lib.string_array_replace_from_nan_rep(data, nan_rep)
+ return data.reshape(shape)
+
+def _maybe_convert(values, val_kind, encoding):
if _need_convert(val_kind):
- conv = _get_converter(val_kind)
+ conv = _get_converter(val_kind, encoding)
# conv = np.frompyfunc(conv, 1, 1)
values = conv(values)
return values
-def _get_converter(kind):
+def _get_converter(kind, encoding):
+ kind = _ensure_decoded(kind)
if kind == 'datetime64':
return lambda x: np.array(x, dtype='M8[ns]')
- if kind == 'datetime':
+ elif kind == 'datetime':
return lib.convert_timestamps
+ elif kind == 'string':
+ return lambda x: _unconvert_string_array(x,encoding=encoding)
else: # pragma: no cover
raise ValueError('invalid kind %s' % kind)
def _need_convert(kind):
- if kind in ('datetime', 'datetime64'):
+ kind = _ensure_decoded(kind)
+ if kind in (u'datetime', u'datetime64', u'string'):
return True
return False
@@ -3288,7 +3377,8 @@ class Term(object):
>, >=, <, <=, =, != (not equal) are allowed
value : a value or list of values (required)
queryables : a kinds map (dict of column name -> kind), or None i column is non-indexable
-
+ encoding : an encoding that will encode the query terms
+
Returns
-------
a Term object
@@ -3301,20 +3391,21 @@ class Term(object):
>>> Term('index', ['20121114','20121114'])
>>> Term('index', datetime(2012,11,14))
>>> Term('major_axis>20121114')
- >>> Term('minor_axis', ['A','B'])
+ >>> Term('minor_axis', ['A','U'])
"""
_ops = ['<=', '<', '>=', '>', '!=', '==', '=']
_search = re.compile("^\s*(?P<field>\w+)\s*(?P<op>%s)\s*(?P<value>.+)\s*$" % '|'.join(_ops))
_max_selectors = 31
- def __init__(self, field, op=None, value=None, queryables=None):
+ def __init__(self, field, op=None, value=None, queryables=None, encoding=None):
self.field = None
self.op = None
self.value = None
self.q = queryables or dict()
self.filter = None
self.condition = None
+ self.encoding = encoding
# unpack lists/tuples in field
while(isinstance(field, (tuple, list))):
@@ -3366,16 +3457,16 @@ def __init__(self, field, op=None, value=None, queryables=None):
if self.field is None or self.op is None or self.value is None:
raise ValueError("Could not create this term [%s]" % str(self))
- # = vs ==
+ # = vs ==
if self.op == '=':
self.op = '=='
# we have valid conditions
if self.op in ['>', '>=', '<', '<=']:
- if hasattr(self.value, '__iter__') and len(self.value) > 1:
+ if hasattr(self.value, '__iter__') and len(self.value) > 1 and not isinstance(self.value,basestring):
raise ValueError("an inequality condition cannot have multiple values [%s]" % str(self))
- if not hasattr(self.value, '__iter__'):
+ if not is_list_like(self.value):
self.value = [self.value]
if len(self.q):
@@ -3401,6 +3492,11 @@ def kind(self):
""" the kind of my field """
return self.q.get(self.field)
+ def generate(self, v):
+ """ create and return the op string for this TermValue """
+ val = v.tostring(self.encoding)
+ return "(%s %s %s)" % (self.field, self.op, val)
+
def eval(self):
""" set the numexpr expression for this term """
@@ -3411,40 +3507,39 @@ def eval(self):
if self.is_in_table:
values = [self.convert_value(v) for v in self.value]
else:
- values = [[v, v] for v in self.value]
+ values = [TermValue(v,v,self.kind) for v in self.value]
# equality conditions
if self.op in ['==', '!=']:
# our filter op expression
if self.op == '!=':
- filter_op = lambda axis, values: not axis.isin(values)
+ filter_op = lambda axis, vals: not axis.isin(vals)
else:
- filter_op = lambda axis, values: axis.isin(values)
+ filter_op = lambda axis, vals: axis.isin(vals)
if self.is_in_table:
# too many values to create the expression?
if len(values) <= self._max_selectors:
- self.condition = "(%s)" % ' | '.join(
- ["(%s %s %s)" % (self.field, self.op, v[0]) for v in values])
+ vs = [ self.generate(v) for v in values ]
+ self.condition = "(%s)" % ' | '.join(vs)
# use a filter after reading
else:
- self.filter = (self.field, filter_op, Index([v[1] for v in values]))
+ self.filter = (self.field, filter_op, Index([v.value for v in values]))
else:
- self.filter = (self.field, filter_op, Index([v[1] for v in values]))
+ self.filter = (self.field, filter_op, Index([v.value for v in values]))
else:
if self.is_in_table:
- self.condition = '(%s %s %s)' % (
- self.field, self.op, values[0][0])
-
+ self.condition = self.generate(values[0])
+
else:
raise TypeError("passing a filterable condition to a non-table indexer [%s]" % str(self))
@@ -3452,33 +3547,56 @@ def eval(self):
def convert_value(self, v):
""" convert the expression that is in the term to something that is accepted by pytables """
- if self.kind == 'datetime64' or self.kind == 'datetime' :
+ def stringify(value):
+ value = str(value)
+ if self.encoding is not None:
+ value = value.encode(self.encoding)
+ return value
+
+ kind = _ensure_decoded(self.kind)
+ if kind == u'datetime64' or kind == u'datetime' :
v = lib.Timestamp(v)
if v.tz is not None:
v = v.tz_convert('UTC')
- return [v.value, v]
- elif isinstance(v, datetime) or hasattr(v, 'timetuple') or self.kind == 'date':
+ return TermValue(v,v.value,kind)
+ elif isinstance(v, datetime) or hasattr(v, 'timetuple') or kind == u'date':
v = time.mktime(v.timetuple())
- return [v, Timestamp(v) ]
- elif self.kind == 'integer':
+ return TermValue(v,Timestamp(v),kind)
+ elif kind == u'integer':
v = int(float(v))
- return [v, v]
- elif self.kind == 'float':
+ return TermValue(v,v,kind)
+ elif kind == u'float':
v = float(v)
- return [v, v]
- elif self.kind == 'bool':
+ return TermValue(v,v,kind)
+ elif kind == u'bool':
if isinstance(v, basestring):
- v = not str(v).strip().lower() in ["false", "f", "no", "n", "none", "0", "[]", "{}", ""]
+ v = not v.strip().lower() in [u'false', u'f', u'no', u'n', u'none', u'0', u'[]', u'{}', u'']
else:
v = bool(v)
- return [v, v]
+ return TermValue(v,v,kind)
elif not isinstance(v, basestring):
- v = str(v)
- return [v, v]
+ v = stringify(v)
+ return TermValue(v,stringify(v),u'string')
# string quoting
- return ["'" + v + "'", v]
+ return TermValue(v,stringify(v),u'string')
+
+class TermValue(object):
+ """ hold a term value the we use to construct a condition/filter """
+
+ def __init__(self, value, converted, kind):
+ self.value = value
+ self.converted = converted
+ self.kind = kind
+ def tostring(self, encoding):
+ """ quote the string if not encoded
+ else encode and return """
+ if self.kind == u'string':
+ if encoding is not None:
+ return self.converted
+ return '"%s"' % self.converted
+ return self.converted
class Coordinates(object):
""" holds a returned coordinates list, useful to select the same rows from different tables
@@ -3528,9 +3646,9 @@ def __init__(self, table, where=None, start=None, stop=None, **kwargs):
# create the numexpr & the filter
if self.terms:
- conds = [t.condition for t in self.terms if t.condition is not None]
- if len(conds):
- self.condition = "(%s)" % ' & '.join(conds)
+ terms = [ t for t in self.terms if t.condition is not None ]
+ if len(terms):
+ self.condition = "(%s)" % ' & '.join([ t.condition for t in terms ])
self.filter = []
for t in self.terms:
if t.filter is not None:
@@ -3553,7 +3671,7 @@ def generate(self, where):
where = [where]
queryables = self.table.queryables()
- return [Term(c, queryables=queryables) for c in where]
+ return [Term(c, queryables=queryables, encoding=self.table.encoding) for c in where]
def select(self):
"""
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index d0f03774f2070..8b3d4a475d952 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -17,6 +17,7 @@
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
from pandas import concat, Timestamp
+from pandas.util import py3compat
from numpy.testing.decorators import slow
@@ -115,7 +116,7 @@ def roundtrip(key, obj,**kwargs):
o = tm.makeTimeSeries()
assert_series_equal(o, roundtrip('series',o))
-
+
o = tm.makeStringSeries()
assert_series_equal(o, roundtrip('string_series',o))
@@ -474,6 +475,20 @@ def test_append(self):
store.append('uints', uint_data, data_columns=['u08','u16','u32']) # 64-bit indices not yet supported
tm.assert_frame_equal(store['uints'], uint_data)
+ def test_encoding(self):
+
+ with ensure_clean(self.path) as store:
+ df = DataFrame(dict(A='foo',B='bar'),index=range(5))
+ df.loc[2,'A'] = np.nan
+ df.loc[3,'B'] = np.nan
+ _maybe_remove(store, 'df')
+ store.append('df', df, encoding='ascii')
+ tm.assert_frame_equal(store['df'], df)
+
+ expected = df.reindex(columns=['A'])
+ result = store.select('df',Term('columns=A',encoding='ascii'))
+ tm.assert_frame_equal(result,expected)
+
def test_append_some_nans(self):
with ensure_clean(self.path) as store:
@@ -556,6 +571,7 @@ def test_append_some_nans(self):
def test_append_frame_column_oriented(self):
with ensure_clean(self.path) as store:
+
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
@@ -1261,8 +1277,14 @@ def test_unimplemented_dtypes_table_columns(self):
with ensure_clean(self.path) as store:
+ l = [('date', datetime.date(2001, 1, 2))]
+
+ # py3 ok for unicode
+ if not py3compat.PY3:
+ l.append(('unicode', u'\u03c3'))
+
### currently not supported dtypes ####
- for n, f in [('unicode', u'\u03c3'), ('date', datetime.date(2001, 1, 2))]:
+ for n, f in l:
df = tm.makeDataFrame()
df[n] = f
self.assertRaises(
@@ -2545,6 +2567,7 @@ def test_legacy_0_10_read(self):
# legacy from 0.10
try:
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_0.10.h5'), 'r')
+ str(store)
for k in store.keys():
store.select(k)
finally:
@@ -2554,6 +2577,7 @@ def test_legacy_0_11_read(self):
# legacy from 0.11
try:
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table_0.11.h5'), 'r')
+ str(store)
df = store.select('df')
df1 = store.select('df1')
mi = store.select('mi')
@@ -2585,24 +2609,25 @@ def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs):
# check indicies & nrows
for k in tstore.keys():
- if tstore.is_table(k):
+ if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
self.assert_(orig_t.nrows == new_t.nrows)
- for a in orig_t.axes:
- if a.is_indexed:
- self.assert_(new_t[a.name].is_indexed == True)
- except (Exception), detail:
- pass
+ # check propindixes
+ if propindexes:
+ for a in orig_t.axes:
+ if a.is_indexed:
+ self.assert_(new_t[a.name].is_indexed == True)
+
finally:
safe_close(store)
safe_close(tstore)
safe_remove(new_f)
do_copy()
- do_copy(keys = ['df'])
+ do_copy(keys = ['/a','/b','/df1_mixed'])
do_copy(propindexes = False)
# new table
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index 15791a984ecc5..a80ad5b7d0208 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -14,6 +14,7 @@ from cpython cimport (PyDict_New, PyDict_GetItem, PyDict_SetItem,
Py_INCREF, PyTuple_SET_ITEM,
PyList_Check, PyFloat_Check,
PyString_Check,
+ PyBytes_Check,
PyTuple_SetItem,
PyTuple_New,
PyObject_SetAttrString)
@@ -762,7 +763,7 @@ def max_len_string_array(ndarray[object, ndim=1] arr):
m = 0
for i from 0 <= i < length:
v = arr[i]
- if PyString_Check(v):
+ if PyString_Check(v) or PyBytes_Check(v):
l = len(v)
if l > m:
@@ -772,11 +773,10 @@ def max_len_string_array(ndarray[object, ndim=1] arr):
@cython.boundscheck(False)
@cython.wraparound(False)
-def array_replace_from_nan_rep(ndarray[object, ndim=1] arr, object nan_rep, object replace = None):
+def string_array_replace_from_nan_rep(ndarray[object, ndim=1] arr, object nan_rep, object replace = None):
""" replace the values in the array with replacement if they are nan_rep; return the same array """
- cdef int length = arr.shape[0]
- cdef int i = 0
+ cdef int length = arr.shape[0], i = 0
if replace is None:
replace = np.nan
@@ -788,7 +788,6 @@ def array_replace_from_nan_rep(ndarray[object, ndim=1] arr, object nan_rep, obje
@cython.boundscheck(False)
@cython.wraparound(False)
-
def write_csv_rows(list data, list data_index, int nlevels, list cols, object writer):
cdef int N, j, i, ncols
| closes #3750
recently released PyTables 3.0.0 (and numexpr 2.1), which now support python3
were completely broken
These changed all data to be stored as bytes (with to/from encoding/decoding)
This PR supports an encoding argument (if you really want to encode your data),
and provides transparent access for python3 (and backwards compat) to
even python2 written stores
| https://api.github.com/repos/pandas-dev/pandas/pulls/3762 | 2013-06-05T18:21:30Z | 2013-06-06T00:14:00Z | 2013-06-06T00:14:00Z | 2014-07-05T15:12:01Z |
BUG: (GH3611) revisited; read_excel not passing thru options to ExcelFile.parse | diff --git a/RELEASE.rst b/RELEASE.rst
index b5dd3eef68dea..12d2389a8a59b 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -120,6 +120,8 @@ pandas 0.11.1
- Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_)
- ``as_matrix`` with mixed signed and unsigned dtypes will result in 2 x the lcd of the unsigned
as an int, maxing with ``int64``, to avoid precision issues (GH3733_)
+ - ``na_values`` in a list provided to ``read_csv/read_excel`` will match string and numeric versions
+ e.g. ``na_values=['99']`` will match 99 whether the column ends up being int, float, or string (GH3611_)
**Bug Fixes**
@@ -174,7 +176,7 @@ pandas 0.11.1
- Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
``np.nan`` or ``np.inf`` as appropriate (GH3590_)
- Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_)
- - Fix ``read_csv`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
+ - Fix ``read_csv/read_excel`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
was failing (GH3611_)
- Disable HTML output in qtconsole again. (GH3657_)
- Reworked the new repr display logic, which users found confusing. (GH3663_)
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index c23056ce76a62..5b7d13acd99ec 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -13,10 +13,7 @@
from pandas.tseries.period import Period
import json
-def read_excel(path_or_buf, sheetname, header=0, skiprows=None, skip_footer=0,
- index_col=None, parse_cols=None, parse_dates=False,
- date_parser=None, na_values=None, thousands=None, chunksize=None,
- kind=None, **kwds):
+def read_excel(path_or_buf, sheetname, kind=None, **kwds):
"""Read an Excel table into a pandas DataFrame
Parameters
@@ -47,16 +44,7 @@ def read_excel(path_or_buf, sheetname, header=0, skiprows=None, skip_footer=0,
DataFrame from the passed in Excel file
"""
return ExcelFile(path_or_buf,kind=kind).parse(sheetname=sheetname,
- header=0, skiprows=None,
- skip_footer=0,
- index_col=None,
- parse_cols=None,
- parse_dates=False,
- date_parser=None,
- na_values=None,
- thousands=None,
- chunksize=None, kind=None,
- **kwds)
+ kind=kind, **kwds)
class ExcelFile(object):
"""
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 60028d3f3f831..556d1ab1976b4 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1853,7 +1853,20 @@ def _clean_na_values(na_values, keep_default_na=True):
return na_values
def _stringify_na_values(na_values):
- return [ str(x) for x in na_values ]
+ """ return a stringified and numeric for these values """
+ result = []
+ for x in na_values:
+ result.append(str(x))
+ result.append(x)
+ try:
+ result.append(float(x))
+ except:
+ pass
+ try:
+ result.append(int(x))
+ except:
+ pass
+ return result
def _clean_index_names(columns, index_col):
if not _is_index_col(index_col):
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index 28242cda0b46b..39e1042d125a2 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -285,6 +285,15 @@ def _check_extension(self, ext):
recons = read_excel(path, 'test1', index_col=0, na_values=['NA'])
tm.assert_frame_equal(self.frame, recons)
+ # GH 3611
+ self.frame.to_excel(path, 'test1', na_rep='88')
+ recons = read_excel(path, 'test1', index_col=0, na_values=['88'])
+ tm.assert_frame_equal(self.frame, recons)
+
+ self.frame.to_excel(path, 'test1', na_rep='88')
+ recons = read_excel(path, 'test1', index_col=0, na_values=[88,88.0])
+ tm.assert_frame_equal(self.frame, recons)
+
def test_excel_roundtrip_xls_mixed(self):
_skip_if_no_xlrd()
_skip_if_no_xlwt()
diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx
index 7d13aa8ce6765..5343819b9fbfe 100644
--- a/pandas/src/inference.pyx
+++ b/pandas/src/inference.pyx
@@ -373,12 +373,12 @@ def maybe_convert_numeric(ndarray[object] values, set na_values,
for i from 0 <= i < n:
val = values[i]
- if util.is_float_object(val):
- floats[i] = complexes[i] = val
- seen_float = 1
- elif val in na_values:
+ if val in na_values:
floats[i] = complexes[i] = nan
seen_float = 1
+ elif util.is_float_object(val):
+ floats[i] = complexes[i] = val
+ seen_float = 1
elif val is None:
floats[i] = complexes[i] = nan
seen_float = 1
| recloses #3611
API: add string and numeric versions of na_values when parsing
| https://api.github.com/repos/pandas-dev/pandas/pulls/3758 | 2013-06-05T11:56:43Z | 2013-06-05T14:02:43Z | 2013-06-05T14:02:43Z | 2014-07-16T08:11:38Z |
DOC: document bs4/lxml/html5lib issues | diff --git a/README.rst b/README.rst
index a74a155cf8a27..daea702476ebc 100644
--- a/README.rst
+++ b/README.rst
@@ -93,18 +93,49 @@ Optional dependencies
- openpyxl version 1.6.1 or higher, for writing .xlsx files
- xlrd >= 0.9.0
- Needed for Excel I/O
- - Both `html5lib <https://github.com/html5lib/html5lib-python>`__ **and**
- `Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for
- reading HTML tables
+ - `boto <https://pypi.python.org/pypi/boto>`__: necessary for Amazon S3
+ access.
+ - One of the following combinations of libraries is needed to use the
+ top-level :func:`~pandas.io.html.read_html` function:
+
+ - `BeautifulSoup4`_ and `html5lib`_ (Any recent version of `html5lib`_ is
+ okay.)
+ - `BeautifulSoup4`_ and `lxml`_
+ - `BeautifulSoup4`_ and `html5lib`_ and `lxml`_
+ - Only `lxml`_, although see :ref:`HTML reading gotchas <html-gotchas>`
+ for reasons as to why you should probably **not** take this approach.
.. warning::
- You need to install an older version of Beautiful Soup:
- - Version 4.1.3 and 4.0.2 have been confirmed for 64-bit Ubuntu/Debian
- - Version 4.0.2 have been confirmed for 32-bit Ubuntu
+ - if you install `BeautifulSoup4`_ you must install either
+ `lxml`_ or `html5lib`_ or both.
+ :func:`~pandas.io.html.read_html` will **not** work with *only*
+ `BeautifulSoup4`_ installed.
+ - You are highly encouraged to read :ref:`HTML reading gotchas
+ <html-gotchas>`. It explains issues surrounding the installation and
+ usage of the above three libraries
+ - You may need to install an older version of `BeautifulSoup4`_:
+ - Versions 4.2.1, 4.1.3 and 4.0.2 have been confirmed for 64 and
+ 32-bit Ubuntu/Debian
+ - Additionally, if you're using `Anaconda`_ you should definitely
+ read :ref:`the gotchas about HTML parsing libraries <html-gotchas>`
- - Any recent version of ``html5lib`` is okay.
- - `boto <https://pypi.python.org/pypi/boto>`__: necessary for Amazon S3 access.
+ .. note::
+
+ - if you're on a system with ``apt-get`` you can do
+
+ .. code-block:: sh
+
+ sudo apt-get build-dep python-lxml
+
+ to get the necessary dependencies for installation of `lxml`_. This
+ will prevent further headaches down the line.
+
+
+.. _html5lib: https://github.com/html5lib/html5lib-python
+.. _BeautifulSoup4: http://www.crummy.com/software/BeautifulSoup
+.. _lxml: http://lxml.de
+.. _Anaconda: https://store.continuum.io/cshop/anaconda
Installation from sources
diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst
index 7b184f6d5043f..422e3cec59386 100644
--- a/doc/source/gotchas.rst
+++ b/doc/source/gotchas.rst
@@ -344,3 +344,112 @@ where the data copying occurs.
See `this link <http://stackoverflow.com/questions/13592618/python-pandas-dataframe-thread-safe>`__
for more information.
+
+.. _html-gotchas:
+
+HTML Table Parsing
+------------------
+There are some versioning issues surrounding the libraries that are used to
+parse HTML tables in the top-level pandas io function ``read_html``.
+
+**Issues with** |lxml|_
+
+ * Benefits
+
+ * |lxml|_ is very fast
+
+ * |lxml|_ requires Cython to install correctly.
+
+ * Drawbacks
+
+ * |lxml|_ does *not* make any guarantees about the results of it's parse
+ *unless* it is given |svm|_.
+
+ * In light of the above, we have chosen to allow you, the user, to use the
+ |lxml|_ backend, but **this backend will use** |html5lib|_ if |lxml|_
+ fails to parse
+
+ * It is therefore *highly recommended* that you install both
+ |BeautifulSoup4|_ and |html5lib|_, so that you will still get a valid
+ result (provided everything else is valid) even if |lxml|_ fails.
+
+**Issues with** |BeautifulSoup4|_ **using** |lxml|_ **as a backend**
+
+ * The above issues hold here as well since |BeautifulSoup4|_ is essentially
+ just a wrapper around a parser backend.
+
+**Issues with** |BeautifulSoup4|_ **using** |html5lib|_ **as a backend**
+
+ * Benefits
+
+ * |html5lib|_ is far more lenient than |lxml|_ and consequently deals
+ with *real-life markup* in a much saner way rather than just, e.g.,
+ dropping an element without notifying you.
+
+ * |html5lib|_ *generates valid HTML5 markup from invalid markup
+ automatically*. This is extremely important for parsing HTML tables,
+ since it guarantees a valid document. However, that does NOT mean that
+ it is "correct", since the process of fixing markup does not have a
+ single definition.
+
+ * |html5lib|_ is pure Python and requires no additional build steps beyond
+ its own installation.
+
+ * Drawbacks
+
+ * The biggest drawback to using |html5lib|_ is that it is slow as
+ molasses. However consider the fact that many tables on the web are not
+ big enough for the parsing algorithm runtime to matter. It is more
+ likely that the bottleneck will be in the process of reading the raw
+ text from the url over the web, i.e., IO (input-output). For very large
+ tables, this might not be true.
+
+**Issues with using** |Anaconda|_
+
+ * `Anaconda`_ ships with `lxml`_ version 3.2.0; the following workaround for
+ `Anaconda`_ was successfully used to deal with the versioning issues
+ surrounding `lxml`_ and `BeautifulSoup4`_.
+
+ .. note::
+
+ Unless you have *both*:
+
+ * A strong restriction on the upper bound of the runtime of some code
+ that incorporates :func:`~pandas.io.html.read_html`
+ * Complete knowledge that the HTML you will be parsing will be 100%
+ valid at all times
+
+ then you should install `html5lib`_ and things will work swimmingly
+ without you having to muck around with `conda`. If you want the best of
+ both worlds then install both `html5lib`_ and `lxml`_. If you do install
+ `lxml`_ then you need to perform the following commands to ensure that
+ lxml will work correctly:
+
+ .. code-block:: sh
+
+ # remove the included version
+ conda remove lxml
+
+ # install the latest version of lxml
+ pip install 'git+git://github.com/lxml/lxml.git'
+
+ # install the latest version of beautifulsoup4
+ pip install 'bzr+lp:beautifulsoup'
+
+ Note that you need `bzr <http://bazaar.canonical.com/en>`_ and `git
+ <http://git-scm.com>`_ installed to perform the last two operations.
+
+.. |svm| replace:: **strictly valid markup**
+.. _svm: http://validator.w3.org/docs/help.html#validation_basics
+
+.. |html5lib| replace:: **html5lib**
+.. _html5lib: https://github.com/html5lib/html5lib-python
+
+.. |BeautifulSoup4| replace:: **BeautifulSoup4**
+.. _BeautifulSoup4: http://www.crummy.com/software/BeautifulSoup
+
+.. |lxml| replace:: **lxml**
+.. _lxml: http://lxml.de
+
+.. |Anaconda| replace:: **Anaconda**
+.. _Anaconda: https://store.continuum.io/cshop/anaconda
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 005e213fe24de..6868969c1b968 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -102,17 +102,49 @@ Optional Dependencies
* `openpyxl <http://packages.python.org/openpyxl/>`__, `xlrd/xlwt <http://www.python-excel.org/>`__
* openpyxl version 1.6.1 or higher
* Needed for Excel I/O
- * Both `html5lib <https://github.com/html5lib/html5lib-python>`__ **and**
- `Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for
- reading HTML tables
+ * `boto <https://pypi.python.org/pypi/boto>`__: necessary for Amazon S3
+ access.
+ * One of the following combinations of libraries is needed to use the
+ top-level :func:`~pandas.io.html.read_html` function:
+
+ * `BeautifulSoup4`_ and `html5lib`_ (Any recent version of `html5lib`_ is
+ okay.)
+ * `BeautifulSoup4`_ and `lxml`_
+ * `BeautifulSoup4`_ and `html5lib`_ and `lxml`_
+ * Only `lxml`_, although see :ref:`HTML reading gotchas <html-gotchas>`
+ for reasons as to why you should probably **not** take this approach.
.. warning::
- You need to install an older version of Beautiful Soup:
- - Version 4.1.3 and 4.0.2 have been confirmed for 64-bit Ubuntu/Debian
- - Version 4.0.2 have been confirmed for 32-bit Ubuntu
+ * if you install `BeautifulSoup4`_ you must install either
+ `lxml`_ or `html5lib`_ or both.
+ :func:`~pandas.io.html.read_html` will **not** work with *only*
+ `BeautifulSoup4`_ installed.
+ * You are highly encouraged to read :ref:`HTML reading gotchas
+ <html-gotchas>`. It explains issues surrounding the installation and
+ usage of the above three libraries
+ * You may need to install an older version of `BeautifulSoup4`_:
+ - Versions 4.2.1, 4.1.3 and 4.0.2 have been confirmed for 64 and
+ 32-bit Ubuntu/Debian
+ * Additionally, if you're using `Anaconda`_ you should definitely
+ read :ref:`the gotchas about HTML parsing libraries <html-gotchas>`
- * Any recent version of ``html5lib`` is okay.
+ .. note::
+
+ * if you're on a system with ``apt-get`` you can do
+
+ .. code-block:: sh
+
+ sudo apt-get build-dep python-lxml
+
+ to get the necessary dependencies for installation of `lxml`_. This
+ will prevent further headaches down the line.
+
+
+.. _html5lib: https://github.com/html5lib/html5lib-python
+.. _BeautifulSoup4: http://www.crummy.com/software/BeautifulSoup
+.. _lxml: http://lxml.de
+.. _Anaconda: https://store.continuum.io/cshop/anaconda
.. note::
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 27d3e21fea2c4..802ab08e85932 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -943,6 +943,12 @@ HTML
Reading HTML Content
~~~~~~~~~~~~~~~~~~~~~~
+.. warning::
+
+ We **highly encourage** you to read the :ref:`HTML parsing gotchas
+ <html-gotchas>` regarding the issues surrounding the
+ BeautifulSoup4/html5lib/lxml parsers.
+
.. _io.read_html:
.. versionadded:: 0.11.1
| to summarize this mess of deps and check my thoughts here
### if a user insists on using `lxml` (either with or without `bs4`)
- warning about its inability to deal with the modern web
- warning saying that the user should install `html5lib` and `bs4` so that a page will parse even if `lxml` barfs
- test coverage for failing and passing pages (things that would parse "correctly" before will now fail since the parser will be extremely strict) thus only pages validated by the DTD will even try to parse
(to be fair I was really enthusiastic about `lxml` because of how fast it is but now i'm sort of against it)
### what users should really do
- install `bs4`
- install `html5lib`
- happily parse things into `DataFrame`s with a low amount of stress
### anaconda + `lxml` (no `bs4`)
- no problems (modulo the above warnings)
@wesm maybe you could chime in about what (if anything) you did to `libxml2`/`libxslt` i wasn't clear on the details from the mailing list.
### anaconda + `bs4` + `lxml`
- make sure that you're using `bs4==4.2.1`
- make sure that you're using `lxml==3.2.1`
- workout the details of how to do this with `conda` (i did this already, but it was 2 or 3 AM so I'm a little foggy on the details)
### anaconda + `bs4` + `html5lib` (no `lxml`)
- happy parsing of HTML tables
this will be in a gotcha that will be linked to from a warning at the top of the read html section of io.rst
| https://api.github.com/repos/pandas-dev/pandas/pulls/3751 | 2013-06-03T22:24:22Z | 2013-06-04T20:02:50Z | 2013-06-04T20:02:50Z | 2014-07-16T08:11:35Z |
BUG: (GH3748) Incorrectly read a HDFStore multi-index Frame witha column specification | diff --git a/RELEASE.rst b/RELEASE.rst
index bbfc9fb948ef4..2b90edaa327b0 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -81,6 +81,8 @@ pandas 0.11.1
- When removing an object from a ``HDFStore``, ``remove(key)`` raises
``KeyError`` if the key is not a valid store object.
+ - In an ``HDFStore``, raise a ``TypeError`` on passing ``where`` or ``columns``
+ to select with a Storer; these are invalid parameters at this time
- The repr() for (Multi)Index now obeys display.max_seq_items rather
then numpy threshold print options. (GH3426_, GH3466_)
- Added mangle_dupe_cols option to read_table/csv, allowing users
@@ -197,6 +199,7 @@ pandas 0.11.1
their first argument (GH3702_)
- Fix file tokenization error with \r delimiter and quoted fields (GH3453_)
- Groupby transform with item-by-item not upcasting correctly (GH3740_)
+ - Incorrectly read a HDFStore multi-index Frame witha column specification (GH3748_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -280,6 +283,7 @@ pandas 0.11.1
.. _GH3667: https://github.com/pydata/pandas/issues/3667
.. _GH3733: https://github.com/pydata/pandas/issues/3733
.. _GH3740: https://github.com/pydata/pandas/issues/3740
+.. _GH3748: https://github.com/pydata/pandas/issues/3748
pandas 0.11.0
=============
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 0a86d72a05f16..5a480e08effba 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1664,6 +1664,12 @@ def f(values, freq=None, tz=None):
return f
return klass
+ def validate_read(self, kwargs):
+ if kwargs.get('columns') is not None:
+ raise TypeError("cannot pass a column specification when reading a Storer")
+ if kwargs.get('where') is not None:
+ raise TypeError("cannot pass a where specification when reading a Storer")
+
@property
def is_exists(self):
return True
@@ -1921,6 +1927,7 @@ def read_index_legacy(self, key):
class LegacySeriesStorer(LegacyStorer):
def read(self, **kwargs):
+ self.validate_read(kwargs)
index = self.read_index_legacy('index')
values = self.read_array('values')
return Series(values, index=index)
@@ -1928,6 +1935,7 @@ def read(self, **kwargs):
class LegacyFrameStorer(LegacyStorer):
def read(self, **kwargs):
+ self.validate_read(kwargs)
index = self.read_index_legacy('index')
columns = self.read_index_legacy('columns')
values = self.read_array('values')
@@ -1945,6 +1953,7 @@ def shape(self):
return None
def read(self, **kwargs):
+ self.validate_read(kwargs)
index = self.read_index('index')
if len(index) > 0:
values = self.read_array('values')
@@ -1963,6 +1972,7 @@ class SparseSeriesStorer(GenericStorer):
attributes = ['name','fill_value','kind']
def read(self, **kwargs):
+ self.validate_read(kwargs)
index = self.read_index('index')
sp_values = self.read_array('sp_values')
sp_index = self.read_index('sp_index')
@@ -1983,6 +1993,7 @@ class SparseFrameStorer(GenericStorer):
attributes = ['default_kind','default_fill_value']
def read(self, **kwargs):
+ self.validate_read(kwargs)
columns = self.read_index('columns')
sdict = {}
for c in columns:
@@ -2013,6 +2024,7 @@ class SparsePanelStorer(GenericStorer):
attributes = ['default_kind','default_fill_value']
def read(self, **kwargs):
+ self.validate_read(kwargs)
items = self.read_index('items')
sdict = {}
@@ -2075,6 +2087,8 @@ def shape(self):
return None
def read(self, **kwargs):
+ self.validate_read(kwargs)
+
axes = []
for i in xrange(self.ndim):
ax = self.read_index('axis%d' % i)
@@ -3124,8 +3138,12 @@ def write(self, obj, data_columns=None, **kwargs):
self.levels = obj.index.names
return super(AppendableMultiFrameTable, self).write(obj=obj.reset_index(), data_columns=data_columns, **kwargs)
- def read(self, *args, **kwargs):
- df = super(AppendableMultiFrameTable, self).read(*args, **kwargs)
+ def read(self, columns=None, **kwargs):
+ if columns is not None:
+ for n in self.levels:
+ if n not in columns:
+ columns.insert(0, n)
+ df = super(AppendableMultiFrameTable, self).read(columns=columns, **kwargs)
df.set_index(self.levels, inplace=True)
return df
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 73d2e23ae4384..d0f03774f2070 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -1072,6 +1072,26 @@ def test_append_hierarchical(self):
result = store.select('mi')
tm.assert_frame_equal(result, df)
+ # GH 3748
+ result = store.select('mi',columns=['A','B'])
+ expected = df.reindex(columns=['A','B'])
+ tm.assert_frame_equal(result,expected)
+
+ with tm.ensure_clean('test.hdf') as path:
+ df.to_hdf(path,'df',table=True)
+ result = read_hdf(path,'df',columns=['A','B'])
+ expected = df.reindex(columns=['A','B'])
+ tm.assert_frame_equal(result,expected)
+
+ def test_pass_spec_to_storer(self):
+
+ df = tm.makeDataFrame()
+
+ with ensure_clean(self.path) as store:
+ store.put('df',df)
+ self.assertRaises(TypeError, store.select, 'df', columns=['A'])
+ self.assertRaises(TypeError, store.select, 'df',where=[('columns=A')])
+
def test_append_misc(self):
with ensure_clean(self.path) as store:
| closes #3748
API: raise a `TypeError` on passing `where` or `columns` to select with a Storer
e.g. these are invalid parameters at this time
| https://api.github.com/repos/pandas-dev/pandas/pulls/3749 | 2013-06-03T19:08:40Z | 2013-06-03T21:06:35Z | 2013-06-03T21:06:34Z | 2014-07-16T08:11:33Z |
ENH: Experimental CustomBusinessDay DateOffset class. fixes #2301 | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index f11bf60549d93..e2e4e81f13199 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -382,6 +382,7 @@ frequency increment. Specific offset logic like "month", "business day", or
DateOffset, "Generic offset class, defaults to 1 calendar day"
BDay, "business day (weekday)"
+ CDay, "custom business day (experimental)"
Week, "one week, optionally anchored on a day of the week"
WeekOfMonth, "the x-th day of the y-th week of each month"
MonthEnd, "calendar month end"
@@ -477,6 +478,54 @@ Another example is parameterizing ``YearEnd`` with the specific ending month:
.. _timeseries.alias:
+Custom Business Days (Experimental)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``CDay`` or ``CustomBusinessDay`` class provides a parametric
+``BusinessDay`` class which can be used to create customized business day
+calendars which account for local holidays and local weekend conventions.
+
+.. ipython:: python
+
+ from pandas.tseries.offsets import CustomBusinessDay
+ # As an interesting example, let's look at Egypt where
+ # a Friday-Saturday weekend is observed.
+ weekmask_egypt = 'Sun Mon Tue Wed Thu'
+ # They also observe International Workers' Day so let's
+ # add that for a couple of years
+ holidays = ['2012-05-01', datetime(2013, 5, 1), np.datetime64('2014-05-01')]
+ bday_egypt = CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt)
+ dt = datetime(2013, 4, 30)
+ print dt + 2 * bday_egypt
+ dts = date_range(dt, periods=5, freq=bday_egypt).to_series()
+ print dts
+ print Series(dts.weekday, dts).map(Series('Mon Tue Wed Thu Fri Sat Sun'.split()))
+
+.. note::
+
+ The frequency string 'C' is used to indicate that a CustomBusinessDay
+ DateOffset is used, it is important to note that since CustomBusinessDay is
+ a parameterised type, instances of CustomBusinessDay may differ and this is
+ not detectable from the 'C' frequency string. The user therefore needs to
+ ensure that the 'C' frequency string is used consistently within the user's
+ application.
+
+
+.. note::
+
+ This uses the ``numpy.busdaycalendar`` API introduced in Numpy 1.7 and
+ therefore requires Numpy 1.7.0 or newer.
+
+.. warning::
+
+ There are known problems with the timezone handling in Numpy 1.7 and users
+ should therefore use this **experimental(!)** feature with caution and at
+ their own risk.
+
+ To the extent that the ``datetime64`` and ``busdaycalendar`` APIs in Numpy
+ have to change to fix the timezone issues, the behaviour of the
+ ``CustomBusinessDay`` class may have to change in future versions.
+
Offset Aliases
~~~~~~~~~~~~~~
@@ -489,6 +538,7 @@ frequencies. We will refer to these aliases as *offset aliases*
:widths: 15, 100
"B", "business day frequency"
+ "C", "custom business day frequency (experimental)"
"D", "calendar day frequency"
"W", "weekly frequency"
"M", "month end frequency"
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 3202efbcef83a..07101ed78ba24 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -319,6 +319,32 @@ Other Enhancements
- DatetimeIndexes no longer try to convert mixed-integer indexes during join
operations (:issue:`3877`)
+Experimental Features
+~~~~~~~~~~~~~~~~~~~~~
+
+ - Added experimental ``CustomBusinessDay`` class to support ``DateOffsets``
+ with custom holiday calendars and custom weekmasks. (GH2301_)
+
+ .. note::
+
+ This uses the ``numpy.busdaycalendar`` API introduced in Numpy 1.7 and
+ therefore requires Numpy 1.7.0 or newer.
+
+ .. ipython:: python
+
+ from pandas.tseries.offsets import CustomBusinessDay
+ # As an interesting example, let's look at Egypt where
+ # a Friday-Saturday weekend is observed.
+ weekmask_egypt = 'Sun Mon Tue Wed Thu'
+ # They also observe International Workers' Day so let's
+ # add that for a couple of years
+ holidays = ['2012-05-01', datetime(2013, 5, 1), np.datetime64('2014-05-01')]
+ bday_egypt = CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt)
+ dt = datetime(2013, 4, 30)
+ print dt + 2 * bday_egypt
+ dts = date_range(dt, periods=5, freq=bday_egypt).to_series()
+ print Series(dts.weekday, dts).map(Series('Mon Tue Wed Thu Fri Sat Sun'.split()))
+
Bug Fixes
~~~~~~~~~
diff --git a/pandas/core/datetools.py b/pandas/core/datetools.py
index aea7b2b6b5462..d6da94856b140 100644
--- a/pandas/core/datetools.py
+++ b/pandas/core/datetools.py
@@ -8,6 +8,12 @@
day = DateOffset()
bday = BDay()
businessDay = bday
+try:
+ cday = CDay()
+ customBusinessDay = CustomBusinessDay()
+except NotImplementedError:
+ cday = None
+ customBusinessDay = None
monthEnd = MonthEnd()
yearEnd = YearEnd()
yearBegin = YearBegin()
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index f54bfee55782a..51b8e5d042ca9 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -114,14 +114,21 @@ def _get_freq_str(base, mult=1):
# Offset names ("time rules") and related functions
-from pandas.tseries.offsets import (Day, BDay, Hour, Minute, Second, Milli,
- Week, Micro, MonthEnd, MonthBegin,
- BMonthBegin, BMonthEnd, YearBegin, YearEnd,
- BYearBegin, BYearEnd, QuarterBegin,
- QuarterEnd, BQuarterBegin, BQuarterEnd)
+from pandas.tseries.offsets import (Micro, Milli, Second, Minute, Hour,
+ Day, BDay, CDay, Week, MonthBegin,
+ MonthEnd, BMonthBegin, BMonthEnd,
+ QuarterBegin, QuarterEnd, BQuarterBegin,
+ BQuarterEnd, YearBegin, YearEnd,
+ BYearBegin, BYearEnd,
+ )
+try:
+ cday = CDay()
+except NotImplementedError:
+ cday = None
_offset_map = {
'D': Day(),
+ 'C': cday,
'B': BDay(),
'H': Hour(),
'T': Minute(),
@@ -278,6 +285,7 @@ def _get_freq_str(base, mult=1):
'BAS': 'A',
'MS': 'M',
'D': 'D',
+ 'C': 'C',
'B': 'B',
'T': 'T',
'S': 'S',
@@ -1004,15 +1012,17 @@ def is_subperiod(source, target):
if _is_quarterly(source):
return _quarter_months_conform(_get_rule_month(source),
_get_rule_month(target))
- return source in ['D', 'B', 'M', 'H', 'T', 'S']
+ return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S']
elif _is_quarterly(target):
- return source in ['D', 'B', 'M', 'H', 'T', 'S']
+ return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S']
elif target == 'M':
- return source in ['D', 'B', 'H', 'T', 'S']
+ return source in ['D', 'C', 'B', 'H', 'T', 'S']
elif _is_weekly(target):
- return source in [target, 'D', 'B', 'H', 'T', 'S']
+ return source in [target, 'D', 'C', 'B', 'H', 'T', 'S']
elif target == 'B':
return source in ['B', 'H', 'T', 'S']
+ elif target == 'C':
+ return source in ['C', 'H', 'T', 'S']
elif target == 'D':
return source in ['D', 'H', 'T', 'S']
elif target == 'H':
@@ -1055,17 +1065,19 @@ def is_superperiod(source, target):
smonth = _get_rule_month(source)
tmonth = _get_rule_month(target)
return _quarter_months_conform(smonth, tmonth)
- return target in ['D', 'B', 'M', 'H', 'T', 'S']
+ return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S']
elif _is_quarterly(source):
- return target in ['D', 'B', 'M', 'H', 'T', 'S']
+ return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S']
elif source == 'M':
- return target in ['D', 'B', 'H', 'T', 'S']
+ return target in ['D', 'C', 'B', 'H', 'T', 'S']
elif _is_weekly(source):
- return target in [source, 'D', 'B', 'H', 'T', 'S']
+ return target in [source, 'D', 'C', 'B', 'H', 'T', 'S']
elif source == 'B':
- return target in ['D', 'B', 'H', 'T', 'S']
+ return target in ['D', 'C', 'B', 'H', 'T', 'S']
+ elif source == 'C':
+ return target in ['D', 'C', 'B', 'H', 'T', 'S']
elif source == 'D':
- return target in ['D', 'B', 'H', 'T', 'S']
+ return target in ['D', 'C', 'B', 'H', 'T', 'S']
elif source == 'H':
return target in ['H', 'T', 'S']
elif source == 'T':
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index f560a6bf6e717..56df301b5b027 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -11,7 +11,7 @@
from pandas.tseries.frequencies import (
infer_freq, to_offset, get_period_alias,
Resolution, get_reso_string)
-from pandas.tseries.offsets import DateOffset, generate_range, Tick
+from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay
from pandas.tseries.tools import parse_time_string, normalize_date
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
@@ -1740,6 +1740,57 @@ def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
freq=freq, tz=tz, normalize=normalize, name=name)
+def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
+ normalize=True, name=None, **kwargs):
+ """
+ **EXPERIMENTAL** Return a fixed frequency datetime index, with
+ CustomBusinessDay as the default frequency
+
+ .. warning:: EXPERIMENTAL
+
+ The CustomBusinessDay class is not officially supported and the API is
+ likely to change in future versions. Use this at your own risk.
+
+ Parameters
+ ----------
+ start : string or datetime-like, default None
+ Left bound for generating dates
+ end : string or datetime-like, default None
+ Right bound for generating dates
+ periods : integer or None, default None
+ If None, must specify start and end
+ freq : string or DateOffset, default 'C' (CustomBusinessDay)
+ Frequency strings can have multiples, e.g. '5H'
+ tz : string or None
+ Time zone name for returning localized DatetimeIndex, for example
+ Asia/Beijing
+ normalize : bool, default False
+ Normalize start/end dates to midnight before generating date range
+ name : str, default None
+ Name for the resulting index
+ weekmask : str, Default 'Mon Tue Wed Thu Fri'
+ weekmask of valid business days, passed to ``numpy.busdaycalendar``
+ holidays : list
+ list/array of dates to exclude from the set of valid business days,
+ passed to ``numpy.busdaycalendar``
+
+ Notes
+ -----
+ 2 of start, end, or periods must be specified
+
+ Returns
+ -------
+ rng : DatetimeIndex
+ """
+
+ if freq=='C':
+ holidays = kwargs.pop('holidays', [])
+ weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri')
+ freq = CDay(holidays=holidays, weekmask=weekmask)
+ return DatetimeIndex(start=start, end=end, periods=periods, freq=freq,
+ tz=tz, normalize=normalize, name=name, **kwargs)
+
+
def _to_m8(key, tz=None):
'''
Timestamp-like => dt64
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 9585d1f81e81d..deefd9f489611 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -1,4 +1,5 @@
from datetime import date, datetime, timedelta
+import numpy as np
from pandas.tseries.tools import to_datetime
@@ -7,7 +8,7 @@
import pandas.lib as lib
import pandas.tslib as tslib
-__all__ = ['Day', 'BusinessDay', 'BDay',
+__all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay',
'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd',
'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd',
'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd',
@@ -100,7 +101,8 @@ def _should_cache(self):
def _params(self):
attrs = [(k, v) for k, v in vars(self).iteritems()
- if k not in ['kwds', '_offset', 'name', 'normalize']]
+ if k not in ['kwds', '_offset', 'name', 'normalize',
+ 'busdaycalendar']]
attrs.extend(self.kwds.items())
attrs = sorted(set(attrs))
@@ -359,6 +361,121 @@ def onOffset(cls, dt):
return dt.weekday() < 5
+class CustomBusinessDay(BusinessDay):
+ """
+ **EXPERIMENTAL** DateOffset subclass representing possibly n business days
+ excluding holidays
+
+ .. warning:: EXPERIMENTAL
+
+ This class is not officially supported and the API is likely to change
+ in future versions. Use this at your own risk.
+
+ Parameters
+ ----------
+ n : int, default 1
+ offset : timedelta, default timedelta(0)
+ normalize : bool, default False
+ Normalize start/end dates to midnight before generating date range
+ weekmask : str, Default 'Mon Tue Wed Thu Fri'
+ weekmask of valid business days, passed to ``numpy.busdaycalendar``
+ holidays : list
+ list/array of dates to exclude from the set of valid business days,
+ passed to ``numpy.busdaycalendar``
+ """
+
+ _cacheable = False
+
+ def __init__(self, n=1, **kwds):
+ # Check we have the required numpy version
+ from distutils.version import LooseVersion
+ if LooseVersion(np.__version__) < '1.7.0':
+ raise NotImplementedError("CustomBusinessDay requires numpy >= "
+ "1.7.0. Current version: " +
+ np.__version__)
+
+ self.n = int(n)
+ self.kwds = kwds
+ self.offset = kwds.get('offset', timedelta(0))
+ self.normalize = kwds.get('normalize', False)
+ self.weekmask = kwds.get('weekmask', 'Mon Tue Wed Thu Fri')
+
+ holidays = kwds.get('holidays', [])
+ holidays = [self._to_dt64(dt, dtype='datetime64[D]') for dt in
+ holidays]
+ self.holidays = tuple(sorted(holidays))
+ self.kwds['holidays'] = self.holidays
+ self._set_busdaycalendar()
+
+ def _set_busdaycalendar(self):
+ holidays = np.array(self.holidays, dtype='datetime64[D]')
+ self.busdaycalendar = np.busdaycalendar(holidays=holidays,
+ weekmask=self.weekmask)
+
+ def __getstate__(self):
+ """"Return a pickleable state"""
+ state = self.__dict__.copy()
+ del state['busdaycalendar']
+ return state
+
+ def __setstate__(self, state):
+ """Reconstruct an instance from a pickled state"""
+ self.__dict__ = state
+ self._set_busdaycalendar()
+
+ @property
+ def rule_code(self):
+ return 'C'
+
+ @staticmethod
+ def _to_dt64(dt, dtype='datetime64'):
+ if isinstance(dt, (datetime, basestring)):
+ dt = np.datetime64(dt, dtype=dtype)
+ if isinstance(dt, np.datetime64):
+ dt = dt.astype(dtype)
+ else:
+ raise TypeError('dt must be datestring, datetime or datetime64')
+ return dt
+
+ def apply(self, other):
+ if isinstance(other, datetime):
+ dtype = type(other)
+ elif isinstance(other, np.datetime64):
+ dtype = other.dtype
+ elif isinstance(other, (timedelta, Tick)):
+ return BDay(self.n, offset=self.offset + other,
+ normalize=self.normalize)
+ else:
+ raise TypeError('Only know how to combine trading day with '
+ 'datetime, datetime64 or timedelta!')
+ dt64 = self._to_dt64(other)
+
+ day64 = dt64.astype('datetime64[D]')
+ time = dt64 - day64
+
+ if self.n<=0:
+ roll = 'forward'
+ else:
+ roll = 'backward'
+
+ result = np.busday_offset(day64, self.n, roll=roll,
+ busdaycal=self.busdaycalendar)
+
+ if not self.normalize:
+ result = result + time
+
+ result = result.astype(dtype)
+
+ if self.offset:
+ result = result + self.offset
+
+ return result
+
+ def onOffset(self, dt):
+ day64 = self._to_dt64(dt).astype('datetime64[D]')
+ return np.is_busday(day64, busdaycal=self.busdaycalendar)
+
+
class MonthEnd(DateOffset, CacheableOffset):
"""DateOffset of one month end"""
@@ -1169,6 +1286,7 @@ class Nano(Tick):
BDay = BusinessDay
BMonthEnd = BusinessMonthEnd
BMonthBegin = BusinessMonthBegin
+CDay = CustomBusinessDay
def _get_firstbday(wkday):
diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py
index 7fbdbbe328c84..4c46dcccbce1c 100644
--- a/pandas/tseries/tests/test_daterange.py
+++ b/pandas/tseries/tests/test_daterange.py
@@ -1,6 +1,7 @@
from datetime import datetime
import pickle
import unittest
+import nose
import numpy as np
@@ -9,7 +10,7 @@
from pandas import Timestamp
from pandas.tseries.offsets import generate_range
-from pandas.tseries.index import bdate_range, date_range
+from pandas.tseries.index import cdate_range, bdate_range, date_range
import pandas.tseries.tools as tools
import pandas.core.datetools as datetools
@@ -23,6 +24,11 @@ def _skip_if_no_pytz():
raise nose.SkipTest
+def _skip_if_no_cday():
+ if datetools.cday is None:
+ raise nose.SkipTest("CustomBusinessDay not available.")
+
+
def eq_gen_range(kwargs, expected):
rng = generate_range(**kwargs)
assert(np.array_equal(list(rng), expected))
@@ -37,6 +43,12 @@ def test_generate(self):
rng2 = list(generate_range(START, END, time_rule='B'))
self.assert_(np.array_equal(rng1, rng2))
+ def test_generate_cday(self):
+ _skip_if_no_cday()
+ rng1 = list(generate_range(START, END, offset=datetools.cday))
+ rng2 = list(generate_range(START, END, time_rule='C'))
+ self.assert_(np.array_equal(rng1, rng2))
+
def test_1(self):
eq_gen_range(dict(start=datetime(2009, 3, 25), periods=2),
[datetime(2009, 3, 25), datetime(2009, 3, 26)])
@@ -364,7 +376,235 @@ def test_month_range_union_tz(self):
early_dr.union(late_dr)
+class TestCustomDateRange(unittest.TestCase):
+
+ def setUp(self):
+ _skip_if_no_cday()
+ self.rng = cdate_range(START, END)
+
+ def test_constructor(self):
+ rng = cdate_range(START, END, freq=datetools.cday)
+ rng = cdate_range(START, periods=20, freq=datetools.cday)
+ rng = cdate_range(end=START, periods=20, freq=datetools.cday)
+ self.assertRaises(ValueError, date_range, '2011-1-1', '2012-1-1', 'C')
+ self.assertRaises(ValueError, cdate_range, '2011-1-1', '2012-1-1', 'C')
+
+ def test_cached_range(self):
+ rng = DatetimeIndex._cached_range(START, END,
+ offset=datetools.cday)
+ rng = DatetimeIndex._cached_range(START, periods=20,
+ offset=datetools.cday)
+ rng = DatetimeIndex._cached_range(end=START, periods=20,
+ offset=datetools.cday)
+
+ self.assertRaises(Exception, DatetimeIndex._cached_range, START, END)
+
+ self.assertRaises(Exception, DatetimeIndex._cached_range, START,
+ freq=datetools.cday)
+
+ self.assertRaises(Exception, DatetimeIndex._cached_range, end=END,
+ freq=datetools.cday)
+
+ self.assertRaises(Exception, DatetimeIndex._cached_range, periods=20,
+ freq=datetools.cday)
+
+ def test_comparison(self):
+ d = self.rng[10]
+
+ comp = self.rng > d
+ self.assert_(comp[11])
+ self.assert_(not comp[9])
+
+ def test_copy(self):
+ cp = self.rng.copy()
+ repr(cp)
+ self.assert_(cp.equals(self.rng))
+
+ def test_repr(self):
+ # only really care that it works
+ repr(self.rng)
+
+ def test_getitem(self):
+ smaller = self.rng[:5]
+ self.assert_(np.array_equal(smaller, self.rng.view(np.ndarray)[:5]))
+ self.assertEquals(smaller.offset, self.rng.offset)
+
+ sliced = self.rng[::5]
+ self.assertEquals(sliced.offset, datetools.cday * 5)
+
+ fancy_indexed = self.rng[[4, 3, 2, 1, 0]]
+ self.assertEquals(len(fancy_indexed), 5)
+ self.assert_(isinstance(fancy_indexed, DatetimeIndex))
+ self.assert_(fancy_indexed.freq is None)
+
+ # 32-bit vs. 64-bit platforms
+ self.assertEquals(self.rng[4], self.rng[np.int_(4)])
+
+ def test_getitem_matplotlib_hackaround(self):
+ values = self.rng[:, None]
+ expected = self.rng.values[:, None]
+ self.assert_(np.array_equal(values, expected))
+
+ def test_shift(self):
+ shifted = self.rng.shift(5)
+ self.assertEquals(shifted[0], self.rng[5])
+ self.assertEquals(shifted.offset, self.rng.offset)
+
+ shifted = self.rng.shift(-5)
+ self.assertEquals(shifted[5], self.rng[0])
+ self.assertEquals(shifted.offset, self.rng.offset)
+
+ shifted = self.rng.shift(0)
+ self.assertEquals(shifted[0], self.rng[0])
+ self.assertEquals(shifted.offset, self.rng.offset)
+
+ rng = date_range(START, END, freq=datetools.bmonthEnd)
+ shifted = rng.shift(1, freq=datetools.cday)
+ self.assertEquals(shifted[0], rng[0] + datetools.cday)
+
+ def test_pickle_unpickle(self):
+ pickled = pickle.dumps(self.rng)
+ unpickled = pickle.loads(pickled)
+
+ self.assert_(unpickled.offset is not None)
+
+ def test_union(self):
+ # overlapping
+ left = self.rng[:10]
+ right = self.rng[5:10]
+
+ the_union = left.union(right)
+ self.assert_(isinstance(the_union, DatetimeIndex))
+
+ # non-overlapping, gap in middle
+ left = self.rng[:5]
+ right = self.rng[10:]
+
+ the_union = left.union(right)
+ self.assert_(isinstance(the_union, Index))
+
+ # non-overlapping, no gap
+ left = self.rng[:5]
+ right = self.rng[5:10]
+
+ the_union = left.union(right)
+ self.assert_(isinstance(the_union, DatetimeIndex))
+
+ # order does not matter
+ self.assert_(np.array_equal(right.union(left), the_union))
+
+ # overlapping, but different offset
+ rng = date_range(START, END, freq=datetools.bmonthEnd)
+
+ the_union = self.rng.union(rng)
+ self.assert_(isinstance(the_union, DatetimeIndex))
+
+ def test_outer_join(self):
+ # should just behave as union
+
+ # overlapping
+ left = self.rng[:10]
+ right = self.rng[5:10]
+
+ the_join = left.join(right, how='outer')
+ self.assert_(isinstance(the_join, DatetimeIndex))
+
+ # non-overlapping, gap in middle
+ left = self.rng[:5]
+ right = self.rng[10:]
+
+ the_join = left.join(right, how='outer')
+ self.assert_(isinstance(the_join, DatetimeIndex))
+ self.assert_(the_join.freq is None)
+
+ # non-overlapping, no gap
+ left = self.rng[:5]
+ right = self.rng[5:10]
+
+ the_join = left.join(right, how='outer')
+ self.assert_(isinstance(the_join, DatetimeIndex))
+
+ # overlapping, but different offset
+ rng = date_range(START, END, freq=datetools.bmonthEnd)
+
+ the_join = self.rng.join(rng, how='outer')
+ self.assert_(isinstance(the_join, DatetimeIndex))
+ self.assert_(the_join.freq is None)
+
+ def test_intersection_bug(self):
+ # GH #771
+ a = cdate_range('11/30/2011', '12/31/2011')
+ b = cdate_range('12/10/2011', '12/20/2011')
+ result = a.intersection(b)
+ self.assert_(result.equals(b))
+
+ def test_summary(self):
+ self.rng.summary()
+ self.rng[2:2].summary()
+
+ def test_summary_pytz(self):
+ _skip_if_no_pytz()
+ import pytz
+ cdate_range('1/1/2005', '1/1/2009', tz=pytz.utc).summary()
+
+ def test_misc(self):
+ end = datetime(2009, 5, 13)
+ dr = cdate_range(end=end, periods=20)
+ firstDate = end - 19 * datetools.cday
+
+ assert len(dr) == 20
+ assert dr[0] == firstDate
+ assert dr[-1] == end
+
+ def test_date_parse_failure(self):
+ badly_formed_date = '2007/100/1'
+
+ self.assertRaises(ValueError, Timestamp, badly_formed_date)
+
+ self.assertRaises(ValueError, cdate_range, start=badly_formed_date,
+ periods=10)
+ self.assertRaises(ValueError, cdate_range, end=badly_formed_date,
+ periods=10)
+ self.assertRaises(ValueError, cdate_range, badly_formed_date,
+ badly_formed_date)
+
+ def test_equals(self):
+ self.assertFalse(self.rng.equals(list(self.rng)))
+
+ def test_daterange_bug_456(self):
+ # GH #456
+ rng1 = cdate_range('12/5/2011', '12/5/2011')
+ rng2 = cdate_range('12/2/2011', '12/5/2011')
+ rng2.offset = datetools.CDay()
+
+ result = rng1.union(rng2)
+ self.assert_(isinstance(result, DatetimeIndex))
+
+ def test_cdaterange(self):
+ rng = cdate_range('2013-05-01', periods=3)
+ xp = DatetimeIndex(['2013-05-01', '2013-05-02', '2013-05-03'])
+ self.assert_(xp.equals(rng))
+
+ def test_cdaterange_weekmask(self):
+ rng = cdate_range('2013-05-01', periods=3,
+ weekmask='Sun Mon Tue Wed Thu')
+ xp = DatetimeIndex(['2013-05-01', '2013-05-02', '2013-05-05'])
+ self.assert_(xp.equals(rng))
+
+ def test_cdaterange_holidays(self):
+ rng = cdate_range('2013-05-01', periods=3,
+ holidays=['2013-05-01'])
+ xp = DatetimeIndex(['2013-05-02', '2013-05-03', '2013-05-06'])
+ self.assert_(xp.equals(rng))
+
+ def test_cdaterange_weekmask_and_holidays(self):
+ rng = cdate_range('2013-05-01', periods=3,
+ weekmask='Sun Mon Tue Wed Thu',
+ holidays=['2013-05-01'])
+ xp = DatetimeIndex(['2013-05-02', '2013-05-05', '2013-05-06'])
+ self.assert_(xp.equals(rng))
+
+
if __name__ == '__main__':
- import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index bcd74e7e6eecd..487a3091fd83b 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -1,10 +1,13 @@
from datetime import date, datetime, timedelta
import unittest
+import nose
+from nose.tools import assert_raises
+
import numpy as np
from pandas.core.datetools import (
- bday, BDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd, MonthBegin,
- BYearBegin, QuarterBegin, BQuarterBegin, BMonthBegin,
+ bday, BDay, cday, CDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd,
+ MonthBegin, BYearBegin, QuarterBegin, BQuarterBegin, BMonthBegin,
DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second, Day, Micro,
Milli, Nano,
WeekOfMonth, format, ole2datetime, QuarterEnd, to_datetime, normalize_date,
@@ -16,8 +19,6 @@
from pandas.tseries.tools import parse_time_string
import pandas.tseries.offsets as offsets
-from nose.tools import assert_raises
-
from pandas.tslib import monthrange
from pandas.lib import Timestamp
from pandas.util.testing import assertRaisesRegexp
@@ -31,6 +32,12 @@ def test_monthrange():
for m in range(1, 13):
assert monthrange(y, m) == calendar.monthrange(y, m)
+
+def _skip_if_no_cday():
+ if cday is None:
+ raise nose.SkipTest("CustomBusinessDay not available.")
+
+
####
## Misc function tests
####
@@ -295,6 +302,220 @@ def test_offsets_compare_equal(self):
self.assertFalse(offset1 != offset2)
+class TestCustomBusinessDay(unittest.TestCase):
+ _multiprocess_can_split_ = True
+
+ def setUp(self):
+ self.d = datetime(2008, 1, 1)
+
+ _skip_if_no_cday()
+ self.offset = CDay()
+ self.offset2 = CDay(2)
+
+ def test_different_normalize_equals(self):
+ # equivalent in this special case
+ offset = CDay()
+ offset2 = CDay()
+ offset2.normalize = True
+ self.assertEqual(offset, offset2)
+
+ def test_repr(self):
+ assert repr(self.offset) == '<1 CustomBusinessDay>'
+ assert repr(self.offset2) == '<2 CustomBusinessDays>'
+
+ expected = '<1 BusinessDay: offset=datetime.timedelta(1)>'
+ assert repr(self.offset + timedelta(1)) == expected
+
+ def test_with_offset(self):
+ offset = self.offset + timedelta(hours=2)
+
+ assert (self.d + offset) == datetime(2008, 1, 2, 2)
+
+ def testEQ(self):
+ self.assertEqual(self.offset2, self.offset2)
+
+ def test_mul(self):
+ pass
+
+ def test_hash(self):
+ self.assertEqual(hash(self.offset2), hash(self.offset2))
+
+ def testCall(self):
+ self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
+
+ def testRAdd(self):
+ self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
+
+ def testSub(self):
+ off = self.offset2
+ self.assertRaises(Exception, off.__sub__, self.d)
+ self.assertEqual(2 * off - off, off)
+
+ self.assertEqual(self.d - self.offset2, self.d + CDay(-2))
+
+ def testRSub(self):
+ self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
+
+ def testMult1(self):
+ self.assertEqual(self.d + 10 * self.offset, self.d + CDay(10))
+
+ def testMult2(self):
+ self.assertEqual(self.d + (-5 * CDay(-10)),
+ self.d + CDay(50))
+
+ def testRollback1(self):
+ self.assertEqual(CDay(10).rollback(self.d), self.d)
+
+ def testRollback2(self):
+ self.assertEqual(
+ CDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
+
+ def testRollforward1(self):
+ self.assertEqual(CDay(10).rollforward(self.d), self.d)
+
+ def testRollforward2(self):
+ self.assertEqual(
+ CDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
+
+ def test_roll_date_object(self):
+ offset = CDay()
+
+ dt = date(2012, 9, 15)
+
+ result = offset.rollback(dt)
+ self.assertEqual(result, datetime(2012, 9, 14))
+
+ result = offset.rollforward(dt)
+ self.assertEqual(result, datetime(2012, 9, 17))
+
+ offset = offsets.Day()
+ result = offset.rollback(dt)
+ self.assertEqual(result, datetime(2012, 9, 15))
+
+ result = offset.rollforward(dt)
+ self.assertEqual(result, datetime(2012, 9, 15))
+
+ def test_onOffset(self):
+ tests = [(CDay(), datetime(2008, 1, 1), True),
+ (CDay(), datetime(2008, 1, 5), False)]
+
+ for offset, date, expected in tests:
+ assertOnOffset(offset, date, expected)
+
+ def test_apply(self):
+ from pandas.core.datetools import cday
+ tests = []
+
+ tests.append((cday,
+ {datetime(2008, 1, 1): datetime(2008, 1, 2),
+ datetime(2008, 1, 4): datetime(2008, 1, 7),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 8)}))
+
+ tests.append((2 * cday,
+ {datetime(2008, 1, 1): datetime(2008, 1, 3),
+ datetime(2008, 1, 4): datetime(2008, 1, 8),
+ datetime(2008, 1, 5): datetime(2008, 1, 8),
+ datetime(2008, 1, 6): datetime(2008, 1, 8),
+ datetime(2008, 1, 7): datetime(2008, 1, 9)}))
+
+ tests.append((-cday,
+ {datetime(2008, 1, 1): datetime(2007, 12, 31),
+ datetime(2008, 1, 4): datetime(2008, 1, 3),
+ datetime(2008, 1, 5): datetime(2008, 1, 4),
+ datetime(2008, 1, 6): datetime(2008, 1, 4),
+ datetime(2008, 1, 7): datetime(2008, 1, 4),
+ datetime(2008, 1, 8): datetime(2008, 1, 7)}))
+
+ tests.append((-2 * cday,
+ {datetime(2008, 1, 1): datetime(2007, 12, 28),
+ datetime(2008, 1, 4): datetime(2008, 1, 2),
+ datetime(2008, 1, 5): datetime(2008, 1, 3),
+ datetime(2008, 1, 6): datetime(2008, 1, 3),
+ datetime(2008, 1, 7): datetime(2008, 1, 3),
+ datetime(2008, 1, 8): datetime(2008, 1, 4),
+ datetime(2008, 1, 9): datetime(2008, 1, 7)}))
+
+ tests.append((CDay(0),
+ {datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 4): datetime(2008, 1, 4),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 7)}))
+
+ for offset, cases in tests:
+ for base, expected in cases.iteritems():
+ assertEq(offset, base, expected)
+
+ def test_apply_large_n(self):
+ dt = datetime(2012, 10, 23)
+
+ result = dt + CDay(10)
+ self.assertEqual(result, datetime(2012, 11, 6))
+
+ result = dt + CDay(100) - CDay(100)
+ self.assertEqual(result, dt)
+
+ off = CDay() * 6
+ rs = datetime(2012, 1, 1) - off
+ xp = datetime(2011, 12, 23)
+ self.assertEqual(rs, xp)
+
+ st = datetime(2011, 12, 18)
+ rs = st + off
+ xp = datetime(2011, 12, 26)
+ self.assertEqual(rs, xp)
+
+ def test_apply_corner(self):
+ self.assertRaises(Exception, CDay().apply, BMonthEnd())
+
+ def test_offsets_compare_equal(self):
+ # root cause of #456
+ offset1 = CDay()
+ offset2 = CDay()
+ self.assertFalse(offset1 != offset2)
+
+ def test_holidays(self):
+ # Define a TradingDay offset
+ holidays = ['2012-05-01', datetime(2013, 5, 1),
+ np.datetime64('2014-05-01')]
+ tday = CDay(holidays=holidays)
+ for year in range(2012, 2015):
+ dt = datetime(year, 4, 30)
+ xp = datetime(year, 5, 2)
+ rs = dt + tday
+ self.assertEqual(rs, xp)
+
+ def test_weekmask(self):
+ weekmask_saudi = 'Sat Sun Mon Tue Wed' # Thu-Fri Weekend
+ weekmask_uae = '1111001' # Fri-Sat Weekend
+ weekmask_egypt = [1,1,1,1,0,0,1] # Fri-Sat Weekend
+ bday_saudi = CDay(weekmask=weekmask_saudi)
+ bday_uae = CDay(weekmask=weekmask_uae)
+ bday_egypt = CDay(weekmask=weekmask_egypt)
+ dt = datetime(2013, 5, 1)
+ xp_saudi = datetime(2013, 5, 4)
+ xp_uae = datetime(2013, 5, 2)
+ xp_egypt = datetime(2013, 5, 2)
+ self.assertEqual(xp_saudi, dt + bday_saudi)
+ self.assertEqual(xp_uae, dt + bday_uae)
+ self.assertEqual(xp_egypt, dt + bday_egypt)
+ xp2 = datetime(2013, 5, 5)
+ self.assertEqual(xp2, dt + 2 * bday_saudi)
+ self.assertEqual(xp2, dt + 2 * bday_uae)
+ self.assertEqual(xp2, dt + 2 * bday_egypt)
+
+ def test_weekmask_and_holidays(self):
+ weekmask_egypt = 'Sun Mon Tue Wed Thu' # Fri-Sat Weekend
+ holidays = ['2012-05-01', datetime(2013, 5, 1),
+ np.datetime64('2014-05-01')]
+ bday_egypt = CDay(holidays=holidays, weekmask=weekmask_egypt)
+ dt = datetime(2013, 4, 30)
+ xp_egypt = datetime(2013, 5, 5)
+ self.assertEqual(xp_egypt, dt + 2 * bday_egypt)
+
+
def assertOnOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected
@@ -1160,7 +1381,6 @@ def test_offset(self):
datetime(2007, 12, 15): datetime(2007, 4, 1),
datetime(2012, 1, 31): datetime(2011, 4, 1), }))
-
for offset, cases in tests:
for base, expected in cases.iteritems():
assertEq(offset, base, expected)
| I took a stab at issue #2301 using the busdaycalendar functionality in numpy 1.7.
### Caveats
There are a few issues with this:
- It requires Numpy 1.7 (I've only tested it on numpy 1.7.1).
- The timezone handling in nump 1.7 is broken (according to [this discussion](http://numpy-discussion.10968.n7.nabble.com/timezones-and-datetime64-td33407.html) ) and the datetime64 and timezone API will change in numpy 1.8. So from what I understand the current code as I've written it might move you onto a different day if you're in a UTC-0?00 timezone (I'm in UTC+0200 so it didn't affect me).
That said I didn't want to reinvent the wheel and since the numpy datetime64 api does cater for this usecase I thought it would be good to standardise on that in the long term.
The code does what I need it to do and I'm putting it out there in case it's useful to anyone else. Also, with some feedback maybe we can get it to the point where it could be included in Pandas as an optional DateOffset class for users who have Numpy 1.7.
### Possible Improvements
I can think of:
- Guard in the constructor to raise a meaningful exception when the user doesn't is on Numpy < 1.7.
- Better handling for the timezone issue.
- Unit tests for the CustomBusinessDay behaviour.
### Notes
- I picked the frequency code 'C' because it was available and fit nicely between 'D' and 'B'. I had originally named the class TradingDay and wanted to use 'T' but that's already used for Minutes.
- I have no idea what goes on in frequencies.py and I simply put a 'C' wherever I found a 'B' using the reasoning that as a BusinessDay subclass it should work in all the same places.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3744 | 2013-06-03T14:01:05Z | 2013-06-21T12:39:20Z | 2013-06-21T12:39:20Z | 2014-07-02T10:29:35Z |
BUG: (GH3740) Groupby transform with item-by-item not upcasting correctly | diff --git a/RELEASE.rst b/RELEASE.rst
index c59a53c7f6c69..bbfc9fb948ef4 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -196,6 +196,7 @@ pandas 0.11.1
- ``DataFrame.to_html`` and ``DataFrame.to_latex`` now accept a path for
their first argument (GH3702_)
- Fix file tokenization error with \r delimiter and quoted fields (GH3453_)
+ - Groupby transform with item-by-item not upcasting correctly (GH3740_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -278,6 +279,7 @@ pandas 0.11.1
.. _GH3696: https://github.com/pydata/pandas/issues/3696
.. _GH3667: https://github.com/pydata/pandas/issues/3667
.. _GH3733: https://github.com/pydata/pandas/issues/3733
+.. _GH3740: https://github.com/pydata/pandas/issues/3740
pandas 0.11.0
=============
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index d409adfd71158..64606a6e644f9 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1532,6 +1532,9 @@ def transform(self, func, *args, **kwargs):
transformed : Series
"""
result = self.obj.copy()
+ if hasattr(result,'values'):
+ result = result.values
+ dtype = result.dtype
if isinstance(func, basestring):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
@@ -1539,13 +1542,21 @@ def transform(self, func, *args, **kwargs):
wrapper = lambda x: func(x, *args, **kwargs)
for name, group in self:
+
+ group = com.ensure_float(group)
object.__setattr__(group, 'name', name)
res = wrapper(group)
- # result[group.index] = res
indexer = self.obj.index.get_indexer(group.index)
- np.put(result, indexer, res)
+ if hasattr(res,'values'):
+ res = res.values
- return result
+ # need to do a safe put here, as the dtype may be different
+ # this needs to be an ndarray
+ result,_ = com._maybe_upcast_indexer(result, indexer, res)
+
+ # downcast if we can (and need)
+ result = _possibly_downcast_to_dtype(result, dtype)
+ return self.obj.__class__(result,index=self.obj.index,name=self.obj.name)
class NDFrameGroupBy(GroupBy):
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index c56fca49cce48..cf62b16a9dd2a 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -616,6 +616,39 @@ def f(x):
assert_series_equal(agged, expected, check_dtype=False)
self.assert_(issubclass(agged.dtype.type, np.dtype(dtype).type))
+ def test_groupby_transform_with_int(self):
+
+ # GH 3740, make sure that we might upcast on item-by-item transform
+
+ # floats
+ df = DataFrame(dict(A = [1,1,1,2,2,2], B = Series(1,dtype='float64'), C = Series([1,2,3,1,2,3],dtype='float64'), D = 'foo'))
+ result = df.groupby('A').transform(lambda x: (x-x.mean())/x.std())
+ expected = DataFrame(dict(B = np.nan, C = Series([-1,0,1,-1,0,1],dtype='float64')))
+ assert_frame_equal(result,expected)
+
+ # int case
+ df = DataFrame(dict(A = [1,1,1,2,2,2], B = 1, C = [1,2,3,1,2,3], D = 'foo'))
+ result = df.groupby('A').transform(lambda x: (x-x.mean())/x.std())
+ expected = DataFrame(dict(B = np.nan, C = [-1,0,1,-1,0,1]))
+ assert_frame_equal(result,expected)
+
+ # int that needs float conversion
+ s = Series([2,3,4,10,5,-1])
+ df = DataFrame(dict(A = [1,1,1,2,2,2], B = 1, C = s, D = 'foo'))
+ result = df.groupby('A').transform(lambda x: (x-x.mean())/x.std())
+
+ s1 = s.iloc[0:3]
+ s1 = (s1-s1.mean())/s1.std()
+ s2 = s.iloc[3:6]
+ s2 = (s2-s2.mean())/s2.std()
+ expected = DataFrame(dict(B = np.nan, C = concat([s1,s2])))
+ assert_frame_equal(result,expected)
+
+ # int downcasting
+ result = df.groupby('A').transform(lambda x: x*2/2)
+ expected = DataFrame(dict(B = 1, C = [2,3,4,10,5,-1]))
+ assert_frame_equal(result,expected)
+
def test_indices_concatenation_order(self):
# GH 2808
| closes #3740
| https://api.github.com/repos/pandas-dev/pandas/pulls/3743 | 2013-06-03T13:24:35Z | 2013-06-03T17:09:29Z | 2013-06-03T17:09:29Z | 2014-07-16T08:11:29Z |
TST/BUG: fix bs4 tests that were getting erroneously run when lxml is installed but not bs4 | diff --git a/RELEASE.rst b/RELEASE.rst
index 2b90edaa327b0..b5dd3eef68dea 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -200,6 +200,7 @@ pandas 0.11.1
- Fix file tokenization error with \r delimiter and quoted fields (GH3453_)
- Groupby transform with item-by-item not upcasting correctly (GH3740_)
- Incorrectly read a HDFStore multi-index Frame witha column specification (GH3748_)
+ - ``read_html`` now correctly skips tests (GH3741_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -284,6 +285,7 @@ pandas 0.11.1
.. _GH3733: https://github.com/pydata/pandas/issues/3733
.. _GH3740: https://github.com/pydata/pandas/issues/3740
.. _GH3748: https://github.com/pydata/pandas/issues/3748
+.. _GH3741: https://github.com/pydata/pandas/issues/3741
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 0ef37704b9d8f..b2fee1acbc4d6 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -294,6 +294,7 @@ Bug Fixes
- Allow insert/delete to non-unique columns (GH3679_)
- ``DataFrame.from_records`` did not accept empty recarrays (GH3682_)
+ - ``read_html`` now correctly skips tests (GH3741_)
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
@@ -340,3 +341,4 @@ on GitHub for a complete list.
.. _GH3691: https://github.com/pydata/pandas/issues/3691
.. _GH3696: https://github.com/pydata/pandas/issues/3696
.. _GH3667: https://github.com/pydata/pandas/issues/3667
+.. _GH3741: https://github.com/pydata/pandas/issues/3741
diff --git a/pandas/io/tests/data/banklist.csv b/pandas/io/tests/data/banklist.csv
index 6545d31fe5fd4..85cebb56f6adf 100644
--- a/pandas/io/tests/data/banklist.csv
+++ b/pandas/io/tests/data/banklist.csv
@@ -1,8 +1,12 @@
-Bank Name,City,State,CERT #,Acquiring Institution,Closing Date,Updated Date
-Douglas County Bank,Douglasville,GA,21649,Hamilton State Bank,26-Apr-13,30-Apr-13
-Parkway Bank,Lenoir,NC,57158,"CertusBank, National Association",26-Apr-13,30-Apr-13
-Chipola Community Bank,Marianna,FL,58034,First Federal Bank of Florida,19-Apr-13,23-Apr-13
-Heritage Bank of North Florida,Orange Park,FL,26680,FirstAtlantic Bank,19-Apr-13,23-Apr-13
+Bank Name,City,ST,CERT,Acquiring Institution,Closing Date,Updated Date
+Banks of Wisconsin d/b/a Bank of Kenosha,Kenosha,WI,35386,"North Shore Bank, FSB",31-May-13,31-May-13
+Central Arizona Bank,Scottsdale,AZ,34527,Western State Bank,14-May-13,20-May-13
+Sunrise Bank,Valdosta,GA,58185,Synovus Bank,10-May-13,21-May-13
+Pisgah Community Bank,Asheville,NC,58701,"Capital Bank, N.A.",10-May-13,14-May-13
+Douglas County Bank,Douglasville,GA,21649,Hamilton State Bank,26-Apr-13,16-May-13
+Parkway Bank,Lenoir,NC,57158,"CertusBank, National Association",26-Apr-13,17-May-13
+Chipola Community Bank,Marianna,FL,58034,First Federal Bank of Florida,19-Apr-13,16-May-13
+Heritage Bank of North Florida,Orange Park,FL,26680,FirstAtlantic Bank,19-Apr-13,16-May-13
First Federal Bank,Lexington,KY,29594,Your Community Bank,19-Apr-13,23-Apr-13
Gold Canyon Bank,Gold Canyon,AZ,58066,"First Scottsdale Bank, National Association",5-Apr-13,9-Apr-13
Frontier Bank,LaGrange,GA,16431,HeritageBank of the South,8-Mar-13,26-Mar-13
@@ -36,18 +40,18 @@ Waccamaw Bank,Whiteville,NC,34515,First Community Bank,8-Jun-12,8-Nov-12
Farmers' and Traders' State Bank,Shabbona,IL,9257,First State Bank,8-Jun-12,10-Oct-12
Carolina Federal Savings Bank,Charleston,SC,35372,Bank of North Carolina,8-Jun-12,31-Oct-12
First Capital Bank,Kingfisher,OK,416,F & M Bank,8-Jun-12,10-Oct-12
-"Alabama Trust Bank, National Association",Sylacauga,AL,35224,Southern States Bank,18-May-12,31-Oct-12
+"Alabama Trust Bank, National Association",Sylacauga,AL,35224,Southern States Bank,18-May-12,20-May-13
"Security Bank, National Association",North Lauderdale,FL,23156,Banesco USA,4-May-12,31-Oct-12
-Palm Desert National Bank,Palm Desert,CA,23632,Pacific Premier Bank,27-Apr-12,31-Aug-12
-Plantation Federal Bank,Pawleys Island,SC,32503,First Federal Bank,27-Apr-12,31-Oct-12
-"Inter Savings Bank, fsb D/B/A InterBank, fsb",Maple Grove,MN,31495,Great Southern Bank,27-Apr-12,17-Oct-12
-HarVest Bank of Maryland,Gaithersburg,MD,57766,Sonabank,27-Apr-12,17-Oct-12
+Palm Desert National Bank,Palm Desert,CA,23632,Pacific Premier Bank,27-Apr-12,17-May-13
+Plantation Federal Bank,Pawleys Island,SC,32503,First Federal Bank,27-Apr-12,17-May-13
+"Inter Savings Bank, fsb D/B/A InterBank, fsb",Maple Grove,MN,31495,Great Southern Bank,27-Apr-12,17-May-13
+HarVest Bank of Maryland,Gaithersburg,MD,57766,Sonabank,27-Apr-12,17-May-13
Bank of the Eastern Shore,Cambridge,MD,26759,No Acquirer,27-Apr-12,17-Oct-12
-"Fort Lee Federal Savings Bank, FSB",Fort Lee,NJ,35527,Alma Bank,20-Apr-12,31-Aug-12
-Fidelity Bank,Dearborn,MI,33883,The Huntington National Bank,30-Mar-12,9-Aug-12
+"Fort Lee Federal Savings Bank, FSB",Fort Lee,NJ,35527,Alma Bank,20-Apr-12,17-May-13
+Fidelity Bank,Dearborn,MI,33883,The Huntington National Bank,30-Mar-12,16-May-13
Premier Bank,Wilmette,IL,35419,International Bank of Chicago,23-Mar-12,17-Oct-12
Covenant Bank & Trust,Rock Spring,GA,58068,"Stearns Bank, N.A.",23-Mar-12,31-Oct-12
-New City Bank ,Chicago,IL,57597,No Acquirer,9-Mar-12,29-Oct-12
+New City Bank,Chicago,IL,57597,No Acquirer,9-Mar-12,29-Oct-12
Global Commerce Bank,Doraville,GA,34046,Metro City Bank,2-Mar-12,31-Oct-12
Home Savings of America,Little Falls,MN,29178,No Acquirer,24-Feb-12,17-Dec-12
Central Bank of Georgia,Ellaville,GA,5687,Ameris Bank,24-Feb-12,9-Aug-12
@@ -55,7 +59,7 @@ SCB Bank,Shelbyville,IN,29761,"First Merchants Bank, National Association",10-Fe
Charter National Bank and Trust,Hoffman Estates,IL,23187,"Barrington Bank & Trust Company, National Association",10-Feb-12,25-Mar-13
BankEast,Knoxville,TN,19869,U.S.Bank National Association,27-Jan-12,8-Mar-13
Patriot Bank Minnesota,Forest Lake,MN,34823,First Resource Bank,27-Jan-12,12-Sep-12
-Tennessee Commerce Bank ,Franklin,TN,35296,Republic Bank & Trust Company,27-Jan-12,20-Nov-12
+Tennessee Commerce Bank,Franklin,TN,35296,Republic Bank & Trust Company,27-Jan-12,20-Nov-12
First Guaranty Bank and Trust Company of Jacksonville,Jacksonville,FL,16579,"CenterState Bank of Florida, N.A.",27-Jan-12,12-Sep-12
American Eagle Savings Bank,Boothwyn,PA,31581,"Capital Bank, N.A.",20-Jan-12,25-Jan-13
The First State Bank,Stockbridge,GA,19252,Hamilton State Bank,20-Jan-12,25-Jan-13
@@ -130,7 +134,7 @@ The Bank of Commerce,Wood Dale,IL,34292,Advantage National Bank Group,25-Mar-11,
Legacy Bank,Milwaukee,WI,34818,Seaway Bank and Trust Company,11-Mar-11,12-Sep-12
First National Bank of Davis,Davis,OK,4077,The Pauls Valley National Bank,11-Mar-11,20-Aug-12
Valley Community Bank,St. Charles,IL,34187,First State Bank,25-Feb-11,12-Sep-12
-"San Luis Trust Bank, FSB ",San Luis Obispo,CA,34783,First California Bank,18-Feb-11,20-Aug-12
+"San Luis Trust Bank, FSB",San Luis Obispo,CA,34783,First California Bank,18-Feb-11,20-Aug-12
Charter Oak Bank,Napa,CA,57855,Bank of Marin,18-Feb-11,12-Sep-12
Citizens Bank of Effingham,Springfield,GA,34601,Heritage Bank of the South,18-Feb-11,2-Nov-12
Habersham Bank,Clarkesville,GA,151,SCBT National Association,18-Feb-11,2-Nov-12
@@ -153,9 +157,9 @@ Oglethorpe Bank,Brunswick,GA,57440,Bank of the Ozarks,14-Jan-11,2-Nov-12
Legacy Bank,Scottsdale,AZ,57820,Enterprise Bank & Trust,7-Jan-11,12-Sep-12
First Commercial Bank of Florida,Orlando,FL,34965,First Southern Bank,7-Jan-11,2-Nov-12
Community National Bank,Lino Lakes,MN,23306,Farmers & Merchants Savings Bank,17-Dec-10,20-Aug-12
-First Southern Bank ,Batesville,AR,58052,Southern Bank,17-Dec-10,20-Aug-12
+First Southern Bank,Batesville,AR,58052,Southern Bank,17-Dec-10,20-Aug-12
"United Americas Bank, N.A.",Atlanta,GA,35065,State Bank and Trust Company,17-Dec-10,2-Nov-12
-"Appalachian Community Bank, FSB ",McCaysville,GA,58495,Peoples Bank of East Tennessee,17-Dec-10,31-Oct-12
+"Appalachian Community Bank, FSB",McCaysville,GA,58495,Peoples Bank of East Tennessee,17-Dec-10,31-Oct-12
Chestatee State Bank,Dawsonville,GA,34578,Bank of the Ozarks,17-Dec-10,2-Nov-12
"The Bank of Miami,N.A.",Coral Gables,FL,19040,1st United Bank,17-Dec-10,2-Nov-12
Earthstar Bank,Southampton,PA,35561,Polonia Bank,10-Dec-10,20-Aug-12
@@ -195,7 +199,7 @@ Sonoma Valley Bank,Sonoma,CA,27259,Westamerica Bank,20-Aug-10,12-Sep-12
Los Padres Bank,Solvang,CA,32165,Pacific Western Bank,20-Aug-10,12-Sep-12
Butte Community Bank,Chico,CA,33219,"Rabobank, N.A.",20-Aug-10,12-Sep-12
Pacific State Bank,Stockton,CA,27090,"Rabobank, N.A.",20-Aug-10,12-Sep-12
-ShoreBank,Chicago,IL,15640,Urban Partnership Bank,20-Aug-10,12-Sep-12
+ShoreBank,Chicago,IL,15640,Urban Partnership Bank,20-Aug-10,16-May-13
Imperial Savings and Loan Association,Martinsville,VA,31623,"River Community Bank, N.A.",20-Aug-10,24-Aug-12
Independent National Bank,Ocala,FL,27344,"CenterState Bank of Florida, N.A.",20-Aug-10,5-Nov-12
Community National Bank at Bartow,Bartow,FL,25266,"CenterState Bank of Florida, N.A.",20-Aug-10,5-Nov-12
@@ -206,13 +210,13 @@ The Cowlitz Bank,Longview,WA,22643,Heritage Bank,30-Jul-10,22-Aug-12
Coastal Community Bank,Panama City Beach,FL,9619,Centennial Bank,30-Jul-10,5-Nov-12
Bayside Savings Bank,Port Saint Joe,FL,57669,Centennial Bank,30-Jul-10,5-Nov-12
Northwest Bank & Trust,Acworth,GA,57658,State Bank and Trust Company,30-Jul-10,5-Nov-12
-Home Valley Bank ,Cave Junction,OR,23181,South Valley Bank & Trust,23-Jul-10,12-Sep-12
-SouthwestUSA Bank ,Las Vegas,NV,35434,Plaza Bank,23-Jul-10,22-Aug-12
-Community Security Bank ,New Prague,MN,34486,Roundbank,23-Jul-10,12-Sep-12
-Thunder Bank ,Sylvan Grove,KS,10506,The Bennington State Bank,23-Jul-10,13-Sep-12
-Williamsburg First National Bank ,Kingstree,SC,17837,"First Citizens Bank and Trust Company, Inc.",23-Jul-10,5-Nov-12
-Crescent Bank and Trust Company ,Jasper,GA,27559,Renasant Bank,23-Jul-10,5-Nov-12
-Sterling Bank ,Lantana,FL,32536,IBERIABANK,23-Jul-10,5-Nov-12
+Home Valley Bank,Cave Junction,OR,23181,South Valley Bank & Trust,23-Jul-10,12-Sep-12
+SouthwestUSA Bank,Las Vegas,NV,35434,Plaza Bank,23-Jul-10,22-Aug-12
+Community Security Bank,New Prague,MN,34486,Roundbank,23-Jul-10,12-Sep-12
+Thunder Bank,Sylvan Grove,KS,10506,The Bennington State Bank,23-Jul-10,13-Sep-12
+Williamsburg First National Bank,Kingstree,SC,17837,"First Citizens Bank and Trust Company, Inc.",23-Jul-10,5-Nov-12
+Crescent Bank and Trust Company,Jasper,GA,27559,Renasant Bank,23-Jul-10,5-Nov-12
+Sterling Bank,Lantana,FL,32536,IBERIABANK,23-Jul-10,5-Nov-12
"Mainstreet Savings Bank, FSB",Hastings,MI,28136,Commercial Bank,16-Jul-10,13-Sep-12
Olde Cypress Community Bank,Clewiston,FL,28864,"CenterState Bank of Florida, N.A.",16-Jul-10,5-Nov-12
Turnberry Bank,Aventura,FL,32280,NAFH National Bank,16-Jul-10,5-Nov-12
@@ -362,7 +366,7 @@ Brickwell Community Bank,Woodbury,MN,57736,CorTrust Bank N.A.,11-Sep-09,15-Jan-1
"Corus Bank, N.A.",Chicago,IL,13693,"MB Financial Bank, N.A.",11-Sep-09,21-Aug-12
First State Bank,Flagstaff,AZ,34875,Sunwest Bank,4-Sep-09,15-Jan-13
Platinum Community Bank,Rolling Meadows,IL,35030,No Acquirer,4-Sep-09,21-Aug-12
-Vantus Bank,Sioux City,IA,27732,Great Southern Bank,4-Sep-09,21-Aug-12
+Vantus Bank,Sioux City,IN,27732,Great Southern Bank,4-Sep-09,21-Aug-12
InBank,Oak Forest,IL,20203,"MB Financial Bank, N.A.",4-Sep-09,21-Aug-12
First Bank of Kansas City,Kansas City,MO,25231,Great American Bank,4-Sep-09,21-Aug-12
Affinity Bank,Ventura,CA,27197,Pacific Western Bank,28-Aug-09,21-Aug-12
@@ -452,7 +456,7 @@ National Bank of Commerce,Berkeley,IL,19733,Republic Bank of Chicago,16-Jan-09,1
Sanderson State Bank,Sanderson,TX,11568,The Pecos County State Bank,12-Dec-08,4-Sep-12
Haven Trust Bank,Duluth,GA,35379,"Branch Banking & Trust Company, (BB&T)",12-Dec-08,16-Aug-12
First Georgia Community Bank,Jackson,GA,34301,United Bank,5-Dec-08,16-Aug-12
-PFF Bank & Trust ,Pomona,CA,28344,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13
+PFF Bank & Trust,Pomona,CA,28344,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13
Downey Savings & Loan,Newport Beach,CA,30968,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13
Community Bank,Loganville,GA,16490,Bank of Essex,21-Nov-08,4-Sep-12
Security Pacific Bank,Los Angeles,CA,23595,Pacific Western Bank,7-Nov-08,28-Aug-12
diff --git a/pandas/io/tests/data/banklist.html b/pandas/io/tests/data/banklist.html
index 8e15f37ccffdb..801016e7a5478 100644
--- a/pandas/io/tests/data/banklist.html
+++ b/pandas/io/tests/data/banklist.html
@@ -1,61 +1,31 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
+<!DOCTYPE html><!-- HTML5 -->
+<html lang="en-US">
+<!-- Content language is American English. -->
<head>
-
-<!-- Instruction: In the title tag change Product Title to the approved product name -->
- <title>FDIC: Failed Bank List</title>
- <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />
-
- <link rel="stylesheet" type="text/css" media="print" href="http://www.fdic.gov/style_productprint.css" />
-
-
-<style>
-
-* {margin:0; padding:0; outline:none}
-body {font:Arial,Helvetica; margin:10px; background-color:#fff}
-
-.sortable {width:925px; margin:0 auto 15px; font:13px Arial, Helvetica}
-.sortable th {background-color:#003366; text-align:left; color:#fff}
-.sortable th h3 {font-size:13px; padding:2px}
-.sortable td {padding:2px}
-.sortable .head h3 {background: url('images/sort.gif') no-repeat 5px center; cursor:pointer; padding-left:15px; text-decoration:underline}
-.sortable .desc, .sortable .asc {background-color:#404040; font-style:italic; text-decoration:underline}
-.sortable .desc h3 {background: url('images/desc.gif') no-repeat 5px center; cursor:pointer; padding-left:15px}
-.sortable .asc h3 {background: url('images/asc.gif') no-repeat 5px center; cursor:pointer; padding-left:15px}
-.sortable .head:hover, .sortable .desc:hover, .sortable .asc:hover {color:#fff}
-.sortable .evenrow td {background:#fff}
-.sortable .oddrow td {background:#fff}
-.sortable td.evenselected {background:#ebebeb}
-.sortable td.oddselected {background:#ebebeb}
-
-#controls {width:925px; margin:0 auto}
-#perpage {float:left; width:190px}
-#perpage select {float:left; font-size:11px}
-#perpage span {float:left; margin:2px 0 0 5px}
-#navigation {float:left; width:340px; text-align:center}
-#navigation img {cursor:pointer}
-#text {float:left; width:190px; text-align:right; margin-top:2px; font:13px Arial, Helvetica}
-</style>
+<title>FDIC: Failed Bank List</title>
+<!-- Meta Tags -->
+<meta charset="UTF-8">
+<!-- Unicode character encoding -->
+<meta http-equiv="X-UA-Compatible" content="IE=edge">
+<!-- Turns off IE Compatiblity Mode -->
+<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1">
+<!-- Makes it so phones don't auto zoom out. -->
+<meta name="author" content="DRR">
+<meta http-equiv="keywords" name="keywords" content="banks, financial institutions, failed, failure, closing, deposits, depositors, banking services, assuming institution, acquiring institution, claims">
+<!-- CSS -->
+<link rel="stylesheet" type="text/css" href="/responsive/css/responsive.css">
+<link rel="stylesheet" type="text/css" href="banklist.css">
</head>
-<body bgcolor="#ffffff" text="#000000">
-
-
-<!-- BEGIN HEADER INCLUDE -->
-<!-- Instruction: The following statement is the header include statement. Do not revise this code. -->
-<!-- begin header -->
-<!-- Last Updated Date: 1-21-2011 Time: 9:00AM Version: 1.5 -->
-<!--<script type="text/javascript" src="http://www.google.com/jsapi?key=ABQIAAAARFKFRzFbjPYbUgzSrdVg0hRrrNc1sGQv42gDojQ1Ll8KWy8MgRRQv_0u-KVSwjYfghDs3QJR40ZHtA"></script>
-<script type="text/javascript">
-google.load("jquery", "1.4.2");
-</script>-->
-<script type="text/javascript" src="/js/jquery-1.4.2.min.js"></script>
-<script type="text/javascript" src="/header/js/navigation.js"></script>
+<body>
+<!-- START of Header -->
+<script type="text/javascript" src="/responsive/header/js/header.js"></script>
+<link rel="stylesheet" type="text/css" href="/responsive/header/css/header.css" />
<!-- googleac.html includes Autocomplete functionality -->
- <!-- Autocomplete files -->
-<link rel="stylesheet" type="text/css" href="/header/css/jquery.autocomplete.css" />
-<script type="text/javascript" src="/header/js/jquery.autocomplete-1.4.2.js"></script>
-
+<!-- Autocomplete files -->
+<link rel="stylesheet" type="text/css" href="/responsive/header/css/jquery.autocomplete.css" />
+<script type="text/javascript" src="/responsive/js/jquery-1.4.1.min.js"></script>
+<script type="text/javascript" src="/responsive/header/js/jquery.autocomplete-1.4.2.js"></script>
<script type="text/javascript">
function findValue(li) {
if( li == null ) return alert("No match!");
@@ -77,7 +47,6 @@
// otherwise, let's just display the value in the text box
else var sValue = li.selectValue;
-
$('#googlesearch2').submit();
}
@@ -100,9 +69,6 @@
function formatResult(row) {
return row[0].replace(/(<.+?>)/gi, '');
}
-
-
-
$("#newSearch").autocomplete("/searchjs.asp", {
width: 179,
@@ -116,7 +82,7 @@
});
- $("#search2").autocomplete("searchjs.asp", {
+ $("#search2").autocomplete("/searchjs.asp", {
width: 160,
autoFill: false,
//delay:10,
@@ -130,5202 +96,4790 @@
});
-
-
-
-
-
</script>
+<!-- END CODE NEEDED TO MAKE THE SEARCH BOX WORK -->
-<!-- Omniture SiteCatalyst Code -->
-<script language="JavaScript" type="text/javascript" src="/js/s_code_v1.js"></script>
<!-- FORESEE Code -->
<script type="text/javascript" src="/foresee/foresee-trigger.js"></script>
-<link rel="stylesheet" type="text/css" href="/header/css/header_style.css" />
-<!--[if lt IE 7]>
- <style media="screen" type="text/css">
- #site-container {
- height: 100%;
- }
- #footer-container {
- bottom: -1px;
- }
- </style>
- <![endif]-->
+<a href="#after_header" class="responsive_header-skip_header">Skip Header</a>
+<header>
+<div id="responsive_header">
+ <div id="responsive_header-right_side">
+ <ul id="responsive_header-links">
+ <li id="responsive_header-twitter" title="Visit FDIC on Twitter"><a tabindex="1" href="/social.html?site=http://twitter.com/FDICgov">Visit FDIC on Twitter</a></li>
+ <li id="responsive_header-facebook" title="Visit FDIC on Facebook"><a tabindex="1" href="/social.html?site=http://www.facebook.com/FDICgov">Visit FDIC on Facebook</a></li>
+ <li id="responsive_header-fdicchannel" title="Visit FDIC on YouTube"><a tabindex="1" href="/social.html?site=http://www.youtube.com/user/FDICchannel">Visit FDIC on YouTube</a></li>
+ <li id="responsive_header-rss" title="FDIC RSS Feed"><a tabindex="1" href="/rss.html">FDIC RSS Feed</a></li>
+ <li id="responsive_header-subscribe" title="Subscribe to FDIC alerts"><a tabindex="1" href="http://service.govdelivery.com/service/multi_subscribe.html?code=USFDIC">Subscribe to FDIC alerts</a></li>
+ </ul>
+ <div id="responsive_header-search">
+ <a href="/search/advanced.html" class="search" title="Advanced Search">Advanced Search</a>
+ <form id="googlesearch" action="http://search.fdic.gov/search" method="get" name="Search box for FDIC.gov">
+ <fieldset>
+ <div class="form">
+ <label for="q">Search FDIC.gov</label>
+ <input tabindex="1" id="newSearch" name="q" class="field" type="text" style="outline: 0 none;" value="Search FDIC..." onblur="if(this.value == '') {this.value = 'Search FDIC...';}" onfocus="if(this.value == 'Search FDIC...') {this.value = '';}" />
+ <input tabindex="1" id="searchsubmit" class="submit" alt="Search Icon" title="Search Icon" type="submit" value="" />
+ <input value="date:D:L:d1" name="sort" type="hidden" />
-<div id ="site-container">
- <div id="header-container"> <!-- start of header container -->
- <!-- everything inside the header is held within this container -->
- <div id="header-nav">
- <div id="header-nav-left-container">
-
- <div id="header-nav-left">
- <a href="/" alt="FDIC Logo" title="FDIC Home - Federal Deposit Insurance Corporation">
- <div id="fdic-logo" class="homeOff"></div>
- </a>
- </div> <!-- close header-nav-left -->
-
- <div id="header-nav-right">
- <div id="header-nav-right-top">
- <div id="fdic-title"></div>
+ <input value="xml_no_dtd" name="output" type="hidden" />
+ <input value="UTF-8" name="ie" type="hidden" />
+ <input value="UTF-8" name="oe" type="hidden" />
+ <input value="wwwGOV" name="client" type="hidden" />
+ <input value="wwwGOV" name="proxystylesheet" type="hidden" />
+ <input value="default" name="site" type="hidden" />
</div>
- <div id="header-nav-right-bottom">
- <h1>Each depositor insured to at least $250,000 per insured bank</h1>
- </div>
- </div> <!-- close header-nav-right -->
-
- </div> <!-- close header-nav-left-container -->
-
- <div id="header-nav-right-container">
- <div id="right-container-top">
- <div id="web2">
- <ul>
- <li><a href="/social.html?site=http://twitter.com/FDICgov"><img src="/header/images/web2/twitter.png" alt="Twitter" title="Twitter" height="24px"/></a></li>
- <li><a href="/social.html?site=http://www.facebook.com/FDICgov"><img src="/header/images/web2/facebook.png" alt="Facebook" title="Facebook" height="24px"/></a></li>
- <li><a href="/social.html?site=http://www.youtube.com/user/FDICchannel"><img src="/header/images/web2/youtube.png" alt="YouTube" title="YouTube" height="24px"/></a></li>
- <li><a href="/rss.html"><img src="/header/images/web2/rss.png" alt="RSS" title="RSS" height="24px"/></a></span></li>
- <li><a href="http://service.govdelivery.com/service/multi_subscribe.html?code=USFDIC"><img src="/header/images/web2/subscribe.png" alt="Subscribe" title="Subscribe" height="24px"/></a></li>
- </ul>
- </div>
- </div> <!-- close right-container-right-top -->
-
- <div id="right-container-center">
- <div id="advanced-search" title="Advanced Search"><a href="/search/advanced.html" class="search">Advanced Search</a></div>
- </div> <!-- close right-container-right-center -->
-
- <div id="right-container-bottom">
- <div id="search">
- <form id="googlesearch" action="http://search.fdic.gov/search" method="get" name="Search box for FDIC.gov">
- <fieldset>
- <div class="form" alt="Search box for FDIC.gov" title="Search box for FDIC.gov">
- <div class="search2">
- <label for="fdic_search"></label>
- <label for="searchsubmit"></label>
- </div>
- <input id="newSearch" name="q" class="field" type="text" style="outline: 0 none;" value="Search FDIC..." onblur="if(this.value == '') {this.value = 'Search FDIC...';}" onfocus="if(this.value == 'Search FDIC...') {this.value = '';}" />
- <input id="searchsubmit" class="submit" alt="Search Icon" title="Search Icon" type="submit" value="" />
- <input value="date:D:L:d1" name="sort" type="hidden" />
- <input value="xml_no_dtd" name="output" type="hidden" />
- <input value="UTF-8" name="ie" type="hidden" />
- <input value="UTF-8" name="oe" type="hidden" />
- <input value="wwwGOV_new" name="client" type="hidden" />
- <input value="wwwGOV_new" name="proxystylesheet" type="hidden" />
- <input value="default" name="site" type="hidden" />
- </div>
- </fieldset>
- </form>
- </div> <!-- close id="search" -->
- </div> <!-- close right-container-right-bottom -->
- </div> <!-- close header-nav-right-container -->
-
- </div> <!-- close header-nav **This is the top part of the header** -->
-
- <div id="top-nav"> <!-- start of top-nav class **This is the main navigation in header, color is light blue**-->
- <!-- top-nav unordered list -->
- <!-- lists all top-nav titles -->
- <!-- **************************************************************** -->
- <ul>
- <li><span id="home" title="Home"><a href="/">Home</a></span></li>
- <li><span>|</span></li>
- <li><span id="deposit" title="Deposit Insurance"><a href="/deposit/">Deposit Insurance</a></span></li>
- <li><span>|</span></li>
- <li><span id="consumers" title="Consumer Protection"><a href="/consumers/">Consumer Protection</a></span></li>
- <li><span>|</span></li>
- <li><span id="bank" title="Industry Analysis"><a href="/bank/">Industry Analysis</a></span></li>
- <li><span>|</span></li>
- <li><span id="regulations" title="Regulations & Examinations"><a href="/regulations/">Regulations & Examinations</a></span></li>
- <li><span>|</span></li>
- <li><span id="buying" title="Asset Sales"><a href="/buying/">Asset Sales</a></span></li>
- <li><span>|</span></li>
- <li><span id="news" title="News & Events"><a href="/news/">News & Events</a></span></li>
- <li><span>|</span></li>
- <li><span id="about" title="About FDIC"><a href="/about/">About FDIC</a></span></li>
- </ul>
- <!-- **************************************************************** -->
- <!-- close top-nav unordered list -->
- </div> <!-- close top-nav id -->
-
- <div id="sub-nav-container"> <!-- start of sub-nav-container **sub-silo of main navigation, color is gold -->
- <div id="sub-nav"> <!-- start of div id sub-nav -->
-
- <!-- lists all sub-nav ul tags -->
- <!-- **************************************************************** -->
- <!-- deposit sub -->
- <div id="deposit_sub" class="sub-wrapper"> <!-- div 1 for "Deposit" -->
- <ul>
- <li><span id="deposit_sub1" title="Bank Find"><a href="http://research.fdic.gov/bankfind/">BankFind</a></span></li>
- <li><span id="deposit_sub2" title="Are My Deposits Insured?"><a href="/deposit/deposits/">Are My Deposits Insured?</a></span></li>
- <li><span id="deposit_sub3" title="Uninsured Investments"><a href="/deposit/investments/">Uninsured Investments</a></span></li>
- <li><span id="deposit_sub4" title="The Deposit Insurance Fund"><a href="/deposit/insurance/index.html">The Deposit Insurance Fund</a></span></li>
- <li><span id="deposit_sub5" title="International Deposit Insurance"><a href="/deposit/deposits/international/">International Deposit Insurance</a></span></li>
- </ul>
- </div> <!-- close div 1-->
-
- <!-- consumer sub -->
- <div id="consumers_sub" class="sub-wrapper"> <!-- div 2 for "Consumer" -->
- <ul>
- <li><span id="consumers_sub1" title="Consumer News & Information"><a href="/consumers/consumer/">Consumer News & Information</a></span></li>
- <li><span id="consumers_sub2" title="Loans & Mortgages"><a href="/consumers/loans/">Loans & Mortgages</a></span></li>
- <li><span id="consumers_sub3" title="Banking & Your Money"><a href="/consumers/banking/">Banking & Your Money</a></span></li>
- <li><span id="consumers_sub4" title="Financial Education & Literacy"><a href="/consumers/education/">Financial Education & Literacy</a></span></li>
- <li><span id="consumers_sub5" title="Community Affairs"><a href="/consumers/community/">Community Affairs</a></span></li>
- <li><span id="consumers_sub6" title="Identity Theft & Fraud"><a href="/consumers/theft/">Identity Theft & Fraud</a></span></li>
- <li><span id="consumers_sub7" title="Consumer Financial Privacy"><a href="/consumers/privacy/">Consumer Financial Privacy</a></span></li>
- </ul>
- </div> <!-- close div 2 -->
-
- <!-- industry sub -->
- <div id="bank_sub" class="sub-wrapper"> <!-- div 3 for "Industry" -->
- <ul>
- <li><span id="bank_sub1" title="Bank Data & Statistics"><a href="/bank/statistical/">Bank Data & Statistics</a></span></li>
- <li><span id="bank_sub2" title="Research & Analysis"><a href="/bank/analytical/">Research & Analysis</a></span></li>
- <li><span id="bank_sub3" title="Failed Banks"><a href="/bank/individual/failed/">Failed Banks</a></span></li>
- </ul>
- </div> <!-- close div 3 -->
-
- <!-- regulations sub -->
- <div id="regulations_sub" class="sub-wrapper"> <!-- div 4 for "Regulations" -->
- <ul>
- <li><span id="regulations_sub1" title="Bank Examinations"><a href="/regulations/examinations/">Bank Examinations</a></span></li>
- <li><span id="regulations_sub2" title="Laws & Regulations"><a href="/regulations/laws/">Laws & Regulations</a></span></li>
- <li><span id="regulations_sub3" title="Resources for Bank Officers & Directors"><a href="/regulations/resources/">Resources for Bank Officers & Directors</a></span></li>
- <li><span id="regulations_sub4" title="FDICconnect"><a href="http://www.fdicconnect.gov/">FDIC<em>connect</em></a></span></li>
- <li><span id="regulations_sub5" title="Required Financial Reports"><a href="/regulations/required/">Required Financial Reports</a></span></li>
- <li><span id="regulations_sub6" title="Examiner Training Programs"><a href="/regulations/examiner/">Examiner Training Programs</a></span></li>
- </ul>
- </div> <!-- close div 4 -->
-
- <!-- asset sub -->
- <div id="buying_sub" class="sub-wrapper"> <!-- div 5 for "Asset" -->
- <ul>
- <li><span id="buying_sub1" title="Loan Sales"><a href="/buying/loan/">Loan Sales</a></span></li>
- <li><span id="buying_sub2" title="Real Estate Sales"><a href="/buying/owned/">Real Estate and Property Marketplace</a></span></li>
- <li><span id="buying_sub3" title="Financial Asset Sales"><a href="/buying/financial/">Financial Asset Sales</a></span></li>
- <li><span id="buying_sub4" title="Servicing Sales Announcements"><a href="/buying/servicing/">Servicing Sales Announcements</a></span></li>
- <li><span id="buying_sub5" title="Other Asset Sales"><a href="/buying/otherasset/">Other Asset Sales</a></span></li>
- <li><span id="buying_sub6" title="Historical Sales"><a href="/buying/historical/">Historical Sales</a></span></li>
- </ul>
- </div> <!-- close div 5 -->
-
- <!-- news sub -->
- <div id="news_sub" class="sub-wrapper"> <!-- div 6 for "News" -->
- <ul>
- <li><span id="news_sub1" title="Press Releases"><a href="/news/news/press/2013/">Press Releases</a></span></li>
- <li><span id="news_sub2" title="Online Press Room"><a href="https://fdicsurvey.inquisiteasp.com/fdic/cgi-bin/qwebcorporate.dll?M58TRS">Online Press Room</a></span></li>
- <li><span id="news_sub3" title="Conferences & Events"><a href="/news/conferences/">Conferences & Events</a></span></li>
- <li><span id="news_sub4" title="Financial Institution Letters"><a href="/news/news/financial/2013/">Financial Institution Letters</a></span></li>
-
- <!-- include this lnk for year 2013 and remove 2012 link below <li><span id="news_sub5" title="Special Alerts"><a href="/news/news/SpecialAlert/2013/">Special Alerts</a></span></li>-->
- <li><span id="news_sub5" title="Special Alerts"><a href="/news/news/SpecialAlert/2012/">Special Alerts</a></span></li>
- <li><span id="news_sub6" title="Letters to the Editor/Opinion Editorials"><a href="/news/letters/">Letters to the Editor/Opinion Editorials</a></span></li>
- <li><span id="news_sub7" title="Speeches & Testimony"><a href="/news/news/speeches/">Speeches & Testimony</a></span></li>
- </ul>
- </div> <!-- close div 6 -->
-
- <!-- news sub -->
- <div id="about_sub" class="sub-wrapper"> <!-- div 6 for "News" -->
- <ul>
- <li><span id="about_sub1" title="Mission & Purpose"><a href="/about/index.html#1">Mission & Purpose</a></span></li>
- <li><span id="about_sub2" title="Advisory Committees"><a href="/about/index.html#2">Advisory Committees</a></span></li>
- <li><span id="about_sub3" title="Careers with the FDIC"><a href="/about/index.html#3">Careers with the FDIC</a></span></li>
- <li><span id="about_sub4" title="Management Team"><a href="/about/index.html#4">Management Team</a></span></li>
- <li><span id="about_sub5" title="Plans & Reports"><a href="/about/index.html#5">Plans & Reports</a></span></li>
- <li><span id="about_sub6" title="What We Can Do for You"><a href="/about/index.html#6">What We Can Do for You</a></span></li>
- <li><span id="about_sub7" title="Diversity with the FDIC"><a href="/about/index.html#7">Diversity at the FDIC</a></span></li>
- </ul>
- </div> <!-- close div 6 -->
-
- <!-- **************************************************************** -->
- </div> <!-- close of id - sub-nav -->
- </div> <!-- close of id - sub-nav-container -->
- </div> <!-- end of the header-container -->
-<div id="body">
-<!-- end header -->
-<font face="arial, helvetica, sans-serif" size="2">
-<!-- END HEADER INCLUDE -->
-
-<!-- Instruction: The following meta tags are for the keywords and document author. If desired change "name of the document" owner to the actual name of the owner and change "add keywords here" to a list of keywords separated by a comma. -->
-<meta name="author" content="DRR" />
-<meta http-equiv="keywords" name="keywords" content="banks, financial institutions, failed, failure, closing, deposits, depositors,
-banking services, assuming institution, acquiring institution, claims" />
-
-
-<link rel="stylesheet" type="text/css" media="print" href="http://www.fdic.gov/style_productprint.css" />
-
-<img src="http://www.fdic.gov/images/spacer.gif" width="1" height="2" alt="" border="0" /><br />
-<table width="670" cellspacing="0" cellpadding="0" border="0">
- <tr>
- <td width="1" bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="1" height="1" alt="" border="0" /></td>
- <td width="14" bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="14" height="1" alt="" border="0" /></td>
- <td width="739" bgcolor="#cccccc"><span class="noDisplay"><img src="http://www.fdic.gov/images/spacer.gif" width="739" height="1" alt="" border="0" /></span></td>
- <td width="1" bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="1" height="1" alt="" border="0" /></td>
- </tr>
-
- <tr>
- <td bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="1" height="24" alt="" border="0" /><br /></td>
- <td></td>
- <td width="739">
-
- <!-- BEGIN BREAD CRUMB TRAIL -->
-
- <!-- Instruction: Change the "Tertiary" link text to the correct third-level menu page name and the href value to the appropriate relative path to the third-level menu page. -->
-
- <!-- Instruction: Change the "Product Title" text to the name of the approved product title. -->
-
- <font face="arial, helvetica,sans-serif" size="1"><a href="/index.html">Home</a> > <a href="/bank/index.html">Industry
- Analysis</a> > <a href="/bank/individual/failed/index.html">Failed Banks</a> > Failed
- Bank List</font><br />
-
- <!-- END BREAD CRUMB TRAIL -->
-
- </td>
- <td bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="1" height="1" alt="" border="0" /><br /></td>
- </tr>
-
- <tr>
- <td colspan="4" bgcolor="#cccccc"><img src="http://www.fdic.gov/images/spacer.gif" width="1" height="1" alt="" border="0" /><br /></td>
- </tr>
-</table>
-
-<table width="640" cellspacing="0" cellpadding="0" border="0">
- <tr>
- <td width="25"><img src="http://www.fdic.gov/images/spacer.gif" width="25" height="1" alt="" border="0" /><br /></td>
- <td colspan="2">
- <br />
-<!-- DRR BEGIN Product Title & Body-->
-<!-- DRR BEGIN Product Title & Body-->
-<table width="100%" cellpadding="0" cellspacing="0" border="0">
-<!-- BEGIN PRODUCT TITLE -->
-<tr>
- <td>
- <!-- Instruction: Change the "Product Title" text to the name of the approved product title. -->
-
- <font face="arial, helvetica, sans-serif" size="4" color="#003366"><strong><a name="top">Failed
- Bank List</a></strong></font>
- <hr size="1" color="#003366" noshade />
-
-
- </td>
-</tr>
-
-<!-- END PRODUCT TITLE -->
-<!-- DOCUMENT BODY BEGINS HERE -->
-<tr>
- <td valign="top">
- <table border="0" cellpadding="0" cellspacing="0" width="900">
-
- <tr>
- <td> <font face="arial, helvetica, sans-serif" size="2">
- <br />The FDIC is often appointed as receiver for failed banks. This page contains useful information for the customers and vendors of these banks. This includes information on the acquiring bank (if applicable), how your accounts and loans are affected, and how vendors can file claims against the receivership.
- <a href="http://www2.fdic.gov/drrip/cs/index.asp">Failed Financial Institution Contact Search</a>
- displays point of contact information related to failed banks.<br /><br />
-
- This list includes banks which have failed since October 1, 2000. To search for banks that failed prior to those on this page, visit this link: <a href="http://www2.fdic.gov/hsob/SelectRpt.asp?EntryTyp=30">Failures and Assistance Transactions
-</a><br /><br />
-
- <!-- <a href="banklist.csv">Open Bank List as CSV file</a> -->
- <a href="banklist.csv">Failed Bank List</a> - CSV file (Updated on Mondays. Also opens in Excel - <a href="http://www.fdic.gov/excel.html">Excel
- Help</a>)
- <br />
- <script type="text/javascript">
- <!--
- document.writeln("<br /><em>Click arrows next to headers to sort in Ascending or Descending order.</em><br />");
-//-->
- </script><br />
- </font>
- </td>
- </tr>
- </table>
- </td>
-</tr>
-
-<tr>
- <td>
- <table cellpadding="0" cellspacing="0" bordercolordark="#003366" bordercolorlight="ebebeb" border="1" id="table" class="sortable">
- <thead>
- <tr bgcolor="#003366">
- <th id="Institution"><h3>Bank Name</h3></th>
- <th class="nosort" id="city" style="padding-left:3px"><h3>City</h3></th>
- <th id="state"><h3>State</h3></th>
- <th id="CERT #" class="nosort" style="padding-left:3px"><h3>CERT #</h3></th>
- <th id="AI" style="padding-left:3px"><h3>Acquiring Institution</h3></th>
- <th id="Closing"><h3>Closing Date</h3></th>
- <th id="Updated"><h3>Updated Date</h3></th>
- </tr>
- </thead>
- <tbody>
-
- <tr>
- <td><a href="douglascb.html">Douglas County Bank</a></td>
- <td headers="city">Douglasville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">21649</td>
- <td headers="AI">Hamilton State Bank</td>
- <td headers="Closing Date">April 26, 2013</td>
- <td headers="Updated">April 30, 2013</td>
-</tr>
- <tr>
- <td><a href="parkway.html">Parkway Bank</a></td>
- <td headers="city">Lenoir</td>
- <td headers="state">NC</td>
- <td headers="CERT #">57158</td>
- <td headers="AI">CertusBank, National Association</td>
- <td headers="Closing Date">April 26, 2013</td>
- <td headers="Updated">April 30, 2013</td>
-</tr>
-<tr>
- <td><a href="chipola.html">Chipola Community Bank</a></td>
- <td headers="city">Marianna</td>
- <td headers="state">FL</td>
- <td headers="CERT #">58034</td>
- <td headers="AI">First Federal Bank of Florida</td>
- <td headers="Closing Date">April 19, 2013</td>
- <td headers="Updated">April 23, 2013</td>
-</tr>
-<tr>
- <td><a href="heritagebank-fl.html">Heritage Bank of North Florida</a></td>
- <td headers="city">Orange Park</td>
- <td headers="state">FL</td>
- <td headers="CERT #">26680</td>
- <td headers="AI">FirstAtlantic Bank</td>
- <td headers="Closing Date">April 19, 2013</td>
- <td headers="Updated">April 23, 2013</td>
-</tr>
-<tr>
- <td><a href="firstfederal-ky.html">First Federal Bank</a></td>
- <td headers="city">Lexington</td>
- <td headers="state">KY</td>
- <td headers="CERT #">29594</td>
- <td headers="AI">Your Community Bank</td>
- <td headers="Closing Date">April 19, 2013</td>
- <td headers="Updated">April 23, 2013</td>
-</tr>
-<td><a href="goldcanyon.html">Gold Canyon Bank</a></td>
- <td headers="city">Gold Canyon</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">58066</td>
- <td headers="AI">First Scottsdale Bank,
-National Association</td>
- <td headers="Closing Date">April 5, 2013</td>
- <td headers="Updated">April 9, 2013</td>
-</tr>
-<tr>
- <td><a href="frontier-ga.html">Frontier Bank</a></td>
- <td headers="city">LaGrange</td>
- <td headers="state">GA</td>
- <td headers="CERT #">16431</td>
- <td headers="AI">HeritageBank of the South</td>
- <td headers="Closing Date">March 8, 2013</td>
- <td headers="Updated">March 26, 2013</td>
-</tr>
-<tr>
- <td><a href="covenant-il.html">Covenant Bank</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">22476</td>
- <td headers="AI">Liberty Bank and Trust Company</td>
- <td headers="Closing Date">February 15, 2013</td>
- <td headers="Updated">March 4, 2013</td>
-</tr>
-<tr>
- <td><a href="1stregents.html">1st Regents Bank</a></td>
- <td headers="city">Andover</td>
- <td headers="state">MN</td>
- <td headers="CERT #">57157</td>
- <td headers="AI">First Minnesota Bank</td>
- <td headers="Closing Date">January 18, 2013</td>
- <td headers="Updated">February 28, 2013</td>
-</tr>
-<tr>
- <td><a href="westside.html">Westside Community Bank</a></td>
- <td headers="city">University Place</td>
- <td headers="state">WA</td>
- <td headers="CERT #">33997</td>
- <td headers="AI">Sunwest Bank</td>
- <td headers="Closing Date">January 11, 2013</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
-<tr>
- <td><a href="cmbkozarks.html">Community Bank of the Ozarks</a></td>
- <td headers="city">Sunrise Beach</td>
- <td headers="state">MO</td>
- <td headers="CERT #">27331</td>
- <td headers="AI">Bank of Sullivan</td>
- <td headers="Closing Date">December 14, 2012</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
-<tr>
- <td><a href="hometown.html">Hometown Community Bank</a></td>
- <td headers="city">Braselton</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57928</td>
- <td headers="AI">CertusBank, National Association</td>
- <td headers="Closing Date">November 16, 2012</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
-<tr>
- <td><a href="cfnb.html">Citizens First National Bank</a></td>
- <td headers="city">Princeton</td>
- <td headers="state">IL</td>
- <td headers="CERT #">3731</td>
- <td headers="AI">Heartland Bank and Trust Company</td>
- <td headers="Closing Date">November 2, 2012</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
- <tr>
- <td><a href="heritage_fl.html">Heritage Bank of Florida</a></td>
- <td headers="city">Lutz</td>
- <td headers="state">FL</td>
- <td headers="CERT #">35009</td>
- <td headers="AI">Centennial Bank</td>
- <td headers="Closing Date">November 2, 2012</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
-<tr>
- <td><a href="novabank.html">NOVA Bank</a></td>
- <td headers="city">Berwyn</td>
- <td headers="state">PA</td>
- <td headers="CERT #">27148</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">October 26, 2012</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
-<tr>
- <td><a href="excelbank.html">Excel Bank</a></td>
- <td headers="city">Sedalia</td>
- <td headers="state">MO</td>
- <td headers="CERT #">19189</td>
- <td headers="AI">Simmons First National Bank</td>
- <td headers="Closing Date">October 19, 2012</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
-<tr>
- <td><a href="firsteastside.html">First East Side Savings Bank</a></td>
- <td headers="city">Tamarac</td>
- <td headers="state">FL</td>
- <td headers="CERT #">28144</td>
- <td headers="AI">Stearns Bank N.A.</td>
- <td headers="Closing Date">October 19, 2012</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
-<tr>
- <td><a href="gulfsouth.html">GulfSouth Private Bank</a></td>
- <td headers="city">Destin</td>
- <td headers="state">FL</td>
- <td headers="CERT #">58073</td>
- <td headers="AI">SmartBank</td>
- <td headers="Closing Date">October 19, 2012</td>
- <td headers="Updated">January 24, 2013</td>
-</tr>
-<tr>
- <td><a href="firstunited.html">First United Bank</a></td>
- <td headers="city">Crete</td>
- <td headers="state">IL</td>
- <td headers="CERT #">20685</td>
- <td headers="AI">Old Plank Trail Community Bank, National Association</td>
- <td headers="Closing Date">September 28, 2012</td>
- <td headers="Updated">November 15, 2012</td>
-</tr>
-<tr>
- <td><a href="truman.html">Truman Bank</a></td>
- <td headers="city">St. Louis</td>
- <td headers="state">MO</td>
- <td headers="CERT #">27316</td>
- <td headers="AI">Simmons First National Bank</td>
- <td headers="Closing Date">September 14, 2012</td>
- <td headers="Updated">December 17, 2012</td>
-</tr>
-<tr>
- <td><a href="firstcommbk_mn.html">First Commercial Bank</a></td>
- <td headers="city">Bloomington</td>
- <td headers="state">MN</td>
- <td headers="CERT #">35246</td>
- <td headers="AI">Republic Bank & Trust Company</td>
- <td headers="Closing Date">September 7, 2012</td>
- <td headers="Updated">December 17, 2012</td>
-</tr>
-<tr>
- <td><a href="waukegan.html">Waukegan Savings Bank</a></td>
- <td headers="city">Waukegan</td>
- <td headers="state">IL</td>
- <td headers="CERT #">28243</td>
- <td headers="AI"> First Midwest Bank</td>
- <td headers="Closing Date">August 3, 2012</td>
- <td headers="Updated">October 11, 2012</td>
-</tr>
-<tr>
- <td><a href="jasper.html">Jasper Banking Company</a></td>
- <td headers="city">Jasper</td>
- <td headers="state">GA</td>
- <td headers="CERT #">16240</td>
- <td headers="AI">Stearns Bank N.A.</td>
- <td headers="Closing Date">July 27, 2012</td>
- <td headers="Updated">December 17, 2012</td>
-</tr>
-<tr>
- <td><a href="secondfederal.html">Second Federal Savings and Loan Association of Chicago</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">27986</td>
- <td headers="AI">Hinsdale Bank & Trust Company</td>
- <td headers="Closing Date">July 20, 2012</td>
- <td headers="Updated">January 14, 2013</td>
-</tr>
-<tr>
- <td><a href="heartland.html">Heartland Bank</a></td>
- <td headers="city">Leawood</td>
- <td headers="state">KS</td>
- <td headers="CERT #">1361</td>
- <td headers="AI">Metcalf Bank</td>
- <td headers="Closing Date">July 20, 2012</td>
- <td headers="Updated">December 17, 2012</td>
-</tr>
-<tr>
- <td><a href="cherokee.html">First Cherokee State Bank</a></td>
- <td headers="city">Woodstock</td>
- <td headers="state">GA</td>
- <td headers="CERT #">32711</td>
- <td headers="AI">Community & Southern Bank</td>
- <td headers="Closing Date">July 20, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="georgiatrust.html">Georgia Trust Bank</a></td>
- <td headers="city">Buford</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57847</td>
- <td headers="AI">Community & Southern Bank</td>
- <td headers="Closing Date">July 20, 2012</td>
- <td headers="Updated">December 17, 2012</td>
-</tr>
-<tr>
- <td><a href="royalpalm.html">The Royal Palm Bank of Florida</a></td>
- <td headers="city">Naples</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57096</td>
- <td headers="AI">First National Bank of the Gulf Coast</td>
- <td headers="Closing Date">July 20, 2012</td>
- <td headers="Updated">January 7, 2013</td>
-</tr>
-<tr>
- <td><a href="glasgow.html">Glasgow Savings Bank</a></td>
- <td headers="city">Glasgow</td>
- <td headers="state">MO</td>
- <td headers="CERT #">1056</td>
- <td headers="AI"> Regional Missouri Bank</td>
- <td headers="Closing Date">July 13, 2012</td>
- <td headers="Updated">October 11, 2012</td>
-</tr>
-<tr>
- <td><a href="montgomery.html">Montgomery Bank & Trust</a></td>
- <td headers="city">Ailey</td>
- <td headers="state">GA</td>
- <td headers="CERT #">19498</td>
- <td headers="AI"> Ameris Bank</td>
- <td headers="Closing Date">July 6, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="farmersbank.html">The Farmers Bank of Lynchburg</a></td>
- <td headers="city">Lynchburg</td>
- <td headers="state">TN</td>
- <td headers="CERT #">1690</td>
- <td headers="AI">Clayton Bank and Trust</td>
- <td headers="Closing Date">June 15, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="securityexchange.html">Security Exchange Bank</a></td>
- <td headers="city">Marietta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">35299</td>
- <td headers="AI">Fidelity Bank</td>
- <td headers="Closing Date">June 15, 2012</td>
- <td headers="Updated">October 10, 2012</td>
-</tr>
-<tr>
- <td><a href="putnam.html">Putnam State Bank</a></td>
- <td headers="city">Palatka</td>
- <td headers="state">FL</td>
- <td headers="CERT #">27405</td>
- <td headers="AI">Harbor Community Bank</td>
- <td headers="Closing Date">June 15, 2012</td>
- <td headers="Updated">October 10, 2012</td>
-</tr>
-<tr>
- <td><a href="waccamaw.html">Waccamaw Bank</a></td>
- <td headers="city">Whiteville</td>
- <td headers="state">NC</td>
- <td headers="CERT #">34515</td>
- <td headers="AI">First Community Bank</td>
- <td headers="Closing Date">June 8, 2012</td>
- <td headers="Updated">November 8, 2012</td>
-</tr>
-<tr>
- <td><a href="ftsb.html">Farmers' and Traders' State Bank</a></td>
- <td headers="city">Shabbona</td>
- <td headers="state">IL</td>
- <td headers="CERT #">9257</td>
- <td headers="AI">First State Bank</td>
- <td headers="Closing Date">June 8, 2012</td>
- <td headers="Updated">October 10, 2012</td>
-</tr>
-<tr>
- <td><a href="carolina.html">Carolina Federal Savings Bank</a></td>
- <td headers="city">Charleston</td>
- <td headers="state">SC</td>
- <td headers="CERT #">35372</td>
- <td headers="AI">Bank of North Carolina</td>
- <td headers="Closing Date">June 8, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="firstcapital.html">First Capital Bank</a></td>
- <td headers="city">Kingfisher</td>
- <td headers="state">OK</td>
- <td headers="CERT #">416</td>
- <td headers="AI">F & M Bank</td>
- <td headers="Closing Date">June 8, 2012</td>
- <td headers="Updated">October 10, 2012</td>
-</tr>
-<tr>
- <td><a href="alabamatrust.html">Alabama Trust Bank, National Association</a></td>
- <td headers="city">Sylacauga</td>
- <td headers="state">AL</td>
- <td headers="CERT #">35224</td>
- <td headers="AI">Southern States Bank</td>
- <td headers="Closing Date">May 18, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="securitybank.html">Security Bank, National Association</a></td>
- <td headers="city">North Lauderdale</td>
- <td headers="state">FL</td>
- <td headers="CERT #">23156</td>
- <td headers="AI">Banesco USA</td>
- <td headers="Closing Date">May 4, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="palmdesert.html">Palm Desert National Bank</a></td>
- <td headers="city">Palm Desert</td>
- <td headers="state">CA</td>
- <td headers="CERT #">23632</td>
- <td headers="AI">Pacific Premier Bank</td>
- <td headers="Closing Date">April 27, 2012</td>
- <td headers="Updated">August 31, 2012</td>
-</tr>
-<tr>
- <td><a href="plantation.html">Plantation Federal Bank</a></td>
- <td headers="city">Pawleys Island</td>
- <td headers="state">SC</td>
- <td headers="CERT #">32503</td>
- <td headers="AI">First Federal Bank</td>
- <td headers="Closing Date">April 27, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="interbank.html">Inter Savings Bank, fsb D/B/A InterBank, fsb</a></td>
- <td headers="city">Maple Grove</td>
- <td headers="state">MN</td>
- <td headers="CERT #">31495</td>
- <td headers="AI">Great Southern Bank</td>
- <td headers="Closing Date">April 27, 2012</td>
- <td headers="Updated">October 17, 2012</td>
-</tr>
-<tr>
- <td><a href="harvest.html">HarVest Bank of Maryland</a></td>
- <td headers="city">Gaithersburg</td>
- <td headers="state">MD</td>
- <td headers="CERT #">57766</td>
- <td headers="AI">Sonabank</td>
- <td headers="Closing Date">April 27, 2012</td>
- <td headers="Updated">October 17, 2012</td>
-</tr>
-<tr>
- <td><a href="easternshore.html">Bank of the Eastern Shore</a></td>
- <td headers="city">Cambridge</td>
- <td headers="state">MD</td>
- <td headers="CERT #">26759</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">April 27, 2012</td>
- <td headers="Updated">October 17, 2012</td>
-</tr>
-<tr>
- <td><a href="fortlee.html">Fort Lee Federal Savings Bank, FSB</a></td>
- <td headers="city">Fort Lee</td>
- <td headers="state">NJ</td>
- <td headers="CERT #">35527</td>
- <td headers="AI">Alma Bank</td>
- <td headers="Closing Date">April 20, 2012</td>
- <td headers="Updated">August 31, 2012</td>
-</tr>
-<tr>
- <td><a href="fidelity.html">Fidelity Bank</a></td>
- <td headers="city">Dearborn</td>
- <td headers="state">MI</td>
- <td headers="CERT #">33883</td>
- <td headers="AI">The Huntington National Bank</td>
- <td headers="Closing Date">March 30, 2012</td>
- <td headers="Updated">August 9, 2012</td>
-</tr>
-<tr>
- <td><a href="premier-il.html">Premier Bank</a></td>
- <td headers="city">Wilmette</td>
- <td headers="state">IL</td>
- <td headers="CERT #">35419</td>
- <td headers="AI">International Bank of Chicago</td>
- <td headers="Closing Date">March 23, 2012</td>
- <td headers="Updated">October 17, 2012</td>
-</tr>
-<tr>
- <td><a href="covenant.html">Covenant Bank & Trust</a></td>
- <td headers="city">Rock Spring</td>
- <td headers="state">GA</td>
- <td headers="CERT #">58068</td>
- <td headers="AI">Stearns Bank, N.A.</td>
- <td headers="Closing Date">March 23, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="newcity.html">New City Bank </a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">57597</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">March 9, 2012</td>
- <td headers="Updated">October 29, 2012</td>
-</tr>
-<tr>
- <td><a href="global.html">Global Commerce Bank</a></td>
- <td headers="city">Doraville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34046</td>
- <td headers="AI">Metro City Bank</td>
- <td headers="Closing Date">March 2, 2012</td>
- <td headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="homesvgs.html">Home Savings of America</a></td>
- <td headers="city">Little Falls</td>
- <td headers="state">MN</td>
- <td headers="CERT #">29178</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">February 24, 2012</td>
- <td headers="Updated">December 17, 2012</td>
-</tr>
-<tr>
- <td><a href="cbg.html">Central Bank of Georgia</a></td>
- <td headers="city">Ellaville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">5687</td>
- <td headers="AI">Ameris Bank</td>
- <td headers="Closing Date">February 24, 2012</td>
- <td headers="Updated">August 9, 2012</td>
-</tr>
-<tr>
- <td><a href="scbbank.html">SCB Bank</a></td>
- <td headers="city">Shelbyville</td>
- <td headers="state">IN</td>
- <td headers="CERT #">29761</td>
- <td headers="AI">First Merchants Bank, National Association</td>
- <td headers="Closing Date">February 10, 2012</td>
- <td headers="Updated">March 25, 2013</td>
-</tr>
-<tr>
- <td><a href="cnbt.html">Charter National Bank and Trust</a></td>
- <td headers="city">Hoffman Estates</td>
- <td headers="state">IL</td>
- <td headers="CERT #">23187</td>
- <td headers="AI">Barrington Bank & Trust
-Company, National Association</td>
- <td headers="Closing Date">February 10, 2012</td>
- <td headers="Updated">March 25, 2013</td>
-</tr>
-<tr>
- <td><a href="bankeast.html">BankEast</a></td>
- <td headers="city">Knoxville</td>
- <td headers="state">TN</td>
- <td headers="CERT #">19869</td>
- <td headers="AI">U.S.Bank National Association </td>
- <td headers="Closing Date">January 27, 2012</td>
- <td headers="Updated">March 8, 2013</td>
-</tr>
-<tr>
- <td><a href="patriot-mn.html">Patriot Bank Minnesota</a></td>
- <td headers="city">Forest Lake</td>
- <td headers="state">MN</td>
- <td headers="CERT #">34823</td>
- <td headers="AI">First Resource Bank</td>
- <td headers="Closing Date">January 27, 2012</td>
- <td headers="Updated">September 12, 2012</td>
-</tr>
-<tr>
- <td><a href="tcb.html">Tennessee Commerce Bank
-</a></td>
- <td headers="city">Franklin</td>
- <td headers="state">TN</td>
- <td headers="CERT #">35296</td>
- <td headers="AI">Republic Bank & Trust Company</td>
- <td headers="Closing Date">January 27, 2012</td>
- <td headers="Updated">November 20, 2012</td>
-</tr>
-<tr>
- <td><a href="fgbtcj.html">First Guaranty Bank and Trust Company of Jacksonville</a></td>
- <td headers="city">Jacksonville</td>
- <td headers="state">FL</td>
- <td headers="CERT #">16579</td>
- <td headers="AI">CenterState Bank of Florida, N.A.</td>
- <td headers="Closing Date">January 27, 2012</td>
- <td headers="Updated">September 12, 2012</td>
-</tr>
-<tr>
- <td><a href="americaneagle.html">American Eagle Savings Bank</a></td>
- <td headers="city">Boothwyn</td>
- <td headers="state">PA</td>
- <td headers="CERT #">31581</td>
- <td headers="AI">Capital Bank, N.A.</td>
- <td headers="Closing Date">January 20, 2012</td>
- <td headers="Updated">January 25, 2013</td>
-</tr>
-<tr>
- <td><a href="firststatebank-ga.html">The First State Bank</a></td>
- <td headers="city">Stockbridge</td>
- <td headers="state">GA</td>
- <td headers="CERT #">19252</td>
- <td headers="AI">Hamilton State Bank</td>
- <td headers="Closing Date">January 20, 2012</td>
- <td headers="Updated">January 25, 2013</td>
-</tr>
-<tr>
- <td><a href="cfsb.html">Central Florida State Bank</a></td>
- <td headers="city">Belleview</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57186</td>
- <td headers="AI">CenterState Bank of Florida, N.A.</td>
- <td headers="Closing Date">January 20, 2012</td>
- <td headers="Updated">January 25, 2013</td>
-</tr>
-<tr>
- <td><a href="westernnatl.html">Western National Bank</a></td>
- <td headers="city">Phoenix</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">57917</td>
- <td headers="AI"> Washington Federal</td>
- <td headers="Closing Date">December 16, 2011</td>
- <td headers="Updated">August 13, 2012</td>
-</tr>
-<tr>
-<td><a href="premier-fl.html">Premier Community Bank of the Emerald Coast</a></td>
- <td headers="city">Crestview</td>
- <td headers="state">FL</td>
- <td headers="CERT #">58343</td>
- <td headers="AI"> Summit Bank</td>
- <td headers="Closing Date">December 16, 2011</td>
- <td headers="Updated">September 12, 2012</td>
-</tr>
-<tr>
- <td><a href="centralprog.html">Central Progressive Bank</a></td>
- <td headers="city">Lacombe</td>
- <td headers="state">LA</td>
- <td headers="CERT #">19657</td>
- <td headers="AI"> First NBC Bank</td>
- <td headers="Closing Date">November 18, 2011</td>
- <td headers="Updated">August 13, 2012</td>
-</tr>
-<tr>
- <td><a href="polkcounty.html">Polk County Bank</a></td>
- <td headers="city">Johnston</td>
- <td headers="state">IA</td>
- <td headers="CERT #">14194</td>
- <td headers="AI">Grinnell State Bank</td>
- <td headers="Closing Date">November 18, 2011</td>
- <td headers="Updated">August 15, 2012</td>
-</tr>
-<tr>
- <td><a href="rockmart.html">Community Bank of Rockmart</a></td>
- <td headers="city">Rockmart</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57860</td>
- <td headers="AI">Century Bank of Georgia</td>
- <td headers="Closing Date">November 10, 2011</td>
- <td headers="Updated">August 13, 2012</td>
-</tr>
-<tr>
- <td><a href="sunfirst.html">SunFirst Bank</a></td>
- <td headers="city">Saint George</td>
- <td headers="state">UT</td>
- <td headers="CERT #">57087</td>
- <td headers="AI">Cache Valley Bank</td>
- <td headers="Closing Date">November 4, 2011</td>
- <td headers="Updated">November 16, 2012</td>
-</tr>
-<tr>
- <td><a href="midcity.html">Mid City Bank, Inc.</a></td>
- <td headers="city">Omaha</td>
- <td headers="state">NE</td>
- <td headers="CERT #">19397</td>
- <td headers="AI">Premier Bank</td>
- <td headers="Closing Date">November 4, 2011</td>
- <td headers="Updated">August 15, 2012</td>
-</tr>
-<tr>
- <td><a href="allamerican.html ">All American Bank</a></td>
- <td headers="city">Des Plaines</td>
- <td headers="state">IL</td>
- <td headers="CERT #">57759</td>
- <td headers="AI">International Bank of Chicago</td>
- <td headers="Closing Date">October 28, 2011</td>
- <td headers="Updated">August 15, 2012</td>
-</tr>
-<tr>
- <td><a href="commbanksco.html">Community Banks of Colorado</a></td>
- <td headers="city">Greenwood Village</td>
- <td headers="state">CO</td>
- <td headers="CERT #">21132</td>
- <td headers="AI">Bank Midwest, N.A.</td>
- <td headers="Closing Date">October 21, 2011</td>
- <td headers="Updated">January 2, 2013</td>
-</tr>
-<tr>
- <td><a href="commcapbk.html">Community Capital Bank</a></td>
- <td headers="city">Jonesboro</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57036</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">October 21, 2011</td>
- <td headers="Updated">November 8, 2012</td>
-</tr>
-<tr>
- <td><a href="decatur.html">Decatur First Bank</a></td>
- <td headers="city">Decatur</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34392</td>
- <td headers="AI">Fidelity Bank</td>
- <td headers="Closing Date">October 21, 2011</td>
- <td headers="Updated">November 8, 2012</td>
-</tr>
-<tr>
- <td><a href="oldharbor.html">Old Harbor Bank</a></td>
- <td headers="city">Clearwater</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57537</td>
- <td headers="AI">1st United Bank</td>
- <td headers="Closing Date">October 21, 2011</td>
- <td headers="Updated">November 8, 2012</td>
-</tr>
-<tr>
- <td><a href="countrybank.html">Country Bank</a></td>
- <td headers="city">Aledo</td>
- <td headers="state">IL</td>
- <td headers="CERT #">35395</td>
- <td headers="AI">Blackhawk Bank & Trust</td>
- <td headers="Closing Date">October 14, 2011</td>
- <td headers="Updated">August 15, 2012</td>
-</tr>
-<tr>
- <td><a href="firststatebank-nj.html">First State Bank</a></td>
- <td headers="city">Cranford</td>
- <td headers="state">NJ</td>
- <td headers="CERT #">58046</td>
- <td headers="AI">Northfield Bank</td>
- <td headers="Closing Date">October 14, 2011</td>
- <td headers="Updated">November 8, 2012</td>
-</tr>
-<tr>
- <td><a href="blueridge.html">Blue Ridge Savings Bank, Inc.</a></td>
- <td headers="city">Asheville</td>
- <td headers="state">NC</td>
- <td headers="CERT #">32347</td>
- <td headers="AI">Bank of North Carolina</td>
- <td headers="Closing Date">October 14, 2011</td>
- <td headers="Updated">November 8, 2012</td>
-</tr>
-<tr>
- <td><a href="piedmont-ga.html">Piedmont Community Bank</a></td>
- <td headers="city">Gray</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57256</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">October 14, 2011</td>
- <td headers="Updated">January 22, 2013</td>
-</tr>
-<tr>
- <td><a href="sunsecurity.html">Sun Security Bank</a></td>
- <td headers="city">Ellington</td>
- <td headers="state">MO</td>
- <td headers="CERT #">20115</td>
- <td headers="AI"> Great Southern Bank </td>
- <td headers="Closing Date">October 7, 2011</td>
- <td headers="Updated">November 7, 2012</td>
-</tr>
-<tr>
- <td><a href="riverbank.html">The RiverBank</a></td>
- <td headers="city">Wyoming</td>
- <td headers="state">MN</td>
- <td headers="CERT #">10216</td>
- <td headers="AI"> Central Bank </td>
- <td headers="Closing Date">October 7, 2011</td>
- <td headers="Updated">November 7, 2012</td>
-</tr>
-<tr>
- <td><a href="firstintlbank.html">First International Bank</a></td>
- <td headers="city">Plano</td>
- <td headers="state">TX</td>
- <td headers="CERT #">33513</td>
- <td headers="AI"> American First National Bank </td>
- <td headers="Closing Date">September 30, 2011</td>
- <td headers="Updated">October 9, 2012</td>
-</tr>
-<tr>
- <td><a href="cbnc.html">Citizens Bank of Northern California</a></td>
- <td headers="city">Nevada City</td>
- <td headers="state">CA</td>
- <td headers="CERT #">33983</td>
- <td headers="AI"> Tri Counties Bank</td>
- <td headers="Closing Date">September 23, 2011</td>
- <td headers="Updated">October 9, 2012</td>
-</tr>
-<tr>
- <td><a href="boc-va.html">Bank of the Commonwealth</a></td>
- <td headers="city">Norfolk</td>
- <td headers="state">VA</td>
- <td headers="CERT #">20408</td>
- <td headers="AI">Southern Bank and Trust Company</td>
- <td headers="Closing Date">September 23, 2011</td>
- <td headers="Updated">October 9, 2012</td>
-</tr>
-<tr>
- <td><a href="fnbf.html">The First National Bank of Florida</a></td>
- <td headers="city">Milton</td>
- <td headers="state">FL</td>
- <td headers="CERT #">25155</td>
- <td headers="AI">CharterBank</td>
- <td headers="Closing Date">September 9, 2011</td>
- <td headers="Updated">September 6, 2012</td>
-</tr>
-<tr>
- <td><a href="creekside.html">CreekSide Bank</a></td>
- <td headers="city">Woodstock</td>
- <td headers="state">GA</td>
- <td headers="CERT #">58226</td>
- <td headers="AI">Georgia Commerce Bank</td>
- <td headers="Closing Date">September 2, 2011</td>
- <td headers="Updated">September 6, 2012</td>
-</tr>
-<tr>
- <td><a href="patriot.html">Patriot Bank of Georgia</a></td>
- <td headers="city">Cumming</td>
- <td headers="state">GA</td>
- <td headers="CERT #">58273</td>
- <td headers="AI">Georgia Commerce Bank</td>
- <td headers="Closing Date">September 2, 2011</td>
- <td headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="firstchoice-il.html">First Choice Bank</a></td>
- <td headers="city">Geneva</td>
- <td headers="state">IL</td>
- <td headers="CERT #">57212</td>
- <td headers="AI">Inland Bank & Trust</td>
- <td headers="Closing Date">August 19, 2011</td>
- <td headers="Updated">August 15, 2012</td>
-</tr>
-<tr>
- <td><a href="firstsouthern-ga.html">First Southern National Bank</a></td>
- <td headers="city">Statesboro</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57239</td>
- <td headers="AI">Heritage Bank of the South</td>
- <td headers="Closing Date">August 19, 2011</td>
- <td headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="lydian.html">Lydian Private Bank</a></td>
- <td headers="city">Palm Beach</td>
- <td headers="state">FL</td>
- <td headers="CERT #">35356</td>
- <td headers="AI">Sabadell United Bank, N.A.</td>
- <td headers="Closing Date">August 19, 2011</td>
- <td headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="publicsvgs.html">Public Savings Bank</a></td>
- <td headers="city">Huntingdon Valley</td>
- <td headers="state">PA</td>
- <td headers="CERT #">34130</td>
- <td headers="AI">Capital Bank, N.A.</td>
- <td headers="Closing Date">August 18, 2011</td>
- <td headers="Updated">August 15, 2012</td>
-</tr>
-<tr>
- <td><a href="fnbo.html">The First National Bank of Olathe</a></td>
- <td headers="city">Olathe</td>
- <td headers="state">KS</td>
- <td headers="CERT #">4744</td>
- <td headers="AI">Enterprise Bank & Trust</td>
- <td headers="Closing Date">August 12, 2011</td>
- <td headers="Updated">August 23, 2012</td>
-</tr>
-<tr>
- <td><a href="whitman.html">Bank of Whitman</a></td>
- <td headers="city">Colfax</td>
- <td headers="state">WA</td>
- <td headers="CERT #">22528</td>
- <td headers="AI">Columbia State Bank</td>
- <td headers="Closing Date">August 5, 2011</td>
- <td headers="Updated">August 16, 2012</td>
-</tr>
-<tr>
- <td><a href="shorewood.html">Bank of Shorewood</a></td>
- <td headers="city">Shorewood</td>
- <td headers="state">IL</td>
- <td headers="CERT #">22637</td>
- <td headers="AI">Heartland Bank and Trust Company</td>
- <td headers="Closing Date">August 5, 2011</td>
- <td headers="Updated">August 16, 2012</td>
-</tr>
-<tr>
- <td><a href="integra.html">Integra Bank National Association</a></td>
- <td headers="city">Evansville</td>
- <td headers="state">IN</td>
- <td headers="CERT #">4392</td>
- <td headers="AI">Old National Bank</td>
- <td headers="Closing Date">July 29, 2011</td>
- <td headers="Updated">August 16, 2012</td>
-</tr>
-<tr>
- <td><a href="bankmeridian.html">BankMeridian, N.A.</a></td>
- <td headers="city">Columbia</td>
- <td headers="state">SC</td>
- <td headers="CERT #">58222</td>
- <td headers="AI">SCBT National Association</td>
- <td headers="Closing Date">July 29, 2011</td>
- <td headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="vbb.html">Virginia Business Bank</a></td>
- <td headers="city">Richmond</td>
- <td headers="state">VA</td>
- <td headers="CERT #">58283</td>
- <td headers="AI">Xenith Bank</td>
- <td headers="Closing Date">July 29, 2011</td>
- <td headers="Updated">October 9, 2012</td>
-</tr>
-<tr>
- <td><a href="bankofchoice.html">Bank of Choice</a></td>
- <td headers="city">Greeley</td>
- <td headers="state">CO</td>
- <td headers="CERT #">2994</td>
- <td headers="AI">Bank Midwest, N.A.</td>
- <td headers="Closing Date">July 22, 2011</td>
- <td headers="Updated">September 12, 2012</td>
-</tr>
-<tr>
- <td><a href="landmark.html">LandMark Bank of Florida</a></td>
- <td headers="city">Sarasota</td>
- <td headers="state">FL</td>
- <td headers="CERT #">35244</td>
- <td headers="AI">American Momentum Bank</td>
- <td headers="Closing Date">July 22, 2011</td>
- <td headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="southshore.html">Southshore Community Bank</a></td>
- <td headers="city">Apollo Beach</td>
- <td headers="state">FL</td>
- <td headers="CERT #">58056</td>
- <td headers="AI">American Momentum Bank</td>
- <td headers="Closing Date">July 22, 2011</td>
- <td headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="summitbank.html">Summit Bank</a></td>
- <td headers="city">Prescott </td>
- <td headers="state">AZ</td>
- <td headers="CERT #">57442 </td>
- <td headers="AI">The Foothills Bank</td>
- <td width="125" headers="Closing Date">July 15, 2011</td>
- <td width="125" headers="Updated">August 16, 2012</td>
-</tr>
-<tr>
- <td><a href="firstpeoples.html">First Peoples Bank</a></td>
- <td headers="city">Port St. Lucie </td>
- <td headers="state">FL</td>
- <td headers="CERT #">34870 </td>
- <td headers="AI">Premier American Bank, N.A.</td>
- <td width="125" headers="Closing Date">July 15, 2011</td>
- <td width="125" headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="hightrust.html">High Trust Bank</a></td>
- <td headers="city">Stockbridge </td>
- <td headers="state">GA</td>
- <td headers="CERT #">19554 </td>
- <td headers="AI">Ameris Bank</td>
- <td width="125" headers="Closing Date">July 15, 2011</td>
- <td width="125" headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="onegeorgia.html">One Georgia Bank</a></td>
- <td headers="city">Atlanta </td>
- <td headers="state">GA</td>
- <td headers="CERT #">58238 </td>
- <td headers="AI">Ameris Bank</td>
- <td width="125" headers="Closing Date">July 15, 2011</td>
- <td width="125" headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="signaturebank.html">Signature Bank</a></td>
- <td headers="city">Windsor </td>
- <td headers="state">CO</td>
- <td headers="CERT #">57835 </td>
- <td headers="AI">Points West Community Bank</td>
- <td width="125" headers="Closing Date">July 8, 2011</td>
- <td width="125" headers="Updated">October 26, 2012</td>
-</tr>
-<tr>
- <td><a href="coloradocapital.html">Colorado Capital Bank</a></td>
- <td headers="city">Castle Rock </td>
- <td headers="state">CO</td>
- <td headers="CERT #">34522</td>
- <td headers="AI">First-Citizens Bank & Trust Company</td>
- <td width="125" headers="Closing Date">July 8, 2011</td>
- <td width="125" headers="Updated">January 15, 2013</td>
-</tr>
-<tr>
- <td><a href="firstchicago.html">First Chicago Bank & Trust</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">27935</td>
- <td headers="AI">Northbrook Bank & Trust Company</td>
- <td width="125" headers="Closing Date">July 8, 2011</td>
- <td width="125" headers="Updated">September 9, 2012</td>
-</tr>
-<tr>
- <td><a href="mountain.html">Mountain Heritage Bank</a></td>
- <td headers="city">Clayton</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57593</td>
- <td headers="AI">First American Bank and Trust Company</td>
- <td width="125" headers="Closing Date">June 24, 2011</td>
- <td width="125" headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="fcbtb.html">First Commercial Bank of Tampa Bay</a></td>
- <td headers="city">Tampa</td>
- <td headers="state">FL</td>
- <td headers="CERT #">27583</td>
- <td headers="AI">Stonegate Bank</td>
- <td width="125" headers="Closing Date">June 17, 2011</td>
- <td width="125" headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="mcintoshstate.html">McIntosh State Bank</a></td>
- <td headers="city">Jackson</td>
- <td headers="state">GA</td>
- <td headers="CERT #">19237</td>
- <td headers="AI">Hamilton State Bank</td>
- <td width="125" headers="Closing Date">June 17, 2011</td>
- <td width="125" headers="Updated">November 2, 2012</td>
-</tr>
-<tr>
- <td><a href="atlanticbanktrust.html">Atlantic Bank and Trust</a>
- </td>
- <td headers="city">Charleston</td>
- <td headers="state">SC</td>
- <td headers="CERT #">58420</td>
- <td headers="AI">First Citizens Bank and Trust Company, Inc.</td>
- <td width="125" headers="Closing Date">June 3, 2011</td>
- <td width="125" headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="firstheritage.html">First Heritage Bank</a></td>
- <td headers="city">Snohomish</td>
- <td headers="state">WA</td>
- <td headers="CERT #">23626</td>
- <td headers="AI">Columbia State Bank</td>
- <td width="125" headers="Closing Date">May 27, 2011</td>
- <td width="125" headers="Updated">January 28, 2013</td>
-</tr>
-<tr>
- <td><a href="summit.html">Summit Bank</a></td>
- <td headers="city">Burlington</td>
- <td headers="state">WA</td>
- <td headers="CERT #">513</td>
- <td headers="AI">Columbia State Bank</td>
- <td width="125" headers="Closing Date">May 20, 2011</td>
- <td width="125" headers="Updated">January 22, 2013</td>
-</tr>
-<tr>
- <td><a href="fgbc.html">First Georgia Banking Company</a></td>
- <td headers="city">Franklin</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57647</td>
- <td headers="AI">CertusBank, National Association</td>
- <td width="125" headers="Closing Date">May 20, 2011</td>
- <td width="125" headers="Updated">November 13, 2012</td>
-</tr>
-<tr>
- <td><a href="atlanticsthrn.html">Atlantic Southern Bank</a></td>
- <td headers="city">Macon</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57213</td>
- <td headers="AI">CertusBank, National Association</td>
- <td width="125" headers="Closing Date">May 20, 2011</td>
- <td width="125" headers="Updated">October 31, 2012</td>
-</tr>
-<tr>
- <td><a href="coastal_fl.html">Coastal Bank</a></td>
- <td headers="city">Cocoa Beach</td>
- <td headers="state">FL</td>
- <td headers="CERT #">34898</td>
- <td headers="AI">Florida Community Bank, a division of Premier American Bank, N.A.</td>
- <td width="125" headers="Closing Date">May 6, 2011</td>
- <td width="125" headers="Updated">November 30, 2012</td>
-</tr>
-<tr>
- <td><a href="communitycentral.html">Community Central Bank</a></td>
- <td headers="city">Mount Clemens</td>
- <td headers="state">MI</td>
- <td headers="CERT #">34234</td>
- <td headers="AI">Talmer Bank & Trust</td>
- <td headers="Closing Date">April 29, 2011</td>
- <td headers="Updated">August 16, 2012</td>
-</tr>
-<tr>
- <td><a href="parkavenue_ga.html">The Park Avenue Bank</a></td>
- <td headers="city">Valdosta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">19797</td>
- <td headers="AI">Bank of the Ozarks</td>
- <td headers="Closing Date">April 29, 2011</td>
- <td headers="Updated">November 30, 2012</td>
-</tr>
-<tr>
- <td><a href="firstchoice.html">First Choice Community Bank</a></td>
- <td headers="city">Dallas</td>
- <td headers="state">GA</td>
- <td headers="CERT #">58539</td>
- <td headers="AI">Bank of the Ozarks</td>
- <td headers="Closing Date">April 29, 2011</td>
- <td headers="Updated">January 22, 2013</td>
-</tr>
-<tr>
- <td><a href="cortez.html">Cortez Community Bank</a></td>
- <td headers="city">Brooksville</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57625</td>
- <td headers="AI">Florida Community Bank, a division of Premier American Bank, N.A.
- </td>
- <td headers="Closing Date">April 29, 2011</td>
- <td headers="Updated">November 30, 2012</td>
-</tr>
-<tr>
- <td><a href="fnbcf.html">First National Bank of Central Florida</a></td>
- <td headers="city">Winter Park</td>
- <td headers="state">FL</td>
- <td headers="CERT #">26297</td>
- <td headers="AI">Florida Community Bank, a division of Premier American Bank, N.A.</td>
- <td headers="Closing Date">April 29, 2011</td>
- <td headers="Updated">November 30, 2012</td>
-</tr>
-<tr>
- <td><a href="heritage_ms.html">Heritage Banking Group</a></td>
- <td headers="city">Carthage</td>
- <td headers="state">MS</td>
- <td headers="CERT #">14273</td>
- <td headers="AI">Trustmark National Bank</td>
- <td headers="Closing Date">April 15, 2011</td>
- <td headers="Updated">November 30, 2012</td>
-</tr>
-<tr>
- <td><a href="rosemount.html">Rosemount National Bank</a></td>
- <td headers="city">Rosemount</td>
- <td headers="state">MN</td>
- <td headers="CERT #">24099</td>
- <td headers="AI">Central Bank</td>
- <td headers="Closing Date">April 15, 2011</td>
- <td headers="Updated">August 16, 2012</td>
-</tr>
-<tr>
- <td><a href="superior_al.html">Superior Bank</a></td>
- <td headers="city">Birmingham</td>
- <td headers="state">AL</td>
- <td headers="CERT #">17750</td>
- <td headers="AI">Superior Bank, National Association</td>
- <td headers="Closing Date">April 15, 2011</td>
- <td headers="Updated">November 30, 2012</td>
-</tr>
-<tr>
- <td><a href="nexity.html">Nexity Bank</a></td>
- <td headers="city">Birmingham</td>
- <td headers="state">AL</td>
- <td headers="CERT #">19794</td>
- <td headers="AI">AloStar Bank of Commerce</td>
- <td headers="Closing Date">April 15, 2011</td>
- <td headers="Updated">September 4, 2012</td>
-</tr>
-<tr>
- <td><a href="newhorizons.html">New Horizons Bank</a></td>
- <td headers="city">East Ellijay</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57705</td>
- <td headers="AI">Citizens South Bank</td>
- <td headers="Closing Date">April 15, 2011</td>
- <td headers="Updated">August 16, 2012</td>
-</tr>
-<tr>
- <td><a href="bartow.html">Bartow County Bank</a></td>
- <td headers="city">Cartersville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">21495</td>
- <td headers="AI">Hamilton State Bank</td>
- <td headers="Closing Date">April 15, 2011</td>
- <td headers="Updated">January 22, 2013</td>
-</tr>
-<tr>
- <td><a href="nevadacommerce.html">Nevada Commerce Bank</a></td>
- <td headers="city">Las Vegas</td>
- <td headers="state">NV</td>
- <td headers="CERT #">35418</td>
- <td headers="AI">City National Bank</td>
- <td headers="Closing Date">April 8, 2011</td>
- <td headers="Updated">September 9, 2012</td>
-</tr>
-<tr>
- <td><a href="westernsprings.html">Western Springs National Bank and Trust</a></td>
- <td headers="city">Western Springs</td>
- <td headers="state">IL</td>
- <td headers="CERT #">10086</td>
- <td headers="AI">Heartland Bank and Trust Company</td>
- <td headers="Closing Date">April 8, 2011</td>
- <td headers="Updated">January 22, 2013</td>
-</tr>
-<tr>
- <td><a href="bankofcommerce.html">The Bank of Commerce</a></td>
- <td headers="city">Wood Dale</td>
- <td headers="state">IL</td>
- <td headers="CERT #">34292</td>
- <td headers="AI">Advantage National Bank Group</td>
- <td headers="Closing Date">March 25, 2011</td>
- <td headers="Updated">January 22, 2013</td>
-</tr>
-<tr>
- <td><a href="legacy-wi.html">Legacy Bank</a></td>
- <td headers="city">Milwaukee</td>
- <td headers="state">WI</td>
- <td headers="CERT #">34818</td>
- <td headers="AI">Seaway Bank and Trust Company</td>
- <td headers="Closing Date">March 11, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
- <tr>
- <td><a href="firstnatldavis.html">First National Bank of Davis</a></td>
- <td headers="city">Davis</td>
- <td headers="state">OK</td>
- <td headers="CERT #">4077</td>
- <td headers="AI">The Pauls Valley National Bank</td>
- <td headers="Closing Date">March 11, 2011</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="valleycomm.html">Valley Community Bank</a></td>
- <td headers="city">St. Charles</td>
- <td headers="state">IL</td>
- <td headers="CERT #">34187</td>
- <td headers="AI">First State Bank</td>
- <td headers="Closing Date">February 25, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
- <tr>
- <td><a href="sanluistrust.html">San Luis Trust Bank, FSB </a></td>
- <td headers="city">San Luis Obispo</td>
- <td headers="state">CA</td>
- <td headers="CERT #">34783</td>
- <td headers="AI">First California Bank</td>
- <td headers="Closing Date">February 18, 2011</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
-
- <tr>
- <td><a href="charteroak.html">Charter Oak Bank</a></td>
- <td headers="city">Napa</td>
- <td headers="state">CA</td>
- <td headers="CERT #">57855</td>
- <td headers="AI">Bank of Marin</td>
- <td headers="Closing Date">February 18, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
- <tr>
- <td><a href="citizensbk_ga.html">Citizens Bank of Effingham</a></td>
- <td headers="city">Springfield</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34601</td>
- <td headers="AI">Heritage Bank of the South</td>
- <td headers="Closing Date">February 18, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
- <tr>
- <td><a href="habersham.html">Habersham Bank</a></td>
- <td headers="city">Clarkesville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">151</td>
- <td headers="AI">SCBT National Association</td>
- <td headers="Closing Date">February 18, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="canyonstate.html">Canyon National Bank</a></td>
- <td headers="city">Palm Springs</td>
- <td headers="state">CA</td>
- <td headers="CERT #">34692</td>
- <td headers="AI">Pacific Premier Bank</td>
- <td headers="Closing Date">February 11, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
-
-
- <tr>
- <td><a href="badgerstate.html">Badger State Bank</a></td>
- <td headers="city">Cassville</td>
- <td headers="state">WI</td>
- <td headers="CERT #">13272</td>
- <td headers="AI">Royal Bank </td>
- <td headers="Closing Date">February 11, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="peoplesstatebank.html">Peoples State Bank</a></td>
- <td headers="city">Hamtramck</td>
- <td headers="state">MI</td>
- <td headers="CERT #">14939</td>
- <td headers="AI">First Michigan Bank</td>
- <td headers="Closing Date">February 11, 2011</td>
- <td headers="Updated">January 22, 2013</td>
- </tr>
-
-
- <tr>
- <td><a href="sunshinestate.html">Sunshine State Community Bank</a></td>
- <td headers="city">Port Orange</td>
- <td headers="state">FL</td>
- <td headers="CERT #">35478</td>
- <td headers="AI">Premier American Bank, N.A.</td>
- <td headers="Closing Date">February 11, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
- <tr>
- <td><a href="commfirst_il.html">Community First Bank Chicago</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">57948</td>
- <td headers="AI">Northbrook Bank & Trust Company</td>
- <td headers="Closing Date">February 4, 2011</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="northgabank.html">North Georgia Bank</a></td>
- <td headers="city">Watkinsville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">35242</td>
- <td headers="AI">BankSouth</td>
- <td headers="Closing Date">February 4, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
- <tr>
- <td><a href="americantrust.html">American Trust Bank</a></td>
- <td headers="city">Roswell</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57432</td>
- <td headers="AI">Renasant Bank</td>
- <td headers="Closing Date">February 4, 2011</td>
- <td headers="Updated">October 31, 2012</td>
- </tr>
- <tr>
- <td><a href="firstcomm_nm.html">First Community Bank</a></td>
- <td headers="city">Taos</td>
- <td headers="state">NM</td>
- <td headers="CERT #">12261</td>
- <td headers="AI">U.S. Bank, N.A.</td>
- <td headers="Closing Date">January 28, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
- <tr>
- <td><a href="firstier.html">FirsTier Bank</a></td>
- <td headers="city">Louisville</td>
- <td headers="state">CO</td>
- <td headers="CERT #">57646</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">January 28, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
- <tr>
- <td><a href="evergreenstatewi.html">Evergreen State Bank</a></td>
- <td headers="city">Stoughton</td>
- <td headers="state">WI</td>
- <td headers="CERT #">5328</td>
- <td headers="AI">McFarland State Bank</td>
- <td headers="Closing Date">January 28, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="firststatebank_ok.html">The First State Bank</a></td>
- <td headers="city">Camargo</td>
- <td headers="state">OK</td>
- <td headers="CERT #">2303</td>
- <td headers="AI">Bank 7</td>
- <td headers="Closing Date">January 28, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="unitedwestern.html">United Western Bank</a></td>
- <td headers="city">Denver</td>
- <td headers="state">CO</td>
- <td headers="CERT #">31293</td>
- <td headers="AI">First-Citizens Bank & Trust Company</td>
- <td headers="Closing Date">January 21, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="bankofasheville.html">The Bank of Asheville</a></td>
- <td headers="city">Asheville</td>
- <td headers="state">NC</td>
- <td headers="CERT #">34516</td>
- <td headers="AI">First Bank</td>
- <td headers="Closing Date">January 21, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="commsouth.html">CommunitySouth Bank & Trust</a></td>
- <td headers="city">Easley</td>
- <td headers="state">SC</td>
- <td headers="CERT #">57868</td>
- <td headers="AI">CertusBank, National Association</td>
- <td headers="Closing Date">January 21, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
- <tr>
- <td><a href="enterprise.html">Enterprise Banking Company</a></td>
- <td headers="city">McDonough</td>
- <td headers="state">GA</td>
- <td headers="CERT #">19758</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">January 21, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="oglethorpe.html">Oglethorpe Bank</a></td>
- <td headers="city">Brunswick</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57440</td>
- <td headers="AI">Bank of the Ozarks </td>
- <td headers="Closing Date">January 14, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
- <tr>
- <td><a href="legacybank.html">Legacy Bank</a></td>
- <td headers="city">Scottsdale</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">57820</td>
- <td headers="AI">Enterprise Bank & Trust </td>
- <td headers="Closing Date">January 7, 2011</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
- <tr>
- <td><a href="firstcommercial.html">First Commercial Bank of Florida</a></td>
- <td headers="city">Orlando</td>
- <td headers="state">FL</td>
- <td headers="CERT #">34965</td>
- <td headers="AI">First Southern Bank</td>
- <td headers="Closing Date">January 7, 2011</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
- <tr>
- <td><a href="communitynatl.html">Community National Bank</a></td>
- <td headers="city">Lino Lakes</td>
- <td headers="state">MN</td>
- <td headers="CERT #">23306</td>
- <td headers="AI">Farmers & Merchants Savings Bank</td>
- <td headers="Closing Date">December 17, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="firstsouthern.html">First Southern Bank </a></td>
- <td headers="city">Batesville</td>
- <td headers="state">AR</td>
- <td headers="CERT #">58052</td>
- <td headers="AI">Southern Bank</td>
- <td headers="Closing Date">December 17, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="unitedamericas.html">United Americas Bank, N.A.</a></td>
- <td headers="city">Atlanta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">35065</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">December 17, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="appalachianga.html">Appalachian Community Bank, FSB </a></td>
- <td headers="city">McCaysville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">58495</td>
- <td headers="AI">Peoples Bank of East Tennessee</td>
- <td headers="Closing Date">December 17, 2010</td>
- <td headers="Updated">October 31, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="chestatee.html">Chestatee State Bank</a></td>
- <td headers="city">Dawsonville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34578</td>
- <td headers="AI">Bank of the Ozarks</td>
- <td headers="Closing Date">December 17, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="bankofmiami.html">The Bank of Miami,N.A.</a></td>
- <td headers="city">Coral Gables</td>
- <td headers="state">FL</td>
- <td headers="CERT #">19040</td>
- <td headers="AI">1st United Bank </td>
- <td headers="Closing Date">December 17, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
- <tr>
- <td><a href="earthstar.html">Earthstar Bank</a></td>
- <td headers="city">Southampton</td>
- <td headers="state">PA</td>
- <td headers="CERT #">35561</td>
- <td headers="AI">Polonia Bank</td>
- <td headers="Closing Date">December 10, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="paramount.html">Paramount Bank</a></td>
- <td headers="city">Farmington Hills</td>
- <td headers="state">MI</td>
- <td headers="CERT #">34673</td>
- <td headers="AI">Level One Bank</td>
- <td headers="Closing Date">December 10, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="firstbanking.html">First Banking Center</a></td>
- <td headers="city">Burlington</td>
- <td headers="state">WI</td>
- <td headers="CERT #">5287</td>
- <td headers="AI">First Michigan Bank</td>
- <td headers="Closing Date">November 19, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
-
-
-
- <tr>
- <td><a href="allegbank.html">Allegiance Bank of North America</a></td>
- <td headers="city">Bala Cynwyd</td>
- <td headers="state">PA</td>
- <td headers="CERT #">35078</td>
- <td headers="AI">VIST Bank</td>
- <td headers="Closing Date">November 19, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="gulfstate.html">Gulf State Community Bank</a></td>
- <td headers="city">Carrabelle</td>
- <td headers="state">FL</td>
- <td headers="CERT #">20340</td>
- <td headers="AI">Centennial Bank</td>
- <td headers="Closing Date">November 19, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="copperstar.html">Copper Star Bank</a></td>
- <td headers="city">Scottsdale</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">35463</td>
- <td headers="AI">Stearns Bank, N.A.</td>
- <td headers="Closing Date">November 12, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="darbybank.html">Darby Bank & Trust Co.</a></td>
- <td headers="city">Vidalia</td>
- <td headers="state">GA</td>
- <td headers="CERT #">14580</td>
- <td headers="AI">Ameris Bank</td>
- <td headers="Closing Date">November 12, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="tifton.html">Tifton Banking Company</a></td>
- <td headers="city">Tifton</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57831</td>
- <td headers="AI">Ameris Bank</td>
- <td headers="Closing Date">November 12, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
- <tr>
- <td><a href="firstvietnamese.html">First Vietnamese American Bank</a><br />
- <a href="firstvietnamese_viet.pdf">In Vietnamese</a></td>
- <td headers="city">Westminster</td>
- <td headers="state">CA</td>
- <td headers="CERT #">57885</td>
- <td headers="AI">Grandpoint Bank</td>
- <td headers="Closing Date">November 5, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
- <tr>
- <td><a href="piercecommercial.html">Pierce Commercial Bank</a></td>
- <td headers="city">Tacoma</td>
- <td headers="state">WA</td>
- <td headers="CERT #">34411</td>
- <td headers="AI">Heritage Bank</td>
- <td headers="Closing Date">November 5, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="westerncommercial_ca.html">Western Commercial Bank</a></td>
- <td headers="city">Woodland Hills</td>
- <td headers="state">CA</td>
- <td headers="CERT #">58087</td>
- <td headers="AI">First California Bank</td>
- <td headers="Closing Date">November 5, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
- <tr>
- <td><a href="kbank.html">K Bank</a></td>
- <td headers="city">Randallstown</td>
- <td headers="state">MD</td>
- <td headers="CERT #">31263</td>
- <td headers="AI">Manufacturers and Traders Trust Company (M&T Bank)</td>
- <td headers="Closing Date">November 5, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="firstazfsb.html">First Arizona Savings, A FSB</a></td>
- <td headers="city">Scottsdale</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">32582</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">October 22, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="hillcrest_ks.html">Hillcrest Bank</a></td>
- <td headers="city">Overland Park</td>
- <td headers="state">KS</td>
- <td headers="CERT #">22173</td>
- <td headers="AI">Hillcrest Bank, N.A.</td>
- <td headers="Closing Date">October 22, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="firstsuburban.html">First Suburban National Bank</a></td>
- <td headers="city">Maywood</td>
- <td headers="state">IL</td>
- <td headers="CERT #">16089</td>
- <td headers="AI">Seaway Bank and Trust Company</td>
- <td headers="Closing Date">October 22, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
-
-
-
- <tr>
- <td><a href="fnbbarnesville.html">The First National Bank of Barnesville</a></td>
- <td headers="city">Barnesville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">2119</td>
- <td headers="AI">United Bank</td>
- <td headers="Closing Date">October 22, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
-
- <tr>
- <td><a href="gordon.html">The Gordon Bank</a></td>
- <td headers="city">Gordon</td>
- <td headers="state">GA</td>
- <td headers="CERT #">33904</td>
- <td headers="AI">Morris Bank</td>
- <td headers="Closing Date">October 22, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
-
- <tr>
- <td><a href="progress_fl.html">Progress Bank of Florida</a></td>
- <td headers="city">Tampa</td>
- <td headers="state">FL</td>
- <td headers="CERT #">32251</td>
- <td headers="AI">Bay Cities Bank</td>
- <td headers="Closing Date">October 22, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
-
-
- <tr>
- <td><a href="firstbankjacksonville.html">First Bank of Jacksonville</a></td>
- <td headers="city">Jacksonville</td>
- <td headers="state">FL</td>
- <td headers="CERT #">27573</td>
- <td headers="AI">Ameris Bank</td>
- <td headers="Closing Date">October 22, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
-
- <tr>
- <td><a href="premier_mo.html">Premier Bank</a></td>
- <td headers="city">Jefferson City</td>
- <td headers="state">MO</td>
- <td headers="CERT #">34016</td>
- <td headers="AI">Providence Bank</td>
- <td headers="Closing Date">October 15, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="westbridge.html">WestBridge Bank and Trust Company</a></td>
- <td headers="city">Chesterfield</td>
- <td headers="state">MO</td>
- <td headers="CERT #">58205</td>
- <td headers="AI">Midland States Bank</td>
- <td headers="Closing Date">October 15, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="securitysavingsfsb.html">Security Savings Bank, F.S.B.</a></td>
- <td headers="city">Olathe</td>
- <td headers="state">KS</td>
- <td headers="CERT #">30898</td>
- <td headers="AI">Simmons First National Bank</td>
- <td headers="Closing Date">October 15, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="shoreline.html">Shoreline Bank</a></td>
- <td headers="city">Shoreline</td>
- <td headers="state">WA</td>
- <td headers="CERT #">35250</td>
- <td headers="AI">GBC International Bank</td>
- <td headers="Closing Date">October 1, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="wakulla.html">Wakulla Bank</a></td>
- <td headers="city">Crawfordville</td>
- <td headers="state"> FL </td>
- <td headers="CERT #">21777</td>
- <td headers="AI">Centennial Bank</td>
- <td headers="Closing Date">October 1, 2010</td>
- <td headers="Updated">November 2, 2012</td>
- </tr>
- <tr>
- <td><a href="northcounty.html">North County Bank</a></td>
- <td headers="city">Arlington</td>
- <td headers="state"> WA </td>
- <td headers="CERT #">35053</td>
- <td headers="AI">Whidbey Island Bank</td>
- <td headers="Closing Date">September 24, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="haventrust_fl.html">Haven Trust Bank Florida</a></td>
- <td headers="city">Ponte Vedra Beach</td>
- <td headers="state"> FL </td>
- <td headers="CERT #">58308</td>
- <td headers="AI">First Southern Bank</td>
- <td headers="Closing Date">September 24, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
-
- <tr>
- <td><a href="maritimesavings.html">Maritime Savings Bank</a></td>
- <td headers="city">West Allis</td>
- <td headers="state"> WI </td>
- <td headers="CERT #">28612</td>
- <td headers="AI">North Shore Bank, FSB</td>
- <td headers="Closing Date">September 17, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
-
- <tr>
- <td><a href="bramblesavings.html">Bramble Savings Bank</a></td>
- <td headers="city">Milford</td>
- <td headers="state"> OH </td>
- <td headers="CERT #">27808</td>
- <td headers="AI">Foundation Bank</td>
- <td headers="Closing Date">September 17, 2010</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="peoplesbank_ga.html">The Peoples Bank</a></td>
- <td headers="city">Winder</td>
- <td headers="state"> GA </td>
- <td headers="CERT #">182</td>
- <td headers="AI">Community & Southern Bank</td>
- <td headers="Closing Date">September 17, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
-
- <tr>
- <td><a href="firstcommerce_ga.html">First Commerce Community Bank</a></td>
- <td headers="city">Douglasville</td>
- <td headers="state"> GA </td>
- <td headers="CERT #">57448</td>
- <td headers="AI">Community & Southern Bank</td>
- <td headers="Closing Date">September 17, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
-
- <tr>
- <td><a href="ellijay.html">Bank of Ellijay</a></td>
- <td headers="city"> Ellijay </td>
- <td headers="state"> GA </td>
- <td headers="CERT #">58197</td>
- <td headers="AI">Community & Southern Bank</td>
- <td headers="Closing Date">September 17, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
-
-
- <tr>
- <td><a href="isnbank.html">ISN Bank</a></td>
- <td headers="city">Cherry Hill </td>
- <td headers="state"> NJ </td>
- <td headers="CERT #">57107</td>
- <td headers="AI">Customers Bank</td>
- <td headers="Closing Date">September 17, 2010</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-
- <tr>
- <td><a href="horizonfl.html">Horizon Bank</a></td>
- <td headers="city">Bradenton</td>
- <td headers="state"> FL </td>
- <td headers="CERT #">35061</td>
- <td headers="AI">Bank of the Ozarks</td>
- <td headers="Closing Date">September 10, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
-
- <tr>
- <td><a href="sonoma.html">Sonoma Valley Bank</a></td>
- <td headers="city">Sonoma</td>
- <td headers="state"> CA </td>
- <td headers="CERT #">27259</td>
- <td headers="AI">Westamerica Bank</td>
- <td headers="Closing Date">August 20, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="lospadres.html">Los Padres Bank</a></td>
- <td headers="city">Solvang </td>
- <td headers="state">CA</td>
- <td headers="CERT #">32165</td>
- <td headers="AI">Pacific Western Bank</td>
- <td headers="Closing Date">August 20, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
- <tr>
- <td><a href="butte.html">Butte Community Bank</a></td>
- <td headers="city">Chico</td>
- <td headers="state"> CA </td>
- <td headers="CERT #">33219</td>
- <td headers="AI">Rabobank, N.A.</td>
- <td headers="Closing Date">August 20, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
- <tr>
- <td><a href="pacificbk.html">Pacific State Bank</a></td>
- <td headers="city">Stockton</td>
- <td headers="state"> CA </td>
- <td headers="CERT #">27090</td>
- <td headers="AI">Rabobank, N.A.</td>
- <td headers="Closing Date">August 20, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
- <tr>
- <td><a href="shorebank.html">ShoreBank</a></td>
- <td headers="city">Chicago </td>
- <td headers="state">IL</td>
- <td headers="CERT #">15640</td>
- <td headers="AI">Urban Partnership Bank</td>
- <td headers="Closing Date">August 20, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="imperialsvgs.html">Imperial Savings and Loan Association</a></td>
- <td headers="city">Martinsville</td>
- <td headers="state">VA</td>
- <td headers="CERT #">31623</td>
- <td headers="AI">River Community Bank, N.A.</td>
- <td headers="Closing Date">August 20, 2010</td>
- <td headers="Updated">August 24, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="inatbank.html">Independent National Bank</a></td>
- <td headers="city">Ocala</td>
- <td headers="state">FL</td>
- <td headers="CERT #">27344</td>
- <td headers="AI">CenterState Bank of Florida, N.A.</td>
- <td headers="Closing Date">August 20, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
-
- <tr>
- <td><a href="cnbbartow.html">Community National Bank at Bartow</a></td>
- <td headers="city">Bartow</td>
- <td headers="state">FL</td>
- <td headers="CERT #">25266</td>
- <td headers="AI">CenterState Bank of Florida, N.A.</td>
- <td headers="Closing Date">August 20, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
-
-
-
- <tr>
- <td><a href="palosbank.html">Palos Bank and Trust Company</a></td>
- <td headers="city">Palos Heights</td>
- <td headers="state">IL</td>
- <td headers="CERT #">17599</td>
- <td headers="AI">First Midwest Bank</td>
- <td headers="Closing Date">August 13, 2010</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="ravenswood.html">Ravenswood Bank</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">34231</td>
- <td headers="AI">Northbrook Bank & Trust Company</td>
- <td headers="Closing Date">August 6, 2010</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-
-
- <tr>
- <td><a href="libertyor.html">LibertyBank</a></td>
- <td headers="city">Eugene</td>
- <td headers="state">OR</td>
- <td headers="CERT #">31964</td>
- <td headers="AI">Home Federal Bank</td>
- <td headers="Closing Date">July 30, 2010</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="cowlitz.html">The Cowlitz Bank</a></td>
- <td headers="city">Longview</td>
- <td headers="state">WA</td>
- <td headers="CERT #">22643</td>
- <td headers="AI">Heritage Bank</td>
- <td headers="Closing Date">July 30, 2010</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="coastal.html">Coastal Community Bank</a></td>
- <td headers="city">Panama City Beach</td>
- <td headers="state">FL</td>
- <td headers="CERT #">9619</td>
- <td headers="AI">Centennial Bank</td>
- <td headers="Closing Date">July 30, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="bayside.html">Bayside Savings Bank</a></td>
- <td headers="city">Port Saint Joe</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57669</td>
- <td headers="AI">Centennial Bank</td>
- <td headers="Closing Date">July 30, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="northwestga.html">Northwest Bank & Trust</a></td>
- <td headers="city">Acworth</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57658</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">July 30, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="homevalleyor.html">Home Valley Bank </a></td>
- <td headers="city">Cave Junction</td>
- <td headers="state">OR</td>
- <td headers="CERT #">23181</td>
- <td headers="AI">South Valley Bank & Trust</td>
- <td headers="Closing Date">July 23, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
- <tr>
- <td><a href="southwestusanv.html">SouthwestUSA Bank </a></td>
- <td headers="city">Las Vegas</td>
- <td headers="state">NV</td>
- <td headers="CERT #">35434</td>
- <td headers="AI">Plaza Bank</td>
- <td headers="Closing Date">July 23, 2010</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="communitysecmn.html">Community Security Bank </a></td>
- <td headers="city">New Prague</td>
- <td headers="state">MN</td>
- <td headers="CERT #">34486</td>
- <td headers="AI">Roundbank</td>
- <td headers="Closing Date">July 23, 2010</td>
- <td headers="Updated">September 12, 2012</td>
- </tr>
- <tr>
- <td><a href="thunderbankks.html">Thunder Bank </a></td>
- <td headers="city">Sylvan Grove</td>
- <td headers="state">KS</td>
- <td headers="CERT #">10506</td>
- <td headers="AI">The Bennington State Bank</td>
- <td headers="Closing Date">July 23, 2010</td>
- <td headers="Updated">September 13, 2012</td>
- </tr>
- <tr>
- <td><a href="williamsburgsc.html">Williamsburg First National Bank </a></td>
- <td headers="city">Kingstree</td>
- <td headers="state">SC</td>
- <td headers="CERT #">17837</td>
- <td headers="AI">First Citizens Bank and Trust Company, Inc.</td>
- <td headers="Closing Date">July 23, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="crescentga.html">Crescent Bank and Trust Company </a></td>
- <td headers="city">Jasper</td>
- <td headers="state">GA</td>
- <td headers="CERT #">27559</td>
- <td headers="AI">Renasant Bank</td>
- <td headers="Closing Date">July 23, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="sterlingfl.html">Sterling Bank </a></td>
- <td headers="city">Lantana</td>
- <td headers="state">FL</td>
- <td headers="CERT #">32536</td>
- <td headers="AI">IBERIABANK</td>
- <td headers="Closing Date">July 23, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="mainstsvgs.html">Mainstreet Savings Bank, FSB</a></td>
- <td headers="city">Hastings</td>
- <td headers="state">MI</td>
- <td headers="CERT #">28136</td>
- <td headers="AI">Commercial Bank</td>
- <td headers="Closing Date">July 16, 2010</td>
- <td headers="Updated">September 13, 2012</td>
- </tr>
- <tr>
- <td><a href="oldecypress.html">Olde Cypress Community Bank</a></td>
- <td headers="city">Clewiston</td>
- <td headers="state">FL</td>
- <td headers="CERT #">28864</td>
- <td headers="AI">CenterState Bank of Florida, N.A.</td>
- <td headers="Closing Date">July 16, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="turnberry.html">Turnberry Bank</a></td>
- <td headers="city">Aventura</td>
- <td headers="state">FL</td>
- <td headers="CERT #">32280</td>
- <td headers="AI">NAFH National Bank</td>
- <td headers="Closing Date">July 16, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="metrobankfl.html">Metro Bank of Dade County</a></td>
- <td headers="city">Miami</td>
- <td headers="state">FL</td>
- <td headers="CERT #">25172</td>
- <td headers="AI">NAFH National Bank</td>
- <td headers="Closing Date">July 16, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="firstnatlsc.html">First National Bank of the South</a></td>
- <td headers="city">Spartanburg</td>
- <td headers="state">SC</td>
- <td headers="CERT #">35383</td>
- <td headers="AI">NAFH National Bank</td>
- <td headers="Closing Date">July 16, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td height="24"><a href="woodlands.html">Woodlands Bank</a></td>
- <td headers="city">Bluffton</td>
- <td headers="state">SC</td>
- <td headers="CERT #">32571</td>
- <td headers="AI">Bank of the Ozarks</td>
- <td headers="Closing Date">July 16, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="homenatlok.html">Home National Bank</a></td>
- <td headers="city">Blackwell</td>
- <td headers="state">OK</td>
- <td headers="CERT #">11636</td>
- <td headers="AI">RCB Bank</td>
- <td headers="Closing Date">July 9, 2010</td>
- <td headers="Updated">December 10, 2012</td>
- </tr>
- <tr>
- <td><a href="usabankny.html">USA Bank</a></td>
- <td headers="city">Port Chester</td>
- <td headers="state">NY</td>
- <td headers="CERT #">58072</td>
- <td headers="AI">New Century Bank</td>
- <td headers="Closing Date">July 9, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="idealfedsvngsmd.html">Ideal Federal Savings Bank</a></td>
- <td headers="city">Baltimore</td>
- <td headers="state">MD</td>
- <td headers="CERT #">32456</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">July 9, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="baynatlmd.html">Bay National Bank</a></td>
- <td headers="city">Baltimore</td>
- <td headers="state">MD</td>
- <td headers="CERT #">35462</td>
- <td headers="AI">Bay Bank, FSB</td>
- <td headers="Closing Date">July 9, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="highdesertnm.html">High Desert State Bank</a></td>
- <td headers="city">Albuquerque</td>
- <td headers="state">NM</td>
- <td headers="CERT #">35279</td>
- <td headers="AI">First American Bank</td>
- <td headers="Closing Date">June 25, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="firstnatga.html">First National Bank</a></td>
- <td headers="city">Savannah</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34152</td>
- <td headers="AI">The Savannah Bank, N.A.</td>
- <td headers="Closing Date">June 25, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="peninsulafl.html">Peninsula Bank</a></td>
- <td headers="city">Englewood</td>
- <td headers="state">FL</td>
- <td headers="CERT #">26563</td>
- <td headers="AI">Premier American Bank, N.A.</td>
- <td headers="Closing Date">June 25, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="nevsecbank.html">Nevada Security Bank</a></td>
- <td headers="city">Reno</td>
- <td headers="state">NV</td>
- <td headers="CERT #">57110</td>
- <td headers="AI">Umpqua Bank</td>
- <td headers="Closing Date">June 18, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="washfirstintl.html">Washington First International Bank</a></td>
- <td headers="city">Seattle</td>
- <td headers="state">WA</td>
- <td headers="CERT #">32955</td>
- <td headers="AI">East West Bank</td>
- <td headers="Closing Date">June 11, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="tieronebankne.html">TierOne Bank</a></td>
- <td headers="city">Lincoln</td>
- <td headers="state">NE</td>
- <td headers="CERT #">29341</td>
- <td headers="AI">Great Western Bank</td>
- <td headers="Closing Date">June 4, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="arcolail.html">Arcola Homestead Savings Bank</a></td>
- <td headers="city">Arcola</td>
- <td headers="state">IL</td>
- <td headers="CERT #">31813</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">June 4, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="firstnatms.html">First National Bank</a></td>
- <td headers="city">Rosedale </td>
- <td headers="state">MS</td>
- <td headers="CERT #">15814</td>
- <td headers="AI">The Jefferson Bank</td>
- <td headers="Closing Date">June 4, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="swbnevada.html">Sun West Bank</a></td>
- <td headers="city">Las Vegas </td>
- <td headers="state">NV</td>
- <td headers="CERT #">34785</td>
- <td headers="AI">City National Bank</td>
- <td headers="Closing Date">May 28, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="graniteca.html">Granite Community Bank, NA</a></td>
- <td headers="city">Granite Bay </td>
- <td headers="state">CA</td>
- <td headers="CERT #">57315</td>
- <td headers="AI">Tri Counties Bank</td>
- <td headers="Closing Date">May 28, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="bankoffloridatb.html">Bank of Florida - Tampa</a></td>
- <td headers="city">Tampa</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57814</td>
- <td headers="AI">EverBank</td>
- <td headers="Closing Date">May 28, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="bankoffloridasw.html">Bank of Florida - Southwest</a></td>
- <td headers="city">Naples </td>
- <td headers="state">FL</td>
- <td headers="CERT #">35106</td>
- <td headers="AI">EverBank</td>
- <td headers="Closing Date">May 28, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="bankoffloridase.html">Bank of Florida - Southeast</a></td>
- <td headers="city">Fort Lauderdale </td>
- <td headers="state">FL</td>
- <td headers="CERT #">57360</td>
- <td headers="AI">EverBank</td>
- <td headers="Closing Date">May 28, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="pinehurstmn.html">Pinehurst Bank</a></td>
- <td headers="city">Saint Paul </td>
- <td headers="state">MN</td>
- <td headers="CERT #">57735</td>
- <td headers="AI">Coulee Bank</td>
- <td headers="Closing Date">May 21, 2010</td>
- <td headers="Updated">October 26, 2012</td>
- </tr>
- <tr>
- <td><a href="midwestil.html">Midwest Bank and Trust Company</a></td>
- <td headers="city">Elmwood Park </td>
- <td headers="state">IL</td>
- <td headers="CERT #">18117</td>
- <td headers="AI">FirstMerit Bank, N.A.</td>
- <td headers="Closing Date">May 14, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="swcmntymo.html">Southwest Community Bank</a></td>
- <td headers="city">Springfield</td>
- <td headers="state">MO</td>
- <td headers="CERT #">34255</td>
- <td headers="AI">Simmons First National Bank</td>
- <td headers="Closing Date">May 14, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="newlibertymi.html">New Liberty Bank</a></td>
- <td headers="city">Plymouth</td>
- <td headers="state">MI</td>
- <td headers="CERT #">35586</td>
- <td headers="AI">Bank of Ann Arbor</td>
- <td headers="Closing Date">May 14, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="satillacmntyga.html">Satilla Community Bank</a></td>
- <td headers="city">Saint Marys</td>
- <td headers="state">GA</td>
- <td headers="CERT #">35114</td>
- <td headers="AI">Ameris Bank</td>
- <td headers="Closing Date">May 14, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="1stpacific.html">1st Pacific Bank of California</a></td>
- <td headers="city">San Diego</td>
- <td headers="state">CA</td>
- <td headers="CERT #">35517</td>
- <td headers="AI">City National Bank</td>
- <td headers="Closing Date">May 7, 2010</td>
- <td headers="Updated">December 13, 2012</td>
- </tr>
- <tr>
- <td><a href="townebank.html">Towne Bank of Arizona</a></td>
- <td headers="city">Mesa</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">57697</td>
- <td headers="AI">Commerce Bank of Arizona</td>
- <td headers="Closing Date">May 7, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="accessbank.html">Access Bank</a></td>
- <td headers="city">Champlin</td>
- <td headers="state">MN</td>
- <td headers="CERT #">16476</td>
- <td headers="AI">PrinsBank</td>
- <td headers="Closing Date">May 7, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="bonifay.html">The Bank of Bonifay</a></td>
- <td headers="city">Bonifay</td>
- <td headers="state">FL</td>
- <td headers="CERT #">14246</td>
- <td headers="AI">First Federal Bank of Florida</td>
- <td headers="Closing Date">May 7, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="frontier.html">Frontier Bank</a></td>
- <td headers="city">Everett</td>
- <td headers="state">WA</td>
- <td headers="CERT #">22710</td>
- <td headers="AI">Union Bank, N.A.</td>
- <td headers="Closing Date">April 30, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="bc-natl.html">BC National Banks</a></td>
- <td headers="city">Butler</td>
- <td headers="state">MO</td>
- <td headers="CERT #">17792</td>
- <td headers="AI">Community First Bank</td>
- <td headers="Closing Date">April 30, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="champion.html">Champion Bank</a></td>
- <td headers="city">Creve Coeur</td>
- <td headers="state">MO</td>
- <td headers="CERT #">58362</td>
- <td headers="AI">BankLiberty</td>
- <td headers="Closing Date">April 30, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="cfbancorp.html">CF Bancorp</a></td>
- <td headers="city">Port Huron</td>
- <td headers="state">MI</td>
- <td headers="CERT #">30005</td>
- <td headers="AI">First Michigan Bank</td>
- <td headers="Closing Date">April 30, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="westernbank-puertorico.html">Westernbank Puerto Rico</a><br />
- <a href="westernbank-puertorico_spanish.html">En Espanol</a></td>
- <td headers="city">Mayaguez</td>
- <td headers="state">PR</td>
- <td headers="CERT #">31027</td>
- <td headers="AI">Banco Popular de Puerto Rico</td>
- <td headers="Closing Date">April 30, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="r-gpremier-puertorico.html">R-G Premier Bank of Puerto Rico</a><br />
- <a href="r-gpremier-puertorico_spanish.html">En Espanol</a></td>
- <td headers="city">Hato Rey</td>
- <td headers="state">PR</td>
- <td headers="CERT #">32185</td>
- <td headers="AI">Scotiabank de Puerto Rico</td>
- <td headers="Closing Date">April 30, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="eurobank-puertorico.html">Eurobank</a><br />
- <a href="eurobank-puertorico_spanish.html">En Espanol</a></td>
- <td headers="city">San Juan</td>
- <td headers="state">PR</td>
- <td headers="CERT #">27150</td>
- <td headers="AI">Oriental Bank and Trust</td>
- <td headers="Closing Date">April 30, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="wheatland.html">Wheatland Bank</a></td>
- <td headers="city">Naperville</td>
- <td headers="state">IL</td>
- <td headers="CERT #">58429</td>
- <td headers="AI">Wheaton Bank & Trust</td>
- <td headers="Closing Date">April 23, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="peotone.html">Peotone Bank and Trust Company</a></td>
- <td headers="city">Peotone</td>
- <td headers="state">IL</td>
- <td headers="CERT #">10888</td>
- <td headers="AI">First Midwest Bank</td>
- <td headers="Closing Date">April 23, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="lincoln-park.html">Lincoln Park Savings Bank</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">30600</td>
- <td headers="AI">Northbrook Bank & Trust Company</td>
- <td headers="Closing Date">April 23, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="new-century-il.html">New Century Bank</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">34821</td>
- <td headers="AI">MB Financial Bank, N.A.</td>
- <td headers="Closing Date">April 23, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="citizens-bank.html">Citizens Bank and Trust Company of Chicago</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">34658</td>
- <td headers="AI">Republic Bank of Chicago</td>
- <td headers="Closing Date">April 23, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="broadway.html">Broadway Bank</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">22853</td>
- <td headers="AI">MB Financial Bank, N.A.</td>
- <td headers="Closing Date">April 23, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="amcore.html">Amcore Bank, National Association</a></td>
- <td headers="city">Rockford</td>
- <td headers="state">IL</td>
- <td headers="CERT #">3735</td>
- <td headers="AI">Harris N.A.</td>
- <td headers="Closing Date">April 23, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
-
- <tr>
- <td><a href="citybank.html">City Bank</a></td>
- <td headers="city">Lynnwood</td>
- <td headers="state">WA</td>
- <td headers="CERT #">21521</td>
- <td headers="AI">Whidbey Island Bank</td>
- <td headers="Closing Date">April 16, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="tamalpais.html">Tamalpais Bank</a></td>
- <td headers="city">San Rafael</td>
- <td headers="state">CA</td>
- <td headers="CERT #">33493</td>
- <td headers="AI">Union Bank, N.A.</td>
- <td headers="Closing Date">April 16, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="innovative.html">Innovative Bank</a></td>
- <td headers="city">Oakland</td>
- <td headers="state">CA</td>
- <td headers="CERT #">23876</td>
- <td headers="AI">Center Bank</td>
- <td headers="Closing Date">April 16, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="butlerbank.html">Butler Bank</a></td>
- <td headers="city">Lowell</td>
- <td headers="state">MA</td>
- <td headers="CERT #">26619</td>
- <td headers="AI">People's United Bank</td>
- <td headers="Closing Date">April 16, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="riverside-natl.html">Riverside National Bank of Florida</a></td>
- <td headers="city">Fort Pierce</td>
- <td headers="state">FL</td>
- <td headers="CERT #">24067</td>
- <td headers="AI">TD Bank, N.A.</td>
- <td headers="Closing Date">April 16, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="americanfirst.html">AmericanFirst Bank</a></td>
- <td headers="city">Clermont</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57724</td>
- <td headers="AI">TD Bank, N.A.</td>
- <td headers="Closing Date">April 16, 2010</td>
- <td headers="Updated">October 31, 2012</td>
- </tr>
- <tr>
- <td><a href="ffbnf.html">First Federal Bank of North Florida</a></td>
- <td headers="city">Palatka</td>
- <td headers="state">FL </td>
- <td headers="CERT #">28886</td>
- <td headers="AI">TD Bank, N.A.</td>
- <td headers="Closing Date">April 16, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="lakeside-comm.html">Lakeside Community Bank</a></td>
- <td headers="city">Sterling Heights</td>
- <td headers="state">MI</td>
- <td headers="CERT #">34878</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">April 16, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="beachfirst.html">Beach First National Bank</a></td>
- <td headers="city">Myrtle Beach</td>
- <td headers="state">SC</td>
- <td headers="CERT #">34242</td>
- <td headers="AI">Bank of North Carolina</td>
- <td headers="Closing Date">April 9, 2010</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="deserthills.html">Desert Hills Bank</a></td>
- <td headers="city">Phoenix</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">57060</td>
- <td headers="AI">New York Community Bank</td>
- <td headers="Closing Date">March 26, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="unity-natl.html">Unity National Bank</a></td>
- <td headers="city">Cartersville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34678</td>
- <td headers="AI">Bank of the Ozarks</td>
- <td headers="Closing Date">March 26, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="key-west.html">Key West Bank</a></td>
- <td headers="city">Key West</td>
- <td headers="state">FL</td>
- <td headers="CERT #">34684</td>
- <td headers="AI">Centennial Bank</td>
- <td headers="Closing Date">March 26, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="mcintosh.html">McIntosh Commercial Bank</a></td>
- <td headers="city">Carrollton</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57399</td>
- <td headers="AI">CharterBank</td>
- <td headers="Closing Date">March 26, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="state-aurora.html">State Bank of Aurora</a></td>
- <td headers="city">Aurora</td>
- <td headers="state">MN</td>
- <td headers="CERT #">8221</td>
- <td headers="AI">Northern State Bank</td>
- <td headers="Closing Date">March 19, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="firstlowndes.html">First Lowndes Bank</a></td>
- <td headers="city">Fort Deposit</td>
- <td headers="state">AL</td>
- <td headers="CERT #">24957</td>
- <td headers="AI">First Citizens Bank</td>
- <td headers="Closing Date">March 19, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="bankofhiawassee.html">Bank of Hiawassee</a></td>
- <td headers="city">Hiawassee</td>
- <td headers="state">GA</td>
- <td headers="CERT #">10054</td>
- <td headers="AI">Citizens South Bank</td>
- <td headers="Closing Date">March 19, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="appalachian.html">Appalachian Community Bank</a></td>
- <td headers="city">Ellijay</td>
- <td headers="state">GA</td>
- <td headers="CERT #">33989</td>
- <td headers="AI">Community & Southern Bank</td>
- <td headers="Closing Date">March 19, 2010</td>
- <td headers="Updated">October 31, 2012</td>
- </tr>
- <tr>
- <td><a href="advanta-ut.html">Advanta Bank Corp.</a></td>
- <td headers="city">Draper</td>
- <td headers="state">UT</td>
- <td headers="CERT #">33535</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">March 19, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="cent-security.html">Century Security Bank</a></td>
- <td headers="city">Duluth</td>
- <td headers="state">GA</td>
- <td headers="CERT #">58104</td>
- <td headers="AI">Bank of Upson</td>
- <td headers="Closing Date">March 19, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="amer-natl-oh.html">American National Bank</a></td>
- <td headers="city">Parma</td>
- <td headers="state">OH</td>
- <td headers="CERT #">18806</td>
- <td headers="AI">The National Bank and Trust Company</td>
- <td headers="Closing Date">March 19, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="statewide.html">Statewide Bank</a></td>
- <td headers="city">Covington</td>
- <td headers="state">LA</td>
- <td headers="CERT #">29561</td>
- <td headers="AI">Home Bank</td>
- <td headers="Closing Date">March 12, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
-<tr>
- <td><a href="oldsouthern.html">Old Southern Bank</a></td>
- <td headers="city">Orlando</td>
- <td headers="state">FL</td>
- <td headers="CERT #">58182</td>
- <td headers="AI">Centennial Bank</td>
- <td headers="Closing Date">March 12, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="parkavenue-ny.html">The Park Avenue Bank</a></td>
- <td headers="city">New York</td>
- <td headers="state">NY</td>
- <td headers="CERT #">27096</td>
- <td headers="AI">Valley National Bank</td>
- <td headers="Closing Date">March 12, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="libertypointe.html">LibertyPointe Bank</a></td>
- <td headers="city">New York</td>
- <td headers="state">NY</td>
- <td headers="CERT #">58071</td>
- <td headers="AI">Valley National Bank</td>
- <td headers="Closing Date">March 11, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="centennial-ut.html">Centennial Bank</a></td>
- <td headers="city">Ogden</td>
- <td headers="state">UT</td>
- <td headers="CERT #">34430</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">March 5, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="waterfield.html">Waterfield Bank</a></td>
- <td headers="city">Germantown</td>
- <td headers="state">MD</td>
- <td headers="CERT #">34976</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">March 5, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="bankofillinois.html">Bank of Illinois</a></td>
- <td headers="city">Normal</td>
- <td headers="state">IL</td>
- <td headers="CERT #">9268</td>
- <td headers="AI">Heartland Bank and Trust Company</td>
- <td headers="Closing Date">March 5, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="sunamerican.html">Sun American Bank</a></td>
- <td headers="city">Boca Raton</td>
- <td headers="state">FL</td>
- <td headers="CERT #">27126</td>
- <td headers="AI">First-Citizens Bank & Trust Company</td>
- <td headers="Closing Date">March 5, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="rainier.html">Rainier Pacific Bank</a></td>
- <td headers="city">Tacoma</td>
- <td headers="state">WA</td>
- <td headers="CERT #">38129</td>
- <td headers="AI">Umpqua Bank</td>
- <td headers="Closing Date">February 26, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="carsonriver.html">Carson River Community Bank</a></td>
- <td headers="city">Carson City</td>
- <td headers="state">NV</td>
- <td headers="CERT #">58352</td>
- <td headers="AI">Heritage Bank of Nevada</td>
- <td headers="Closing Date">February 26, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="lajolla.html">La Jolla Bank, FSB</a></td>
- <td headers="city">La Jolla</td>
- <td headers="state">CA</td>
- <td headers="CERT #">32423</td>
- <td headers="AI">OneWest Bank, FSB</td>
- <td headers="Closing Date">February 19, 2010</td>
- <td headers="Updated">August 24, 2012</td>
- </tr>
- <tr>
- <td><a href="georgewashington.html">George Washington Savings Bank</a></td>
- <td headers="city">Orland Park</td>
- <td headers="state">IL</td>
- <td headers="CERT #">29952</td>
- <td headers="AI">FirstMerit Bank, N.A.</td>
- <td headers="Closing Date">February 19, 2010</td>
- <td headers="Updated">August 24, 2012</td>
- </tr>
- <tr>
- <td><a href="lacoste.html">The La Coste National Bank</a></td>
- <td headers="city">La Coste</td>
- <td headers="state">TX</td>
- <td headers="CERT #">3287</td>
- <td headers="AI">Community National Bank</td>
- <td headers="Closing Date">February 19, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="marco.html">Marco Community Bank</a></td>
- <td headers="city">Marco Island</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57586</td>
- <td headers="AI">Mutual of Omaha Bank</td>
- <td headers="Closing Date">February 19, 2010</td>
- <td headers="Updated">August 24, 2012</td>
- </tr>
- <tr>
- <td><a href="1stamerican.html">1st American State Bank of Minnesota</a></td>
- <td headers="city">Hancock</td>
- <td headers="state">MN</td>
- <td headers="CERT #">15448</td>
- <td headers="AI">Community Development Bank, FSB</td>
- <td headers="Closing Date">February 5, 2010</td>
- <td headers="Updated">August 24, 2012</td>
- </tr>
- <tr>
- <td><a href="americanmarine.html">American Marine Bank</a></td>
- <td headers="city">Bainbridge Island</td>
- <td headers="state">WA</td>
- <td headers="CERT #">16730</td>
- <td headers="AI">Columbia State Bank</td>
- <td headers="Closing Date">January 29, 2010</td>
- <td headers="Updated">August 24, 2012</td>
- </tr>
- <tr>
- <td><a href="firstregional.html">First Regional Bank</a></td>
- <td headers="city">Los Angeles</td>
- <td headers="state">CA</td>
- <td headers="CERT #">23011</td>
- <td headers="AI">First-Citizens Bank & Trust Company</td>
- <td headers="Closing Date">January 29, 2010</td>
- <td headers="Updated">August 24, 2012</td>
- </tr>
- <tr>
- <td><a href="cbt-cornelia.html">Community Bank and Trust</a></td>
- <td headers="city">Cornelia</td>
- <td headers="state">GA</td>
- <td headers="CERT #">5702</td>
- <td headers="AI">SCBT National Association</td>
- <td headers="Closing Date">January 29, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="marshall-mn.html">Marshall Bank, N.A.</a></td>
- <td headers="city">Hallock</td>
- <td headers="state">MN</td>
- <td headers="CERT #">16133</td>
- <td headers="AI">United Valley Bank</td>
- <td headers="Closing Date">January 29, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="floridacommunity.html">Florida Community Bank</a></td>
- <td headers="city">Immokalee</td>
- <td headers="state">FL</td>
- <td headers="CERT #">5672</td>
- <td headers="AI">Premier American Bank, N.A.</td>
- <td headers="Closing Date">January 29, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="firstnational-carrollton.html">First National Bank of Georgia</a></td>
- <td headers="city">Carrollton</td>
- <td headers="state">GA</td>
- <td headers="CERT #">16480</td>
- <td headers="AI">Community & Southern Bank</td>
- <td headers="Closing Date">January 29, 2010</td>
- <td headers="Updated">December 13, 2012</td>
- </tr>
- <tr>
- <td><a href="columbiariver.html">Columbia River Bank</a></td>
- <td headers="city">The Dalles</td>
- <td headers="state">OR</td>
- <td headers="CERT #">22469</td>
- <td headers="AI">Columbia State Bank</td>
- <td headers="Closing Date">January 22, 2010</td>
- <td headers="Updated">September 14, 2012</td>
- </tr>
- <tr>
- <td><a href="evergreen-wa.html">Evergreen Bank</a></td>
- <td headers="city">Seattle</td>
- <td headers="state">WA</td>
- <td headers="CERT #">20501</td>
- <td headers="AI">Umpqua Bank</td>
- <td headers="Closing Date">January 22, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="charter-nm.html">Charter Bank</a></td>
- <td headers="city">Santa Fe</td>
- <td headers="state">NM</td>
- <td headers="CERT #">32498</td>
- <td headers="AI">Charter Bank</td>
- <td headers="Closing Date">January 22, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="leeton.html">Bank of Leeton</a></td>
- <td headers="city">Leeton</td>
- <td headers="state">MO</td>
- <td headers="CERT #">8265</td>
- <td headers="AI">Sunflower Bank, N.A.</td>
- <td headers="Closing Date">January 22, 2010</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="premieramerican.html">Premier American Bank</a></td>
- <td headers="city">Miami</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57147</td>
- <td headers="AI">Premier American Bank, N.A.</td>
- <td headers="Closing Date">January 22, 2010</td>
- <td headers="Updated">December 13, 2012</td>
- </tr>
- <tr>
- <td><a href="barnes.html">Barnes Banking Company</a></td>
- <td headers="city">Kaysville</td>
- <td headers="state">UT</td>
- <td headers="CERT #">1252</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">January 15, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="ststephen.html">St. Stephen State Bank</a></td>
- <td headers="city">St. Stephen</td>
- <td headers="state">MN</td>
- <td headers="CERT #">17522</td>
- <td headers="AI">First State Bank of St. Joseph</td>
- <td headers="Closing Date">January 15, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="towncommunity.html">Town Community
- Bank & Trust</a></td>
- <td headers="city">Antioch</td>
- <td headers="state">IL</td>
- <td headers="CERT #">34705</td>
- <td headers="AI">First American Bank</td>
- <td headers="Closing Date">January 15, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="horizon-wa.html">Horizon Bank</a></td>
- <td headers="city">Bellingham</td>
- <td headers="state">WA</td>
- <td headers="CERT #">22977</td>
- <td headers="AI">Washington Federal Savings and Loan Association</td>
- <td headers="Closing Date">January 8, 2010</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="firstfederal-ca.html">First Federal Bank of California, F.S.B.</a></td>
- <td headers="city">Santa Monica</td>
- <td headers="state">CA</td>
- <td headers="CERT #">28536</td>
- <td headers="AI">OneWest Bank, FSB</td>
- <td headers="Closing Date">December 18, 2009</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="imperialcapital.html">Imperial Capital Bank</a></td>
- <td headers="city">La Jolla</td>
- <td headers="state">CA</td>
- <td headers="CERT #">26348</td>
- <td headers="AI">City National Bank</td>
- <td headers="Closing Date">December 18, 2009</td>
- <td headers="Updated">September 5, 2012</td>
- </tr>
- <tr>
- <td><a href="ibb.html">Independent Bankers' Bank</a></td>
- <td headers="city">Springfield</td>
- <td headers="state">IL</td>
- <td headers="CERT #">26820</td>
- <td headers="AI">The Independent BankersBank (TIB)</td>
- <td headers="Closing Date">December 18, 2009</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="newsouth.html">New South Federal Savings Bank</a></td>
- <td headers="city">Irondale</td>
- <td headers="state">AL</td>
- <td headers="CERT #">32276</td>
- <td headers="AI">Beal Bank</td>
- <td headers="Closing Date">December 18, 2009</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="citizensstate-mi.html">Citizens State Bank</a></td>
- <td headers="city">New Baltimore</td>
- <td headers="state">MI</td>
- <td headers="CERT #">1006</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">December 18, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="peoplesfirst-fl.html">Peoples First Community Bank</a></td>
- <td headers="city">Panama City</td>
- <td headers="state">FL</td>
- <td headers="CERT #">32167</td>
- <td headers="AI">Hancock Bank</td>
- <td headers="Closing Date">December 18, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="rockbridge.html">RockBridge Commercial Bank</a></td>
- <td headers="city">Atlanta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">58315</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">December 18, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="solutions.html">SolutionsBank</a></td>
- <td headers="city">Overland Park</td>
- <td headers="state">KS</td>
- <td headers="CERT #">4731</td>
- <td headers="AI">Arvest Bank</td>
- <td headers="Closing Date">December 11, 2009</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="valleycapital.html">Valley Capital Bank, N.A.</a></td>
- <td headers="city">Mesa</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">58399</td>
- <td headers="AI">Enterprise Bank & Trust</td>
- <td headers="Closing Date">December 11, 2009</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="republicfederal.html">Republic Federal Bank, N.A.</a></td>
- <td headers="city">Miami</td>
- <td headers="state">FL</td>
- <td headers="CERT #">22846</td>
- <td headers="AI">1st United Bank</td>
- <td headers="Closing Date">December 11, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="atlantic-va.html">Greater Atlantic Bank</a></td>
- <td headers="city">Reston</td>
- <td headers="state">VA</td>
- <td headers="CERT #">32583</td>
- <td headers="AI">Sonabank</td>
- <td headers="Closing Date">December 4, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="benchmark-il.html">Benchmark Bank</a></td>
- <td headers="city">Aurora</td>
- <td headers="state">IL</td>
- <td headers="CERT #">10440</td>
- <td headers="AI">MB Financial Bank, N.A.</td>
- <td headers="Closing Date">December 4, 2009</td>
- <td headers="Updated">August 23, 2012</td>
- </tr>
- <tr>
- <td><a href="amtrust.html">AmTrust Bank</a></td>
- <td headers="city">Cleveland</td>
- <td headers="state">OH</td>
- <td headers="CERT #">29776</td>
- <td headers="AI">New York Community Bank</td>
- <td headers="Closing Date">December 4, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
-<tr>
- <td><a href="tattnall.html">The Tattnall Bank</a></td>
- <td headers="city">Reidsville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">12080</td>
- <td headers="AI">Heritage Bank of the South</td>
- <td headers="Closing Date">December 4, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="firstsecurity.html">First Security National Bank</a></td>
- <td headers="city">Norcross</td>
- <td headers="state">GA</td>
- <td headers="CERT #">26290</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">December 4, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
-<tr>
- <td><a href="buckheadcommunity.html">The Buckhead Community Bank</a></td>
- <td headers="city">Atlanta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34663</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">December 4, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="commercesw-fl.html">Commerce Bank of Southwest Florida</a></td>
- <td headers="city">Fort Myers</td>
- <td headers="state">FL</td>
- <td headers="CERT #">58016</td>
- <td headers="AI">Central Bank</td>
- <td headers="Closing Date">November 20, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="pacificcoastnatl.html">Pacific Coast National Bank</a></td>
- <td headers="city">San Clemente</td>
- <td headers="state">CA</td>
- <td headers="CERT #">57914</td>
- <td headers="AI">Sunwest Bank</td>
- <td headers="Closing Date">November 13, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="orion-fl.html">Orion Bank</a></td>
- <td headers="city">Naples</td>
- <td headers="state">FL</td>
- <td headers="CERT #">22427</td>
- <td headers="AI">IBERIABANK</td>
- <td headers="Closing Date">November 13, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
-<tr>
- <td><a href="centuryfsb.html">Century Bank,
- F.S.B.</a></td>
- <td headers="city">Sarasota</td>
- <td headers="state">FL</td>
- <td headers="CERT #">32267</td>
- <td headers="AI">IBERIABANK</td>
- <td headers="Closing Date">November 13, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-
- <tr>
- <td><a href="ucb.html">United Commercial Bank</a></td>
- <td headers="city">San Francisco</td>
- <td headers="state">CA</td>
- <td headers="CERT #">32469</td>
- <td headers="AI">East West Bank</td>
- <td headers="Closing Date">November 6, 2009</td>
- <td headers="Updated">November 5, 2012</td>
- </tr>
- <tr>
- <td><a href="gateway-mo.html">Gateway Bank of St. Louis</a></td>
- <td headers="city">St. Louis</td>
- <td headers="state">MO</td>
- <td headers="CERT #">19450</td>
- <td headers="AI">Central Bank of Kansas City</td>
- <td headers="Closing Date">November 6, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="prosperan.html">Prosperan Bank</a></td>
- <td headers="city">Oakdale</td>
- <td headers="state">MN</td>
- <td headers="CERT #">35074</td>
- <td headers="AI">Alerus Financial, N.A.</td>
- <td headers="Closing Date">November 6, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="homefsb-mi.html">Home Federal Savings Bank</a></td>
- <td headers="city">Detroit</td>
- <td headers="state">MI</td>
- <td headers="CERT #">30329</td>
- <td headers="AI">Liberty Bank and Trust Company</td>
- <td headers="Closing Date">November 6, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="unitedsecurity-ga.html">United Security Bank</a></td>
- <td headers="city">Sparta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">22286</td>
- <td headers="AI">Ameris Bank</td>
- <td headers="Closing Date">November 6, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="northhouston-tx.html">North Houston Bank</a></td>
- <td headers="city">Houston</td>
- <td headers="state">TX</td>
- <td headers="CERT #">18776</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-<tr>
- <td><a href="madisonville-tx.html">Madisonville State Bank</a></td>
- <td headers="city">Madisonville</td>
- <td headers="state">TX</td>
- <td headers="CERT #">33782</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-<tr>
- <td><a href="citizens-teague.html">Citizens National Bank</a></td>
- <td headers="city">Teague</td>
- <td headers="state">TX</td>
- <td headers="CERT #">25222</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-<tr>
- <td><a href="park-il.html">Park National Bank</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">11677</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-<tr>
- <td><a href="pacificnational-ca.html">Pacific National Bank</a></td>
- <td headers="city">San Francisco</td>
- <td headers="state">CA</td>
- <td headers="CERT #">30006</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-<tr>
- <td><a href="calnational.html">California National Bank</a></td>
- <td headers="city">Los Angeles</td>
- <td headers="state">CA</td>
- <td headers="CERT #">34659</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">September 5, 2012</td>
- </tr>
-<tr>
- <td><a href="sandiegonational.html">San Diego National Bank</a></td>
- <td headers="city">San Diego</td>
- <td headers="state">CA</td>
- <td headers="CERT #">23594</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-<tr>
- <td><a href="community-lemont.html">Community Bank of Lemont</a></td>
- <td headers="city">Lemont</td>
- <td headers="state">IL</td>
- <td headers="CERT #">35291</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
-<tr>
- <td><a href="bankusa-az.html">Bank USA, N.A</a>.</td>
- <td headers="city">Phoenix</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">32218</td>
- <td headers="AI">U.S. Bank N.A.</td>
- <td headers="Closing Date">October 30, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="firstdupage.html">First DuPage Bank</a></td>
- <td headers="city">Westmont</td>
- <td headers="state">IL</td>
- <td headers="CERT #">35038</td>
- <td headers="AI">First Midwest Bank</td>
- <td headers="Closing Date">October 23, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="riverview-mn.html">Riverview Community Bank</a></td>
- <td headers="city">Otsego</td>
- <td headers="state">MN</td>
- <td headers="CERT #">57525</td>
- <td headers="AI">Central Bank</td>
- <td headers="Closing Date">October 23, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="elmwood.html">Bank of Elmwood</a></td>
- <td headers="city">Racine</td>
- <td headers="state">WI</td>
- <td headers="CERT #">18321</td>
- <td headers="AI">Tri City National Bank</td>
- <td headers="Closing Date">October 23, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
-
- <tr>
- <td><a href="flagship.html">Flagship National Bank</a></td>
- <td headers="city">Bradenton</td>
- <td headers="state">FL</td>
- <td headers="CERT #">35044</td>
- <td headers="AI">First Federal Bank of Florida</td>
- <td headers="Closing Date">October 23, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="hillcrest-fl.html">Hillcrest Bank Florida</a></td>
- <td headers="city">Naples</td>
- <td headers="state">FL</td>
- <td headers="CERT #">58336</td>
- <td headers="AI">Stonegate Bank</td>
- <td headers="Closing Date">October 23, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="americanunited.html">American United Bank</a></td>
- <td headers="city">Lawrenceville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57794</td>
- <td headers="AI">Ameris Bank</td>
- <td headers="Closing Date">October 23, 2009</td>
- <td headers="Updated">September 5, 2012</td>
- </tr>
- <tr>
- <td><a href="partners-fl.html">Partners Bank</a></td>
- <td headers="city">Naples</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57959</td>
- <td headers="AI">Stonegate Bank</td>
- <td headers="Closing Date">October 23, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="sanjoaquin.html">San Joaquin Bank</a></td>
- <td headers="city">Bakersfield</td>
- <td headers="state">CA</td>
- <td headers="CERT #">23266</td>
- <td headers="AI">Citizens Business Bank</td>
- <td headers="Closing Date">October 16, 2009</td>
- <td headers="Updated">August 22, 2012</td>
- </tr>
- <tr>
- <td><a href="scnb-co.html">Southern Colorado National Bank</a></td>
- <td headers="city">Pueblo</td>
- <td headers="state">CO</td>
- <td headers="CERT #">57263</td>
- <td headers="AI">Legacy Bank</td>
- <td headers="Closing Date">October 2, 2009</td>
- <td headers="Updated">September 5, 2012</td>
- </tr>
- <tr>
- <td><a href="jennings-mn.html">Jennings State Bank</a></td>
- <td headers="city">Spring Grove</td>
- <td headers="state">MN</td>
- <td headers="CERT #">11416</td>
- <td headers="AI">Central Bank</td>
- <td headers="Closing Date">October 2, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="warren-mi.html">Warren Bank</a></td>
- <td headers="city">Warren</td>
- <td headers="state">MI</td>
- <td headers="CERT #">34824</td>
- <td headers="AI">The Huntington National Bank</td>
- <td headers="Closing Date">October 2, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="georgian.html">Georgian Bank</a></td>
- <td headers="city">Atlanta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57151</td>
- <td headers="AI">First Citizens Bank and Trust Company, Inc.</td>
- <td headers="Closing Date">September 25, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="irwin-ky.html">Irwin Union Bank, F.S.B.</a></td>
- <td headers="city">Louisville</td>
- <td headers="state">KY</td>
- <td headers="CERT #">57068</td>
- <td headers="AI">First Financial Bank, N.A.</td>
- <td headers="Closing Date">September 18, 2009</td>
- <td headers="Updated">September 5, 2012</td>
- </tr>
- <tr>
- <td><a href="irwin-in.html">Irwin Union Bank and Trust Company</a></td>
- <td headers="city">Columbus</td>
- <td headers="state">IN</td>
- <td headers="CERT #">10100</td>
- <td headers="AI">First Financial Bank, N.A.</td>
- <td headers="Closing Date">September 18, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="venture-wa.html">Venture Bank</a></td>
- <td headers="city">Lacey</td>
- <td headers="state">WA</td>
- <td headers="CERT #">22868</td>
- <td headers="AI">First-Citizens Bank & Trust Company</td>
- <td headers="Closing Date">September 11, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="brickwell-mn.html">Brickwell Community Bank</a></td>
- <td headers="city">Woodbury</td>
- <td headers="state">MN</td>
- <td headers="CERT #">57736</td>
- <td headers="AI">CorTrust Bank N.A.</td>
- <td headers="Closing Date">September 11, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="corus.html">Corus Bank, N.A.</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">13693</td>
- <td headers="AI">MB Financial Bank, N.A.</td>
- <td headers="Closing Date">September 11, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="firststate-az.html">First State Bank</a></td>
- <td headers="city">Flagstaff</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">34875</td>
- <td headers="AI">Sunwest Bank</td>
- <td headers="Closing Date">September 4, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="platinum-il.html">Platinum Community Bank</a></td>
- <td headers="city">Rolling Meadows</td>
- <td headers="state">IL</td>
- <td headers="CERT #">35030</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">September 4, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="vantus.html">Vantus Bank</a></td>
- <td headers="city">Sioux City</td>
- <td headers="state">IA</td>
- <td headers="CERT #">27732</td>
- <td headers="AI">Great Southern Bank</td>
- <td headers="Closing Date">September 4, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="inbank.html">InBank</a></td>
- <td headers="city">Oak Forest</td>
- <td headers="state">IL</td>
- <td headers="CERT #">20203</td>
- <td headers="AI">MB Financial Bank, N.A.</td>
- <td headers="Closing Date">September 4, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="firstbankkc-mo.html">First Bank of Kansas City</a></td>
- <td headers="city">Kansas City</td>
- <td headers="state">MO</td>
- <td headers="CERT #">25231</td>
- <td headers="AI">Great American Bank</td>
- <td headers="Closing Date">September 4, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="affinity-ca.html">Affinity Bank</a></td>
- <td headers="city">Ventura</td>
- <td headers="state">CA</td>
- <td headers="CERT #">27197</td>
- <td headers="AI">Pacific Western Bank</td>
- <td headers="Closing Date">August 28, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="mainstreet-mn.html">Mainstreet Bank</a></td>
- <td headers="city">Forest Lake</td>
- <td headers="state">MN</td>
- <td headers="CERT #">1909</td>
- <td headers="AI">Central Bank</td>
- <td headers="Closing Date">August 28, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="bradford-md.html">Bradford Bank</a></td>
- <td headers="city">Baltimore</td>
- <td headers="state">MD</td>
- <td headers="CERT #">28312</td>
- <td headers="AI">Manufacturers and Traders Trust Company (M&T Bank)</td>
- <td headers="Closing Date">August 28, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="guaranty-tx.html">Guaranty Bank</a></td>
- <td headers="city">Austin</td>
- <td headers="state">TX</td>
- <td headers="CERT #">32618</td>
- <td headers="AI">BBVA Compass</td>
- <td headers="Closing Date">August 21, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="capitalsouth.html">CapitalSouth Bank</a></td>
- <td headers="city">Birmingham </td>
- <td headers="state">AL</td>
- <td headers="CERT #">22130</td>
- <td headers="AI">IBERIABANK</td>
- <td headers="Closing Date">August 21, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="coweta.html">First Coweta Bank</a> </td>
- <td headers="city">Newnan</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57702</td>
- <td headers="AI">United Bank</td>
- <td headers="Closing Date">August 21, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="ebank.html">ebank</a></td>
- <td headers="city">Atlanta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34682</td>
- <td headers="AI">Stearns Bank, N.A.</td>
- <td headers="Closing Date">August 21, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="community-nv.html">Community Bank of Nevada</a></td>
- <td headers="city">Las Vegas</td>
- <td headers="state">NV</td>
- <td headers="CERT #">34043</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">August 14, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="community-az.html">Community Bank of Arizona</a></td>
- <td headers="city">Phoenix</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">57645</td>
- <td headers="AI">MidFirst Bank</td>
- <td headers="Closing Date">August 14, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="union-az.html">Union Bank, National Association</a></td>
- <td headers="city">Gilbert</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">34485</td>
- <td headers="AI">MidFirst Bank</td>
- <td headers="Closing Date">August 14, 2009</td>
- <td headers="Updated">August 21, 2012</td>
- </tr>
- <tr>
- <td><a href="colonial-al.html">Colonial Bank</a></td>
- <td headers="city">Montgomery</td>
- <td headers="state">AL</td>
- <td headers="CERT #">9609</td>
- <td headers="AI">Branch Banking & Trust Company, (BB&T) </td>
- <td headers="Closing Date">August 14, 2009</td>
- <td headers="Updated">September 5, 2012</td>
- </tr>
- <tr>
- <td><a href="dwelling.html">Dwelling House Savings and Loan Association</a></td>
- <td headers="city">Pittsburgh</td>
- <td headers="state">PA</td>
- <td headers="CERT #">31559</td>
- <td headers="AI">PNC Bank, N.A.</td>
- <td headers="Closing Date">August 14, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="community-prineville.html">Community First Bank</a></td>
- <td headers="city">Prineville</td>
- <td headers="state">OR</td>
- <td headers="CERT #">23268</td>
- <td headers="AI">Home Federal Bank</td>
- <td headers="Closing Date">August 7, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="community-venice.html">Community National Bank of Sarasota County</a></td>
- <td headers="city">Venice</td>
- <td headers="state">FL</td>
- <td headers="CERT #">27183</td>
- <td headers="AI">Stearns Bank, N.A.</td>
- <td headers="Closing Date">August 7, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="fsb-sarasota.html">First State Bank</a></td>
- <td headers="city">Sarasota</td>
- <td headers="state">FL</td>
- <td headers="CERT #">27364</td>
- <td headers="AI">Stearns Bank, N.A.</td>
- <td headers="Closing Date">August 7, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="mutual-harvey.html">Mutual Bank</a></td>
- <td headers="city">Harvey</td>
- <td headers="state">IL</td>
- <td headers="CERT #">18659</td>
- <td headers="AI">United Central Bank</td>
- <td headers="Closing Date">July 31, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="americano.html">First BankAmericano</a></td>
- <td headers="city">Elizabeth</td>
- <td headers="state">NJ</td>
- <td headers="CERT #">34270</td>
- <td headers="AI">Crown Bank</td>
- <td headers="Closing Date">July 31, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="peoplescommunity-oh.html">Peoples Community Bank</a></td>
- <td headers="city">West Chester</td>
- <td headers="state">OH</td>
- <td headers="CERT #">32288</td>
- <td headers="AI">First Financial Bank, N.A.</td>
- <td headers="Closing Date">July 31, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="integrity-fl.html">Integrity Bank</a></td>
- <td headers="city">Jupiter</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57604</td>
- <td headers="AI">Stonegate Bank</td>
- <td headers="Closing Date">July 31, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="fsb-altus.html">First State Bank of Altus</a></td>
- <td headers="city">Altus</td>
- <td headers="state">OK</td>
- <td headers="CERT #">9873</td>
- <td headers="AI">Herring Bank</td>
- <td headers="Closing Date">July 31, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="sb-jones.html">Security Bank of Jones County</a></td>
- <td headers="city">Gray</td>
- <td headers="state">GA</td>
- <td headers="CERT #">8486</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">July 24, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="sb-houston.html">Security Bank of Houston County</a></td>
- <td headers="city">Perry</td>
- <td headers="state">GA</td>
- <td headers="CERT #">27048</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">July 24, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="sb-bibb.html">Security Bank of Bibb County</a></td>
- <td headers="city">Macon</td>
- <td headers="state">GA</td>
- <td headers="CERT #">27367</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">July 24, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="sb-metro.html">Security Bank of North Metro</a></td>
- <td headers="city">Woodstock</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57105</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">July 24, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="sb-fulton.html">Security Bank of North Fulton</a></td>
- <td headers="city">Alpharetta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57430</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">July 24, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="sb-gwinnett.html">Security Bank of Gwinnett County</a></td>
- <td headers="city">Suwanee</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57346</td>
- <td headers="AI">State Bank and Trust Company</td>
- <td headers="Closing Date">July 24, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="waterford.html">Waterford Village Bank</a></td>
- <td headers="city">Williamsville</td>
- <td headers="state">NY</td>
- <td headers="CERT #">58065</td>
- <td headers="AI">Evans Bank, N.A.</td>
- <td headers="Closing Date">July 24, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="temecula.html">Temecula Valley Bank</a></td>
- <td headers="city">Temecula</td>
- <td headers="state">CA</td>
- <td headers="CERT #">34341</td>
- <td headers="AI">First-Citizens Bank & Trust Company</td>
- <td headers="Closing Date">July 17, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="vineyard.html">Vineyard Bank</a></td>
- <td headers="city">Rancho Cucamonga</td>
- <td headers="state">CA</td>
- <td headers="CERT #">23556</td>
- <td headers="AI">California Bank & Trust</td>
- <td headers="Closing Date">July 17, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="bankfirst.html">BankFirst</a></td>
- <td headers="city">Sioux Falls</td>
- <td headers="state">SD</td>
- <td headers="CERT #">34103</td>
- <td headers="AI">Alerus Financial, N.A.</td>
- <td headers="Closing Date">July 17, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="piedmont.html">First Piedmont Bank</a></td>
- <td headers="city">Winder</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34594</td>
- <td headers="AI">First American Bank and Trust Company</td>
- <td headers="Closing Date">July 17, 2009</td>
- <td headers="Updated">January 15, 2013</td>
- </tr>
- <tr>
- <td><a href="wyoming.html">Bank of Wyoming</a></td>
- <td headers="city">Thermopolis</td>
- <td headers="state">WY</td>
- <td headers="CERT #">22754</td>
- <td headers="AI">Central Bank & Trust</td>
- <td headers="Closing Date">July 10, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="founders.html">Founders Bank</a></td>
- <td headers="city">Worth</td>
- <td headers="state">IL</td>
- <td headers="CERT #">18390</td>
- <td headers="AI">The PrivateBank and Trust Company</td>
- <td headers="Closing Date">July 2, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="millennium.html">Millennium State Bank of Texas</a></td>
- <td headers="city">Dallas</td>
- <td headers="state">TX</td>
- <td headers="CERT #">57667</td>
- <td headers="AI">State Bank of Texas</td>
- <td headers="Closing Date">July 2, 2009</td>
- <td headers="Updated">October 26, 2012</td>
- </tr>
- <tr>
- <td><a href="danville.html">First National Bank of Danville</a></td>
- <td headers="city">Danville</td>
- <td headers="state">IL</td>
- <td headers="CERT #">3644</td>
- <td headers="AI">First Financial Bank, N.A.</td>
- <td headers="Closing Date">July 2, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="elizabeth.html">Elizabeth State Bank</a></td>
- <td headers="city">Elizabeth</td>
- <td headers="state">IL</td>
- <td headers="CERT #">9262</td>
- <td headers="AI">Galena State Bank and Trust Company</td>
- <td headers="Closing Date">July 2, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="rockriver.html">Rock River Bank</a></td>
- <td headers="city">Oregon</td>
- <td headers="state">IL</td>
- <td headers="CERT #">15302</td>
- <td headers="AI">The Harvard State Bank</td>
- <td headers="Closing Date">July 2, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="winchester.html">First State Bank of Winchester</a></td>
- <td headers="city">Winchester</td>
- <td headers="state">IL</td>
- <td headers="CERT #">11710</td>
- <td headers="AI">The First National Bank of Beardstown</td>
- <td headers="Closing Date">July 2, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="warner.html">John Warner Bank</a></td>
- <td headers="city">Clinton</td>
- <td headers="state">IL</td>
- <td headers="CERT #">12093</td>
- <td headers="AI">State Bank of Lincoln</td>
- <td headers="Closing Date">July 2, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="mirae.html">Mirae Bank</a></td>
- <td headers="city">Los Angeles</td>
- <td headers="state">CA</td>
- <td headers="CERT #">57332</td>
- <td headers="AI">Wilshire State Bank</td>
- <td headers="Closing Date">June 26, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="metropacific.html">MetroPacific Bank</a></td>
- <td headers="city">Irvine</td>
- <td headers="state">CA</td>
- <td headers="CERT #">57893</td>
- <td headers="AI">Sunwest Bank</td>
- <td headers="Closing Date">June 26, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="horizon.html">Horizon Bank</a></td>
- <td headers="city">Pine City</td>
- <td headers="state">MN</td>
- <td headers="CERT #">9744</td>
- <td headers="AI">Stearns Bank, N.A.</td>
- <td headers="Closing Date">June 26, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="neighbor.html">Neighborhood Community Bank</a></td>
- <td headers="city">Newnan</td>
- <td headers="state">GA</td>
- <td headers="CERT #">35285</td>
- <td headers="AI">CharterBank</td>
- <td headers="Closing Date">June 26, 2009</td>
- <td headers="Updated">August 20, 2012</td>
- </tr>
- <tr>
- <td><a href="communityga.html">Community Bank of West Georgia</a></td>
- <td headers="city">Villa Rica</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57436</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">June 26, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="anthony.html">First National Bank of Anthony</a></td>
- <td headers="city">Anthony</td>
- <td headers="state">KS</td>
- <td headers="CERT #">4614</td>
- <td headers="AI">Bank of Kansas</td>
- <td headers="Closing Date">June 19, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="cooperative.html">Cooperative Bank</a></td>
- <td headers="city">Wilmington</td>
- <td headers="state">NC</td>
- <td headers="CERT #">27837</td>
- <td headers="AI">First Bank</td>
- <td headers="Closing Date">June 19, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="scb.html">Southern Community Bank</a></td>
- <td headers="city">Fayetteville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">35251</td>
- <td headers="AI">United Community Bank</td>
- <td headers="Closing Date">June 19, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="lincolnwood.html">Bank of Lincolnwood</a></td>
- <td headers="city">Lincolnwood</td>
- <td headers="state">IL</td>
- <td headers="CERT #">17309</td>
- <td headers="AI">Republic Bank of Chicago</td>
- <td headers="Closing Date">June 5, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="citizensnational.html">Citizens National Bank</a></td>
- <td headers="city">Macomb</td>
- <td headers="state">IL</td>
- <td headers="CERT #">5757</td>
- <td headers="AI">Morton Community Bank</td>
- <td headers="Closing Date">May 22, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="strategiccapital.html">Strategic Capital Bank</a></td>
- <td headers="city">Champaign</td>
- <td headers="state">IL</td>
- <td headers="CERT #">35175</td>
- <td headers="AI">Midland States Bank</td>
- <td headers="Closing Date">May 22, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="bankunited.html">BankUnited, FSB</a></td>
- <td headers="city">Coral Gables</td>
- <td headers="state">FL</td>
- <td headers="CERT #">32247</td>
- <td headers="AI">BankUnited</td>
- <td headers="Closing Date">May 21, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="westsound.html">Westsound Bank</a></td>
- <td headers="city">Bremerton</td>
- <td headers="state">WA</td>
- <td headers="CERT #">34843</td>
- <td headers="AI">Kitsap Bank</td>
- <td headers="Closing Date">May 8, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="americawest.html">America West Bank</a></td>
- <td headers="city">Layton</td>
- <td headers="state">UT</td>
- <td headers="CERT #">35461</td>
- <td headers="AI">Cache Valley Bank</td>
- <td headers="Closing Date">May 1, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="citizens.html">Citizens Community Bank</a></td>
- <td headers="city">Ridgewood</td>
- <td headers="state">NJ</td>
- <td headers="CERT #">57563</td>
- <td headers="AI">North Jersey Community Bank</td>
- <td headers="Closing Date">May 1, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="silverton.html">Silverton Bank, NA</a></td>
- <td headers="city">Atlanta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">26535</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">May 1, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="firstbankidaho.html">First Bank of Idaho</a></td>
- <td headers="city">Ketchum</td>
- <td headers="state">ID</td>
- <td headers="CERT #">34396</td>
- <td headers="AI">U.S. Bank, N.A.</td>
- <td headers="Closing Date">April 24, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="beverlyhills.html">First Bank of Beverly Hills</a></td>
- <td headers="city">Calabasas</td>
- <td headers="state">CA</td>
- <td headers="CERT #">32069</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">April 24, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="michiganheritage.html">Michigan Heritage Bank</a></td>
- <td headers="city">Farmington Hills</td>
- <td headers="state">MI</td>
- <td headers="CERT #">34369</td>
- <td headers="AI">Level One Bank</td>
- <td headers="Closing Date">April 24, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="amsouthern.html">American Southern Bank</a></td>
- <td headers="city">Kennesaw</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57943</td>
- <td headers="AI">Bank of North Georgia</td>
- <td headers="Closing Date">April 24, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="greatbasin.html">Great Basin Bank of Nevada</a></td>
- <td headers="city">Elko</td>
- <td headers="state">NV</td>
- <td headers="CERT #">33824</td>
- <td headers="AI">Nevada State Bank</td>
- <td headers="Closing Date">April 17, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="amsterling.html">American Sterling Bank</a></td>
- <td headers="city">Sugar Creek</td>
- <td headers="state">MO</td>
- <td headers="CERT #">8266</td>
- <td headers="AI">Metcalf Bank</td>
- <td headers="Closing Date">April 17, 2009</td>
- <td headers="Updated">August 31, 2012</td>
- </tr>
- <tr>
- <td><a href="newfrontier.html">New Frontier Bank</a></td>
- <td headers="city">Greeley</td>
- <td headers="state">CO</td>
- <td headers="CERT #">34881</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">April 10, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="capefear.html">Cape Fear Bank</a></td>
- <td headers="city">Wilmington</td>
- <td headers="state">NC</td>
- <td headers="CERT #">34639</td>
- <td headers="AI">First Federal Savings and Loan Association</td>
- <td headers="Closing Date">April 10, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="omni.html">Omni National Bank</a></td>
- <td headers="city">Atlanta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">22238</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">March 27, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="teambank.html">TeamBank, NA</a></td>
- <td headers="city">Paola</td>
- <td headers="state">KS</td>
- <td headers="CERT #">4754</td>
- <td headers="AI">Great Southern Bank</td>
- <td headers="Closing Date">March 20, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="coloradonational.html">Colorado National Bank</a></td>
- <td headers="city">Colorado Springs</td>
- <td headers="state">CO</td>
- <td headers="CERT #">18896</td>
- <td headers="AI">Herring Bank</td>
- <td headers="Closing Date">March 20, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="firstcity.html">FirstCity Bank</a></td>
- <td headers="city">Stockbridge</td>
- <td headers="state">GA</td>
- <td headers="CERT #">18243</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">March 20, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="freedomga.html">Freedom Bank of Georgia</a></td>
- <td headers="city">Commerce</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57558</td>
- <td headers="AI">Northeast Georgia Bank</td>
- <td headers="Closing Date">March 6, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="securitysavings.html">Security Savings Bank</a></td>
- <td headers="city">Henderson</td>
- <td headers="state">NV</td>
- <td headers="CERT #">34820</td>
- <td headers="AI">Bank of Nevada</td>
- <td headers="Closing Date">February 27, 2009</td>
- <td headers="Updated">September 7, 2012</td>
- </tr>
- <tr>
- <td><a href="heritagebank.html">Heritage Community Bank</a></td>
- <td headers="city">Glenwood</td>
- <td headers="state">IL</td>
- <td headers="CERT #">20078</td>
- <td headers="AI">MB Financial Bank, N.A.</td>
- <td headers="Closing Date">February 27, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="silverfalls.html">Silver Falls Bank</a></td>
- <td headers="city">Silverton</td>
- <td headers="state">OR</td>
- <td headers="CERT #">35399</td>
- <td headers="AI">Citizens Bank</td>
- <td headers="Closing Date">February 20, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="pinnacle.html">Pinnacle Bank of Oregon</a></td>
- <td headers="city">Beaverton</td>
- <td headers="state">OR</td>
- <td headers="CERT #">57342</td>
- <td headers="AI">Washington Trust Bank of Spokane</td>
- <td headers="Closing Date">February 13, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="cornbelt.html">Corn Belt Bank & Trust Co.</a></td>
- <td headers="city">Pittsfield</td>
- <td headers="state">IL</td>
- <td headers="CERT #">16500</td>
- <td headers="AI">The Carlinville National Bank</td>
- <td headers="Closing Date">February 13, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="riverside.html">Riverside Bank of the Gulf Coast</a></td>
- <td headers="city">Cape Coral</td>
- <td headers="state">FL</td>
- <td headers="CERT #">34563</td>
- <td headers="AI">TIB Bank</td>
- <td headers="Closing Date">February 13, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="sherman.html">Sherman County Bank</a></td>
- <td headers="city">Loup City</td>
- <td headers="state">NE</td>
- <td headers="CERT #">5431</td>
- <td headers="AI">Heritage Bank</td>
- <td headers="Closing Date">February 13, 2009</td>
- <td headers="Updated">August 17, 2012</td>
- </tr>
- <tr>
- <td><a href="county.html">County Bank</a></td>
- <td headers="city">Merced</td>
- <td headers="state">CA</td>
- <td headers="CERT #">22574</td>
- <td headers="AI">Westamerica Bank</td>
- <td headers="Closing Date">February 6, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="alliance.html">Alliance Bank</a></td>
- <td headers="city">Culver City</td>
- <td headers="state">CA</td>
- <td headers="CERT #"> 23124</td>
- <td headers="AI">California Bank & Trust</td>
- <td headers="Closing Date">February 6, 2009</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="firstbank.html">FirstBank Financial Services</a></td>
- <td headers="city">McDonough</td>
- <td headers="state">GA</td>
- <td headers="CERT #">57017</td>
- <td headers="AI">Regions Bank</td>
- <td headers="Closing Date">February 6, 2009</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="ocala.html">Ocala National Bank</a></td>
- <td headers="city">Ocala</td>
- <td headers="state">FL</td>
- <td headers="CERT #">26538</td>
- <td headers="AI">CenterState Bank of Florida, N.A.</td>
- <td headers="Closing Date">January 30, 2009</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="suburban.html">Suburban FSB</a></td>
- <td headers="city">Crofton</td>
- <td headers="state">MD</td>
- <td headers="CERT #">30763</td>
- <td headers="AI">Bank of Essex</td>
- <td headers="Closing Date">January 30, 2009</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="magnet.html">MagnetBank</a></td>
- <td headers="city">Salt Lake City</td>
- <td headers="state">UT</td>
- <td headers="CERT #">58001</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">January 30, 2009</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="centennial.html">1st Centennial Bank</a></td>
- <td headers="city">Redlands</td>
- <td headers="state">CA</td>
- <td headers="CERT #">33025</td>
- <td headers="AI">First California Bank</td>
- <td headers="Closing Date">January 23, 2009</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="clark.html">Bank of Clark County</a></td>
- <td headers="city">Vancouver</td>
- <td headers="state">WA</td>
- <td headers="CERT #">34959</td>
- <td headers="AI">Umpqua Bank</td>
- <td headers="Closing Date">January 16, 2009</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="commerce.html">National Bank of Commerce</a></td>
- <td headers="city">Berkeley</td>
- <td headers="state">IL</td>
- <td headers="CERT #">19733</td>
- <td headers="AI">Republic Bank of Chicago</td>
- <td headers="Closing Date">January 16, 2009</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="sanderson.html">Sanderson State Bank</a><br />
- <a href="sanderson_spanish.html">En Espanol</a></td>
- <td headers="city">Sanderson</td>
- <td headers="state">TX</td>
- <td headers="CERT #">11568</td>
- <td headers="AI">The Pecos County State Bank</td>
- <td headers="Closing Date">December 12, 2008</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="haventrust.html">Haven Trust Bank</a></td>
- <td headers="city">Duluth</td>
- <td headers="state">GA</td>
- <td headers="CERT #">35379</td>
- <td headers="AI">Branch Banking & Trust Company, (BB&T) </td>
- <td headers="Closing Date">December 12, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="firstga.html">First Georgia Community Bank</a></td>
- <td headers="city">Jackson</td>
- <td headers="state">GA</td>
- <td headers="CERT #">34301</td>
- <td headers="AI">United Bank</td>
- <td headers="Closing Date">December 5, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="pff.html">PFF Bank & Trust </a></td>
- <td headers="city">Pomona</td>
- <td headers="state">CA</td>
- <td headers="CERT #">28344</td>
- <td headers="AI">U.S. Bank, N.A.</td>
- <td headers="Closing Date">November 21, 2008</td>
- <td headers="Updated">January 4, 2013</td>
- </tr>
- <tr>
- <td><a href="downey.html">Downey Savings & Loan</a></td>
- <td headers="city">Newport Beach</td>
- <td headers="state">CA</td>
- <td headers="CERT #">30968</td>
- <td headers="AI">U.S. Bank, N.A.</td>
- <td headers="Closing Date">November 21, 2008</td>
- <td headers="Updated">January 4, 2013</td>
- </tr>
- <tr>
- <td><a href="community.html">Community Bank</a></td>
- <td headers="city">Loganville</td>
- <td headers="state">GA</td>
- <td headers="CERT #">16490</td>
- <td headers="AI">Bank of Essex</td>
- <td headers="Closing Date">November 21, 2008</td>
- <td headers="Updated">September 4, 2012</td>
- </tr>
- <tr>
- <td><a href="securitypacific.html">Security Pacific Bank</a></td>
- <td headers="city">Los Angeles</td>
- <td headers="state">CA</td>
- <td headers="CERT #">23595</td>
- <td headers="AI">Pacific Western Bank</td>
- <td headers="Closing Date">November 7, 2008</td>
- <td headers="Updated">August 28, 2012</td>
- </tr>
- <tr>
- <td><a href="franklinbank.html">Franklin Bank, SSB</a></td>
- <td headers="city">Houston</td>
- <td headers="state">TX</td>
- <td headers="CERT #">26870</td>
- <td headers="AI">Prosperity Bank</td>
- <td headers="Closing Date">November 7, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="freedom.html">Freedom Bank</a></td>
- <td headers="city">Bradenton</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57930</td>
- <td headers="AI">Fifth Third Bank</td>
- <td headers="Closing Date">October 31, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="alpha.html">Alpha Bank & Trust</a></td>
- <td headers="city">Alpharetta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">58241</td>
- <td headers="AI">Stearns Bank, N.A.</td>
- <td headers="Closing Date">October 24, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="meridian.html">Meridian Bank</a></td>
- <td headers="city">Eldred</td>
- <td headers="state">IL</td>
- <td headers="CERT #">13789</td>
- <td headers="AI">National Bank</td>
- <td headers="Closing Date">October 10, 2008</td>
- <td headers="Updated">May 31, 2012</td>
- </tr>
- <tr>
- <td><a href="mainstreet.html">Main Street Bank</a></td>
- <td headers="city">Northville</td>
- <td headers="state">MI</td>
- <td headers="CERT #">57654</td>
- <td headers="AI">Monroe Bank & Trust</td>
- <td headers="Closing Date">October 10, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="wamu.html">Washington Mutual Bank <br />
- (Including its subsidiary Washington Mutual Bank FSB)</a></td>
- <td headers="city">Henderson</td>
- <td headers="state">NV</td>
- <td headers="CERT #">32633</td>
- <td headers="AI">JP Morgan Chase Bank</td>
- <td headers="Closing Date">September 25, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <!-- <tr>
- <td width="210"><a href="wamu.html">Washington Mutual Bank FSB</a></td>
- <td headers="city" width="126">Park City</td>
- <td headers="state" width="44">UT</td>
- <td headers="CERT #" width="61">33891</td>
- <td headers="Closing Date" width="117">September 25, 2008</td>
- <td headers="Updated" width="129">November 23, 2009</td>
- </tr> -->
- <tr>
- <td><a href="ameribank.html">Ameribank</a></td>
- <td headers="city">Northfork</td>
- <td headers="state">WV</td>
- <td headers="CERT #">6782</td>
- <td headers="AI">The Citizens Savings Bank<br /><br />Pioneer Community Bank, Inc.</td>
- <td headers="Closing Date">September 19, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="silverstate.html">Silver State Bank</a><br />
- <a href="silverstatesp.html">En Espanol </a></td>
- <td headers="city">Henderson</td>
- <td headers="state">NV</td>
- <td headers="CERT #">34194</td>
- <td headers="AI">Nevada State Bank</td>
- <td headers="Closing Date">September 5, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="integrity.html">Integrity Bank</a></td>
- <td headers="city">Alpharetta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">35469</td>
- <td headers="AI">Regions Bank</td>
- <td headers="Closing Date">August 29, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="columbian.html">Columbian Bank & Trust</a></td>
- <td headers="city">Topeka</td>
- <td headers="state">KS</td>
- <td headers="CERT #">22728</td>
- <td headers="AI">Citizens Bank & Trust</td>
- <td headers="Closing Date">August 22, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="firstprioritybank.html">First Priority Bank</a></td>
- <td headers="city">Bradenton</td>
- <td headers="state">FL</td>
- <td headers="CERT #">57523</td>
- <td headers="AI">SunTrust Bank</td>
- <td headers="Closing Date">August 1, 2008</td>
- <td headers="Updated">August 16, 2012</td>
- </tr>
- <tr>
- <td><a href="heritage.html">First Heritage Bank, NA</a></td>
- <td headers="city">Newport Beach</td>
- <td headers="state">CA</td>
- <td headers="CERT #">57961</td>
- <td headers="AI">Mutual of Omaha Bank</td>
- <td headers="Closing Date">July 25, 2008</td>
- <td headers="Updated">August 28, 2012</td>
- </tr>
- <tr>
- <td><a href="fnbnv.html">First National Bank of Nevada</a></td>
- <td headers="city">Reno</td>
- <td headers="state">NV</td>
- <td headers="CERT #">27011</td>
- <td headers="AI">Mutual of Omaha Bank</td>
- <td headers="Closing Date">July 25, 2008</td>
- <td headers="Updated">August 28, 2012</td>
- </tr>
- <tr>
- <td><a href="IndyMac.html">IndyMac Bank</a></td>
- <td headers="city">Pasadena</td>
- <td headers="state">CA</td>
- <td headers="CERT #">29730</td>
- <td headers="AI">OneWest Bank, FSB</td>
- <td headers="Closing Date">July 11, 2008</td>
- <td headers="Updated">August 28, 2012</td>
- </tr>
- <tr>
- <td><a href="first_integrity_bank.html">First Integrity Bank, NA</a></td>
- <td headers="city">Staples</td>
- <td headers="state">MN</td>
- <td headers="CERT #">12736</td>
- <td headers="AI">First International Bank and Trust</td>
- <td headers="Closing Date">May 30, 2008</td>
- <td headers="Updated">August 28, 2012</td>
- </tr><tr>
- <td><a href="anb.html">ANB Financial, NA</a></td>
- <td headers="city">Bentonville</td>
- <td headers="state">AR</td>
- <td headers="CERT #">33901</td>
- <td headers="AI">Pulaski Bank and Trust Company</td>
- <td headers="Closing Date">May 9, 2008</td>
- <td headers="Updated">August 28, 2012</td>
- </tr><tr>
- <td><a href="Hume.html">Hume Bank</a></td>
- <td headers="city">Hume</td>
- <td headers="state">MO</td>
- <td headers="CERT #">1971</td>
- <td headers="AI">Security Bank</td>
- <td headers="Closing Date">March 7, 2008</td>
- <td headers="Updated">August 28, 2012</td>
- </tr>
- <tr>
- <td><a href="Douglass.html">Douglass National Bank</a></td>
- <td headers="city">Kansas City</td>
- <td headers="state">MO</td>
- <td headers="CERT #">24660</td>
- <td headers="AI">Liberty Bank and Trust Company</td>
- <td headers="Closing Date">January 25, 2008</td>
- <td headers="Updated">October 26, 2012</td>
- </tr>
- <tr>
- <td><a href="MiamiValley.html">Miami Valley Bank</a></td>
- <td headers="city">Lakeview</td>
- <td headers="state">OH</td>
- <td headers="CERT #">16848</td>
- <td headers="AI">The Citizens Banking Company</td>
- <td headers="Closing Date">October 4, 2007</td>
- <td headers="Updated">August 28, 2012</td>
- </tr>
- <tr>
- <td><a href="NetBank.html">NetBank</a></td>
- <td headers="city">Alpharetta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">32575</td>
- <td headers="AI">ING DIRECT</td>
- <td headers="Closing Date">September 28, 2007</td>
- <td headers="Updated">August 28, 2012</td>
- </tr>
- <tr>
- <td><a href="MetropolitanSB.html">Metropolitan Savings Bank</a></td>
- <td headers="city">Pittsburgh</td>
- <td headers="state">PA</td>
- <td headers="CERT #">35353</td>
- <td headers="AI">Allegheny Valley Bank of Pittsburgh</td>
- <td headers="Closing Date">February 2, 2007</td>
- <td headers="Updated">October 27, 2010</td>
- </tr>
- <tr>
- <td><a href="ephraim.html">Bank of Ephraim</a></td>
- <td headers="city">Ephraim</td>
- <td headers="state">UT</td>
- <td headers="CERT #">1249</td>
- <td headers="AI">Far West Bank</td>
- <td headers="Closing Date">June 25, 2004</td>
- <td headers="Updated">April 9, 2008</td>
- </tr>
- <tr>
- <td><a href="reliance.html">Reliance Bank</a></td>
- <td headers="city">White Plains</td>
- <td headers="state">NY</td>
- <td headers="CERT #">26778</td>
- <td headers="AI">Union State Bank</td>
- <td headers="Closing Date">March 19, 2004</td>
- <td headers="Updated">April 9, 2008</td>
- </tr>
- <tr>
- <td><a href="gnb.html">Guaranty National Bank of Tallahassee</a></td>
- <td headers="city">Tallahassee</td>
- <td headers="state">FL</td>
- <td headers="CERT #">26838</td>
- <td headers="AI">Hancock Bank of Florida</td>
- <td headers="Closing Date">March 12, 2004</td>
- <td headers="Updated">June 5, 2012</td>
- </tr>
- <tr>
- <td><a href="dollar.html">Dollar Savings Bank</a></td>
- <td headers="city">Newark</td>
- <td headers="state">NJ</td>
- <td headers="CERT #">31330</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">February 14, 2004</td>
- <td headers="Updated">April 9, 2008</td>
- </tr>
- <tr>
- <td><a href="pulaski.html">Pulaski Savings Bank</a></td>
- <td headers="city">Philadelphia</td>
- <td headers="state">PA</td>
- <td headers="CERT #">27203</td>
- <td headers="AI">Earthstar Bank</td>
- <td headers="Closing Date">November 14, 2003</td>
- <td headers="Updated">July 22, 2005</td>
- </tr>
- <tr>
- <td><a href="blanchardville.html">First National Bank of Blanchardville</a></td>
- <td headers="city">Blanchardville</td>
- <td headers="state">WI</td>
- <td headers="CERT #">11639</td>
- <td headers="AI">The Park Bank</td>
- <td headers="Closing Date">May 9, 2003</td>
- <td headers="Updated">June 5, 2012</td>
- </tr>
- <tr>
- <td><a href="spbank.html">Southern Pacific Bank</a></td>
- <td headers="city">Torrance</td>
- <td headers="state">CA</td>
- <td headers="CERT #">27094</td>
- <td headers="AI">Beal Bank</td>
- <td headers="Closing Date">February 7, 2003</td>
- <td headers="Updated">October 20, 2008</td>
- </tr>
- <tr>
- <td><a href="farmers.html">Farmers Bank of Cheneyville</a></td>
- <td headers="city">Cheneyville</td>
- <td headers="state">LA</td>
- <td headers="CERT #">16445</td>
- <td headers="AI">Sabine State Bank & Trust</td>
- <td headers="Closing Date">December 17, 2002</td>
- <td headers="Updated">October 20, 2004</td>
- </tr>
- <tr>
- <td><a href="bankofalamo.html">Bank of Alamo</a></td>
- <td headers="city">Alamo</td>
- <td headers="state">TN</td>
- <td headers="CERT #">9961</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">November 8, 2002</td>
- <td headers="Updated">March 18, 2005</td>
- </tr>
- <tr>
- <td><a href="amtrade.html">AmTrade International Bank</a><br /><a href="amtrade-spanish.html">En Espanol </a></td>
- <td headers="city">Atlanta</td>
- <td headers="state">GA</td>
- <td headers="CERT #">33784</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">September 30, 2002</td>
- <td headers="Updated">September 11, 2006</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="universal.html">Universal Federal Savings Bank</a></td>
- <td headers="city">Chicago</td>
- <td headers="state">IL</td>
- <td headers="CERT #">29355</td>
- <td headers="AI">Chicago Community Bank</td>
- <td headers="Closing Date">June 27, 2002</td>
- <td headers="Updated">April 9, 2008</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="cbc.html">Connecticut Bank of Commerce</a></td>
- <td headers="city">Stamford</td>
- <td headers="state">CT</td>
- <td headers="CERT #">19183</td>
- <td headers="AI">Hudson United Bank</td>
- <td headers="Closing Date">June 26, 2002</td>
- <td headers="Updated">February 14, 2012</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="newcentury.html">New Century Bank</a></td>
- <td headers="city">Shelby Township</td>
- <td headers="state">MI</td>
- <td headers="CERT #">34979</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">March 28, 2002</td>
- <td headers="Updated">March 18, 2005</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="netfirst.html">Net 1st National Bank</a></td>
- <td headers="city">Boca Raton</td>
- <td headers="state">FL</td>
- <td headers="CERT #">26652</td>
- <td headers="AI">Bank Leumi USA</td>
- <td headers="Closing Date">March 1, 2002</td>
- <td headers="Updated">April 9, 2008</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="nextbank.html">NextBank, NA</a></td>
- <td headers="city">Phoenix</td>
- <td headers="state">AZ</td>
- <td headers="CERT #">22314</td>
- <td headers="AI">No Acquirer</td>
- <td headers="Closing Date">February 7, 2002</td>
- <td headers="Updated">August 27, 2010</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="Oakwood.html">Oakwood Deposit Bank Co.</a></td>
- <td headers="city">Oakwood</td>
- <td headers="state">OH</td>
- <td headers="CERT #">8966</td>
- <td headers="AI">The State Bank & Trust Company</td>
- <td headers="Closing Date">February 1, 2002</td>
- <td headers="Updated">October 25, 2012</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="sierrablanca.html">Bank of Sierra Blanca</a></td>
- <td headers="city">Sierra Blanca</td>
- <td headers="state">TX</td>
- <td headers="CERT #">22002</td>
- <td headers="AI">The Security State Bank of Pecos</td>
- <td headers="Closing Date">January 18, 2002</td>
- <td headers="Updated">November 6, 2003</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="hamilton.html">Hamilton Bank, NA</a><br />
- <a href="hamilton-spanish.html">En Espanol</a></td>
- <td headers="city">Miami</td>
- <td headers="state">FL</td>
- <td headers="CERT #">24382</td>
- <td headers="AI">Israel Discount Bank of New York</td>
- <td headers="Closing Date">January 11, 2002</td>
- <td headers="Updated">June 5, 2012</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="sinclair.html">Sinclair National Bank</a></td>
- <td headers="city">Gravette</td>
- <td headers="state">AR</td>
- <td headers="CERT #">34248</td>
- <td headers="AI">Delta Trust & Bank</td>
- <td headers="Closing Date">September 7, 2001</td>
- <td headers="Updated">February 10, 2004</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="superior.html">Superior Bank, FSB</a></td>
- <td headers="city">Hinsdale</td>
- <td headers="state">IL</td>
- <td headers="CERT #">32646</td>
- <td headers="AI">Superior Federal, FSB</td>
- <td headers="Closing Date">July 27, 2001</td>
- <td headers="Updated">June 5, 2012</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="Malta.html">Malta National Bank</a></td>
- <td headers="city">Malta</td>
- <td headers="state">OH</td>
- <td headers="CERT #">6629</td>
- <td headers="AI">North Valley Bank</td>
- <td headers="Closing Date">May 3, 2001</td>
- <td headers="Updated">November 18, 2002</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="firstalliance.html">First Alliance Bank & Trust Co.</a></td>
- <td headers="city">Manchester</td>
- <td headers="state">NH</td>
- <td headers="CERT #">34264</td>
- <td headers="AI">Southern New Hampshire Bank & Trust</td>
- <td headers="Closing Date">February 2, 2001</td>
- <td headers="Updated">February 18, 2003</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="nsb.html">National State Bank of Metropolis</a></td>
- <td headers="city">Metropolis</td>
- <td headers="state">IL</td>
- <td headers="CERT #">3815</td>
- <td headers="AI">Banterra Bank of Marion</td>
- <td headers="Closing Date">December 14, 2000</td>
- <td headers="Updated">March 17, 2005</td>
- </tr>
- <tr>
- <td headers="Instituition"><a href="boh.html">Bank of Honolulu</a></td>
- <td headers="city">Honolulu</td>
- <td headers="state">HI</td>
- <td headers="CERT #">21029</td>
- <td headers="AI">Bank of the Orient</td>
- <td headers="Closing Date">October 13, 2000</td>
- <td headers="Updated">March 17, 2005</td>
- </tr>
- </tbody>
-</table>
- <!--
-<script language="javascript">
-
-document.writeln("<div id=\"controls\">");
-document.writeln("<div id=\"perpage\">");
-document.writeln("<select onchange=\"sorter.size(this.value)\">");
-document.writeln("<option value=\"5\">5</option>");
-document.writeln("<option value=\"10\" >10</option>");
-document.writeln("<option value=\"20\"selected=\"selected\">20</option>");
-document.writeln("<option value=\"50\">50</option>");
-document.writeln("<option value=\"100\">100</option>");
-document.writeln("<option value=\"150\">150</option>");
-document.writeln("</select>");
-document.writeln(" Entries Per Page");
-document.writeln("</div>");
-document.writeln("<div id=\"navigation\">");
-document.writeln("<img src=\"images/first.gif\" width=\"16\" height=\"16\" alt=\"First Page\" onclick=\"sorter.move(-1,true)\" />");
-document.writeln("<img src=\"images/previous.gif\" width=\"16\" height=\"16\" alt=\"Previous Page\" onclick=\"sorter.move(-1)\" />");
-document.writeln("<img src=\"images/next.gif\" width=\"16\" height=\"16\" alt=\"Next Page\" onclick=\"sorter.move(1)\" />");
-document.writeln("<img src=\"images/last.gif\" width=\"16\" height=\"16\" alt=\"Last Page\" onclick=\"sorter.move(1,true)\" />");
-document.writeln("</div>");
-document.writeln("<div id=\"text\">Displaying Page ");
-document.writeln("<span id=\"currentpage\">");
-document.writeln("</span>");
-document.writeln(" of ");
-document.writeln("<span id=\"pagelimit\">");
-document.writeln("</span>");
-document.writeln("</div>");
-document.writeln("</div>");
-
- </script>
--->
-
+ </fieldset>
+ </form>
+ </div>
+ </div>
+ <!-- close right side -->
+ <a id="responsive_header-fdic_logo" href="/" title="FDIC Homepage">FDIC Homepage</a>
+ <h1>Federal Deposit<br>Insurance Corporation</h1>
+ <h2>Each depositor insured to at least $250,000 per insured bank</h2>
+ <div class="clear"></div>
+ <nav>
+ <div id="responsive_header_nav">
+ <div id="responsive_header-topnav">
+ <div id="responsive_header-topnav-downarrow" onclick="show_rwdnav(this)"></div>
+ <ul id="responsive_header-topnav-list">
+ <li id="responsive_header-topnav-home" title="Home" onmouseover="show_responsive_header_subnav(this)"><a href="/">Home</a></li>
+ <li id="responsive_header-topnav-deposit" title="Deposit Insurance" onmouseover="show_responsive_header_subnav(this)"><a href="/deposit/">Deposit Insurance</a></li>
+ <li id="responsive_header-topnav-consumers" title="Consumer Protection" onmouseover="show_responsive_header_subnav(this)"><a href="/consumers/">Consumer Protection</a></li>
+ <li id="responsive_header-topnav-bank" title="Industry Analysis" onmouseover="show_responsive_header_subnav(this)"><a href="/bank/">Industry Analysis</a></li>
+ <li id="responsive_header-topnav-regulations" title="Regulations & Examinations" onmouseover="show_responsive_header_subnav(this)"><a href="/regulations/">Regulations & Examinations</a></li>
+ <li id="responsive_header-topnav-buying" title="Asset Sales" onmouseover="show_responsive_header_subnav(this)"><a href="/buying/">Asset Sales</a></li>
+ <li id="responsive_header-topnav-news" title="News & Events" onmouseover="show_responsive_header_subnav(this)"><a href="/news/">News & Events</a></li>
+ <li id="responsive_header-topnav-about" title="About FDIC" onmouseover="show_responsive_header_subnav(this)"><a href="/about/">About FDIC</a></li>
+ </ul>
+ <div class="clear"></div>
+ </div>
+ <div id="responsive_header-topnav_subnav">
+ <div id="responsive_header-topnav_subnav-downarrow" onclick="show_rwdnav(this)"></div>
+ <ul id="responsive_header-topnav-home_subnav"><li><a> </a></li></ul>
+ <ul id="responsive_header-topnav-deposit_subnav">
+ <li title="BankFind"><a href="http://research.fdic.gov/bankfind/">BankFind</a></li>
+ <li title="Are My Deposits Insured?"><a href="/deposit/deposits/">Are My Deposits Insured?</a></li>
+ <li title="Uninsured Investments"><a href="/deposit/investments/">Uninsured Investments</a></li>
+ <li title="The Deposit Insurance Fund"><a href="/deposit/insurance/index.html">The Deposit Insurance Fund</a></li>
+ <li title="International Deposit Insurance"><a href="/deposit/deposits/international/">International Deposit Insurance</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-consumers_subnav">
+ <li title="Consumer News & Information"><a href="/consumers/consumer/">Consumer News & Information</a></li>
+ <li title="Loans & Mortgages"><a href="/consumers/loans/">Loans & Mortgages</a></li>
+ <li title="Banking & Your Money"><a href="/consumers/banking/">Banking & Your Money</a></li>
+ <li title="Financial Education & Literacy"><a href="/consumers/education/">Financial Education & Literacy</a></li>
+ <li title="Community Affairs"><a href="/consumers/community/">Community Affairs</a></li>
+ <li title="Identity Theft & Fraud"><a href="/consumers/theft/">Identity Theft & Fraud</a></li>
+ <li title="Consumer Financial Privacy"><a href="/consumers/privacy/">Consumer Financial Privacy</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-bank_subnav">
+ <li title="Bank Data & Statistics"><a href="/bank/statistical/">Bank Data & Statistics</a></li>
+ <li title="Research & Analysis"><a href="/bank/analytical/">Research & Analysis</a></li>
+ <li title="Failed Banks"><a href="/bank/individual/failed/">Failed Banks</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-regulations_subnav">
+ <li title="Bank Examinations"><a href="/regulations/examinations/">Bank Examinations</a></li>
+ <li title="Laws & Regulations"><a href="/regulations/laws/">Laws & Regulations</a></li>
+ <li title="Resources for Bank Officers & Directors"><a href="/regulations/resources/">Resources for Bank Officers & Directors</a></li>
+ <li title="FDICconnect"><a href="http://www.fdicconnect.gov/">FDIC<em>connect</em></a></li>
+ <li title="Required Financial Reports"><a href="/regulations/required/">Required Financial Reports</a></li>
+ <li title="Examiner Training Programs"><a href="/regulations/examiner/">Examiner Training Programs</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-buying_subnav">
+ <li title="Loan Sales"><a href="/buying/loan/">Loan Sales</a></li>
+ <li title="Real Estate Sales"><a href="/buying/owned/">Real Estate and Property Marketplace</a></li>
+ <li title="Financial Asset Sales"><a href="/buying/financial/">Financial Asset Sales</a></li>
+ <li title="Servicing Sales Announcements"><a href="/buying/servicing/">Servicing Sales Announcements</a></li>
+ <li title="Other Asset Sales"><a href="/buying/otherasset/">Other Asset Sales</a></li>
+ <li title="Historical Sales"><a href="/buying/historical/">Historical Sales</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-news_subnav">
+ <li title="Press Releases"><a href="/news/news/press/2013/">Press Releases</a></li>
+ <li title="Online Press Room"><a href="https://fdicsurvey.inquisiteasp.com/fdic/cgi-bin/qwebcorporate.dll?M58TRS">Online Press Room</a></li>
+ <li title="Conferences & Events"><a href="/news/conferences/">Conferences & Events</a></li>
+ <li title="Financial Institution Letters"><a href="/news/news/financial/2013/">Financial Institution Letters</a></li>
+ <li title="Special Alerts"><a href="/news/news/SpecialAlert/2012/">Special Alerts</a></li>
+ <li title="Letters to the Editor/Opinion Editorials"><a href="/news/letters/">Letters to the Editor/Opinion Editorials</a></li>
+ <li title="Speeches & Testimony"><a href="/news/news/speeches/chairman/">Speeches & Testimony</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-about_subnav">
+ <li title="Mission & Purpose"><a href="/about/index.html#1">Mission & Purpose</a></span></li>
+ <li title="Advisory Committees"><a href="/about/index.html#2">Advisory Committees</a></span></li>
+ <li title="Careers with the FDIC"><a href="/about/index.html#3">Careers with the FDIC</a></span></li>
+ <li title="Management Team"><a href="/about/index.html#4">Management Team</a></span></li>
+ <li title="Plans & Reports"><a href="/about/index.html#5">Plans & Reports</a></span></li>
+ <li title="What We Can Do for You"><a href="/about/index.html#6">What We Can Do for You</a></span></li>
+ <li title="Diversity at the FDIC"><a href="/about/index.html#7">Diversity at the FDIC</a></span></li>
+ </ul>
+ </div><!-- Close subnav -->
+ <div class="clear"></div>
+ </div>
+ </nav>
+</div>
+</header>
+<a id="after_header" name="after_header"></a>
<script type="text/javascript">
-var TINY={};
+prepare_responsive_header_nav();
+</script>
+<!-- END of Header -->
-function T$(i){return document.getElementById(i)}
-function T$$(e,p){return p.getElementsByTagName(e)}
+<div id="breadcrumbs"><a href="/">Home</a> > <a href="/bank/">Industry Analysis</a> > <a href="/bank/individual/failed/">Failed Banks</a> > Failed Bank List</div>
-TINY.table=function(){
- function sorter(n){this.n=n; this.pagesize=20; this.paginate=0}
- sorter.prototype.init=function(e,f){
- var t=ge(e), i=0; this.e=e; this.l=t.r.length; t.a=[];
- t.h=T$$('thead',T$(e))[0].rows[0]; t.w=t.h.cells.length;
- for(i;i<t.w;i++){
- var c=t.h.cells[i];
- if(c.className!='nosort'){
- c.className=this.head; c.onclick=new Function(this.n+'.wk(this.cellIndex)')
- }
- }
- for(i=0;i<this.l;i++){t.a[i]={}}
- if(f!=null){var a=new Function(this.n+'.wk('+f+')'); a()}
- if(this.paginate){this.g=1; this.pages()}
- };
- sorter.prototype.wk=function(y){
- var t=ge(this.e), x=t.h.cells[y], i=0;
- for(i;i<this.l;i++){
- t.a[i].o=i; var v=t.r[i].cells[y]; t.r[i].style.display='';
- while(v.hasChildNodes()){v=v.firstChild}
- t.a[i].v=v.nodeValue?v.nodeValue:''
- }
- for(i=0;i<t.w;i++){var c=t.h.cells[i]; if(c.className!='nosort'){c.className=this.head}}
-
-
- if(t.p==y)
- {
- t.a.reverse();
- x.className=t.d?this.asc:this.desc;
- t.d=t.d?0:1
- }
-
- else
- {
- t.p = y;
- t.a.sort(cp);
- t.d = 0;
- x.className = this.asc;
- }
-
-
-
-
- var n=document.createElement('tbody');
- for(i=0;i<this.l;i++){
- var r=t.r[t.a[i].o].cloneNode(true); n.appendChild(r);
- r.className=i%2==0?this.even:this.odd; var cells=T$$('td',r);
- for(var z=0;z<t.w;z++){cells[z].className=y==z?i%2==0?this.evensel:this.oddsel:''}
- }
- t.replaceChild(n,t.b); if(this.paginate){this.size(this.pagesize)}
- };
- sorter.prototype.page=function(s){
- var t=ge(this.e), i=0, l=s+parseInt(this.pagesize);
- if(this.currentid&&this.limitid){T$(this.currentid).innerHTML=this.g}
- for(i;i<this.l;i++){t.r[i].style.display=i>=s&&i<l?'':'none'}
- };
- sorter.prototype.move=function(d,m){
- var s=d==1?(m?this.d:this.g+1):(m?1:this.g-1);
- if(s<=this.d&&s>0){this.g=s; this.page((s-1)*this.pagesize)}
- };
- sorter.prototype.size=function(s){
- this.pagesize=s; this.g=1; this.pages(); this.page(0);
- if(this.currentid&&this.limitid){T$(this.limitid).innerHTML=this.d}
- };
- sorter.prototype.pages=function(){this.d=Math.ceil(this.l/this.pagesize)};
- function ge(e){var t=T$(e); t.b=T$$('tbody',t)[0]; t.r=t.b.rows; return t};
- function cp(f,c){
- var g,h; f=g=f.v.toLowerCase(), c=h=c.v.toLowerCase();
- var i=parseFloat(f.replace(/(\$|\,)/g,'')), n=parseFloat(c.replace(/(\$|\,)/g,''));
- if(!isNaN(i)&&!isNaN(n)){g=i,h=n}
- i=Date.parse(f); n=Date.parse(c);
- if(!isNaN(i)&&!isNaN(n))
- {
- g=i;
- h=n;
-
- }
-
- /**** This string returns the sort by ASCENDING Order *****/
- //return g>h?1:(g<h?-1:0)
-
-
- /**** This string returns the sort by DESCENDING Order *****/
- return g<h?1:(g>h?-1:0)
-
- };
- return{sorter:sorter}
-}();
+<div id="content" class="failed_bank_list">
- </script>
-<script type="text/javascript">
- var sorter = new TINY.table.sorter("sorter");
- sorter.head = "head";
- sorter.asc = "asc";
- sorter.desc = "desc";
- sorter.even = "evenrow";
- sorter.odd = "oddrow";
- sorter.evensel = "evenselected";
- sorter.oddsel = "oddselected";
- sorter.paginate = false;
- sorter.currentid = "currentpage";
- sorter.limitid = "pagelimit";
- sorter.init("table",5);
- </script>
-</td></tr>
-</table>
+ <h1 class="page_title">Failed Bank List</h1>
-<!-- DRR END Product Title & Body-->
- <br />
- <br />
+ <p>The FDIC is often appointed as receiver for failed banks. This page contains useful information for the customers and vendors of these banks. This includes information on the acquiring bank (if applicable), how your accounts and loans are affected, and how vendors can file claims against the receivership. <a href="http://www2.fdic.gov/drrip/cs/index.asp">Failed Financial Institution Contact Search</a> displays point of contact information related to failed banks.</p>
+ <p>This list includes banks which have failed since October 1, 2000. To search for banks that failed prior to those on this page, visit this link: <a href="http://www2.fdic.gov/hsob/SelectRpt.asp?EntryTyp=30">Failures and Assistance Transactions</a></p>
+ <p><a href="banklist.csv">Failed Bank List</a> - CSV file (Updated on Mondays. Also opens in Excel - <a href="/excel.html">Excel Help</a>)</p>
+
+ <p class="small_screen_warning">Due to the small screen size some information is no longer visible.<br>Full information available when viewed on a larger screen.</p>
+ <script type="text/javascript">
+ <!--
+ document.writeln("<p><em>Click arrows next to headers to sort in Ascending or Descending order.</em></p>");
+ //-->
+ </script>
- </td>
- </tr>
-
- <!-- begin: last updated date and contact information -->
- <tr>
- <td width="25"><img src="http://www.fdic.gov/images/spacer.gif" width="25" height="1" alt="" border="0" /><br /></td>
- <td>
-
- <!-- Instruction: change "mm/dd/yyyy" to the date the document was created or last modfied -->
-
- <font face="arial, helvetica, sans-serif" size="1" color="#000066">Last Updated
- 04/30/2013</font></td>
- <td align="right"><font face="arial, helvetica, sans-serif" size="1" color="#000066">
-
-<!-- Instruction: change the link text and href value of "Insert_Content_Email_Address@fdic.gov" to the fdic.gov e-mail address of the document's point of contact -->
-
-<a HREF="mailto:cservicefdicdal@fdic.gov">cservicefdicdal@fdic.gov</a></font></td>
- </tr>
- <!-- end: last updated date and contact information -->
-</table>
-<!-- BEGIN FOOTER INCLUDE -->
-<!-- Instruction: The following statement is the footer include statement. Do not revise this code. -->
-<br />
-</font><!-- Ends Opening Font Tag -->
-<!-- begin footer -->
-<!-- Last Updated Date: 1-18-2011 Time: 2:24PM Version: 1.4 -->
-</div><!-- ends body tag -->
-<!-- begin footer -->
- <div id="footer-container">
- <div>
- <ul id="footer-top">
- <li><a href="/" title="Home">Home</a> </li>
- <li>|</li>
- <li><a href="/about/contact/ask/" title="Contact Us">Contact Us</a></li>
- <li>|</li>
- <li><a href="/search/" title="Search">Search</a></li>
- <li>|</li>
- <li><a href="/help/" title="Help">Help</a></li>
- <li>|</li>
- <li><a href="/sitemap/" title="SiteMap">SiteMap</a></li>
- <li>|</li>
- <li><a href="/regulations/laws/forms/" title="Forms">Forms</a></li>
- <li>|</li>
- <li><a href="/quicklinks/spanish.html" title="En Español">En Español</a></li>
-
- </ul>
- </div>
-
- <div>
- <ul id="footer-middle">
- <li><a href="/about/policies/" title="Website Policies">Website Policies</a></li>
- <li>|</li>
- <li><a href="/about/privacy/policy/index.html" title="Privacy Policy">Privacy Policy</a></li>
- <li>|</li>
- <li><a href="/plainlanguage/index.html" title="Privacy Policy">Plain Writing Act of 2010 </a></li>
- <li>|</li>
- <li><a href="http://www.usa.gov/" title="USA.gov">USA.gov</a></li>
- <li>|</li>
- <li><a href="http://www.fdicoig.gov/" title="FDIC Office of Inspector General">FDIC Office of Inspector General</a></li>
- </ul>
- </div>
+ <div id="table_wrapper">
+ <table id="table" class="sortable">
+ <thead>
+ <tr>
+ <th id="institution" scope="col">Bank Name</th>
+ <th id="city" class="nosort" scope="col">City</th>
+ <th id="state" scope="col">ST</th>
+ <th id="cert" class="nosort" scope="col">CERT</th>
+ <th id="ai" scope="col">Acquiring Institution</th>
+ <th id="closing" scope="col">Closing Date</th>
+ <th id="updated" scope="col">Updated Date</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <td class="institution"><a href="kenosha.html">Banks of Wisconsin d/b/a Bank of Kenosha</a></td>
+ <td class="city">Kenosha</td>
+ <td class="state">WI</td>
+ <td class="cert">35386</td>
+ <td class="ai">North Shore Bank, FSB</td>
+ <td class="closing">May 31, 2013</td>
+ <td class="updated">May 31, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="centralaz.html">Central Arizona Bank</a></td>
+ <td class="city">Scottsdale</td>
+ <td class="state">AZ</td>
+ <td class="cert">34527</td>
+ <td class="ai">Western State Bank</td>
+ <td class="closing">May 14, 2013</td>
+ <td class="updated">May 20, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sunrisebank.html">Sunrise Bank</a></td>
+ <td class="city">Valdosta</td>
+ <td class="state">GA</td>
+ <td class="cert">58185</td>
+ <td class="ai">Synovus Bank</td>
+ <td class="closing">May 10, 2013</td>
+ <td class="updated">May 21, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pisgahcommbk.html">Pisgah Community Bank</a></td>
+ <td class="city">Asheville</td>
+ <td class="state">NC</td>
+ <td class="cert">58701</td>
+ <td class="ai">Capital Bank, N.A.</td>
+ <td class="closing">May 10, 2013</td>
+ <td class="updated">May 14, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="douglascb.html">Douglas County Bank</a></td>
+ <td class="city">Douglasville</td>
+ <td class="state">GA</td>
+ <td class="cert">21649</td>
+ <td class="ai">Hamilton State Bank</td>
+ <td class="closing">April 26, 2013</td>
+ <td class="updated">May 16, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="parkway.html">Parkway Bank</a></td>
+ <td class="city">Lenoir</td>
+ <td class="state">NC</td>
+ <td class="cert">57158</td>
+ <td class="ai">CertusBank, National Association</td>
+ <td class="closing">April 26, 2013</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="chipola.html">Chipola Community Bank</a></td>
+ <td class="city">Marianna</td>
+ <td class="state">FL</td>
+ <td class="cert">58034</td>
+ <td class="ai">First Federal Bank of Florida</td>
+ <td class="closing">April 19, 2013</td>
+ <td class="updated">May 16, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heritagebank-fl.html">Heritage Bank of North Florida</a></td>
+ <td class="city">Orange Park</td>
+ <td class="state">FL</td>
+ <td class="cert">26680</td>
+ <td class="ai">FirstAtlantic Bank</td>
+ <td class="closing">April 19, 2013</td>
+ <td class="updated">May 16, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstfederal-ky.html">First Federal Bank</a></td>
+ <td class="city">Lexington</td>
+ <td class="state">KY</td>
+ <td class="cert">29594</td>
+ <td class="ai">Your Community Bank</td>
+ <td class="closing">April 19, 2013</td>
+ <td class="updated">April 23, 2013</td>
+ </tr>
+ <td class="institution"><a href="goldcanyon.html">Gold Canyon Bank</a></td>
+ <td class="city">Gold Canyon</td>
+ <td class="state">AZ</td>
+ <td class="cert">58066</td>
+ <td class="ai">First Scottsdale Bank, National Association</td>
+ <td class="closing">April 5, 2013</td>
+ <td class="updated">April 9, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="frontier-ga.html">Frontier Bank</a></td>
+ <td class="city">LaGrange</td>
+ <td class="state">GA</td>
+ <td class="cert">16431</td>
+ <td class="ai">HeritageBank of the South</td>
+ <td class="closing">March 8, 2013</td>
+ <td class="updated">March 26, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="covenant-il.html">Covenant Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">22476</td>
+ <td class="ai">Liberty Bank and Trust Company</td>
+ <td class="closing">February 15, 2013</td>
+ <td class="updated">March 4, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="1stregents.html">1st Regents Bank</a></td>
+ <td class="city">Andover</td>
+ <td class="state">MN</td>
+ <td class="cert">57157</td>
+ <td class="ai">First Minnesota Bank</td>
+ <td class="closing">January 18, 2013</td>
+ <td class="updated">February 28, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westside.html">Westside Community Bank</a></td>
+ <td class="city">University Place</td>
+ <td class="state">WA</td>
+ <td class="cert">33997</td>
+ <td class="ai">Sunwest Bank</td>
+ <td class="closing">January 11, 2013</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cmbkozarks.html">Community Bank of the Ozarks</a></td>
+ <td class="city">Sunrise Beach</td>
+ <td class="state">MO</td>
+ <td class="cert">27331</td>
+ <td class="ai">Bank of Sullivan</td>
+ <td class="closing">December 14, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="hometown.html">Hometown Community Bank</a></td>
+ <td class="city">Braselton</td>
+ <td class="state">GA</td>
+ <td class="cert">57928</td>
+ <td class="ai">CertusBank, National Association</td>
+ <td class="closing">November 16, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cfnb.html">Citizens First National Bank</a></td>
+ <td class="city">Princeton</td>
+ <td class="state">IL</td>
+ <td class="cert">3731</td>
+ <td class="ai">Heartland Bank and Trust Company</td>
+ <td class="closing">November 2, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heritage_fl.html">Heritage Bank of Florida</a></td>
+ <td class="city">Lutz</td>
+ <td class="state">FL</td>
+ <td class="cert">35009</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">November 2, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="novabank.html">NOVA Bank</a></td>
+ <td class="city">Berwyn</td>
+ <td class="state">PA</td>
+ <td class="cert">27148</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">October 26, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="excelbank.html">Excel Bank</a></td>
+ <td class="city">Sedalia</td>
+ <td class="state">MO</td>
+ <td class="cert">19189</td>
+ <td class="ai">Simmons First National Bank</td>
+ <td class="closing">October 19, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firsteastside.html">First East Side Savings Bank</a></td>
+ <td class="city">Tamarac</td>
+ <td class="state">FL</td>
+ <td class="cert">28144</td>
+ <td class="ai">Stearns Bank N.A.</td>
+ <td class="closing">October 19, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="gulfsouth.html">GulfSouth Private Bank</a></td>
+ <td class="city">Destin</td>
+ <td class="state">FL</td>
+ <td class="cert">58073</td>
+ <td class="ai">SmartBank</td>
+ <td class="closing">October 19, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstunited.html">First United Bank</a></td>
+ <td class="city">Crete</td>
+ <td class="state">IL</td>
+ <td class="cert">20685</td>
+ <td class="ai">Old Plank Trail Community Bank, National Association</td>
+ <td class="closing">September 28, 2012</td>
+ <td class="updated">November 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="truman.html">Truman Bank</a></td>
+ <td class="city">St. Louis</td>
+ <td class="state">MO</td>
+ <td class="cert">27316</td>
+ <td class="ai">Simmons First National Bank</td>
+ <td class="closing">September 14, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcommbk_mn.html">First Commercial Bank</a></td>
+ <td class="city">Bloomington</td>
+ <td class="state">MN</td>
+ <td class="cert">35246</td>
+ <td class="ai">Republic Bank & Trust Company</td>
+ <td class="closing">September 7, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="waukegan.html">Waukegan Savings Bank</a></td>
+ <td class="city">Waukegan</td>
+ <td class="state">IL</td>
+ <td class="cert">28243</td>
+ <td class="ai">First Midwest Bank</td>
+ <td class="closing">August 3, 2012</td>
+ <td class="updated">October 11, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="jasper.html">Jasper Banking Company</a></td>
+ <td class="city">Jasper</td>
+ <td class="state">GA</td>
+ <td class="cert">16240</td>
+ <td class="ai">Stearns Bank N.A.</td>
+ <td class="closing">July 27, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="secondfederal.html">Second Federal Savings and Loan Association of Chicago</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">27986</td>
+ <td class="ai">Hinsdale Bank & Trust Company</td>
+ <td class="closing">July 20, 2012</td>
+ <td class="updated">January 14, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heartland.html">Heartland Bank</a></td>
+ <td class="city">Leawood</td>
+ <td class="state">KS</td>
+ <td class="cert">1361</td>
+ <td class="ai">Metcalf Bank</td>
+ <td class="closing">July 20, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cherokee.html">First Cherokee State Bank</a></td>
+ <td class="city">Woodstock</td>
+ <td class="state">GA</td>
+ <td class="cert">32711</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">July 20, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="georgiatrust.html">Georgia Trust Bank</a></td>
+ <td class="city">Buford</td>
+ <td class="state">GA</td>
+ <td class="cert">57847</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">July 20, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="royalpalm.html">The Royal Palm Bank of Florida</a></td>
+ <td class="city">Naples</td>
+ <td class="state">FL</td>
+ <td class="cert">57096</td>
+ <td class="ai">First National Bank of the Gulf Coast</td>
+ <td class="closing">July 20, 2012</td>
+ <td class="updated">January 7, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="glasgow.html">Glasgow Savings Bank</a></td>
+ <td class="city">Glasgow</td>
+ <td class="state">MO</td>
+ <td class="cert">1056</td>
+ <td class="ai">Regional Missouri Bank</td>
+ <td class="closing">July 13, 2012</td>
+ <td class="updated">October 11, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="montgomery.html">Montgomery Bank & Trust</a></td>
+ <td class="city">Ailey</td>
+ <td class="state">GA</td>
+ <td class="cert">19498</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">July 6, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="farmersbank.html">The Farmers Bank of Lynchburg</a></td>
+ <td class="city">Lynchburg</td>
+ <td class="state">TN</td>
+ <td class="cert">1690</td>
+ <td class="ai">Clayton Bank and Trust</td>
+ <td class="closing">June 15, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="securityexchange.html">Security Exchange Bank</a></td>
+ <td class="city">Marietta</td>
+ <td class="state">GA</td>
+ <td class="cert">35299</td>
+ <td class="ai">Fidelity Bank</td>
+ <td class="closing">June 15, 2012</td>
+ <td class="updated">October 10, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="putnam.html">Putnam State Bank</a></td>
+ <td class="city">Palatka</td>
+ <td class="state">FL</td>
+ <td class="cert">27405</td>
+ <td class="ai">Harbor Community Bank</td>
+ <td class="closing">June 15, 2012</td>
+ <td class="updated">October 10, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="waccamaw.html">Waccamaw Bank</a></td>
+ <td class="city">Whiteville</td>
+ <td class="state">NC</td>
+ <td class="cert">34515</td>
+ <td class="ai">First Community Bank</td>
+ <td class="closing">June 8, 2012</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ftsb.html">Farmers' and Traders' State Bank</a></td>
+ <td class="city">Shabbona</td>
+ <td class="state">IL</td>
+ <td class="cert">9257</td>
+ <td class="ai">First State Bank</td>
+ <td class="closing">June 8, 2012</td>
+ <td class="updated">October 10, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="carolina.html">Carolina Federal Savings Bank</a></td>
+ <td class="city">Charleston</td>
+ <td class="state">SC</td>
+ <td class="cert">35372</td>
+ <td class="ai">Bank of North Carolina</td>
+ <td class="closing">June 8, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcapital.html">First Capital Bank</a></td>
+ <td class="city">Kingfisher</td>
+ <td class="state">OK</td>
+ <td class="cert">416</td>
+ <td class="ai">F & M Bank</td>
+ <td class="closing">June 8, 2012</td>
+ <td class="updated">October 10, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="alabamatrust.html">Alabama Trust Bank, National Association</a></td>
+ <td class="city">Sylacauga</td>
+ <td class="state">AL</td>
+ <td class="cert">35224</td>
+ <td class="ai">Southern States Bank</td>
+ <td class="closing">May 18, 2012</td>
+ <td class="updated">May 20, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="securitybank.html">Security Bank, National Association</a></td>
+ <td class="city">North Lauderdale</td>
+ <td class="state">FL</td>
+ <td class="cert">23156</td>
+ <td class="ai">Banesco USA</td>
+ <td class="closing">May 4, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="palmdesert.html">Palm Desert National Bank</a></td>
+ <td class="city">Palm Desert</td>
+ <td class="state">CA</td>
+ <td class="cert">23632</td>
+ <td class="ai">Pacific Premier Bank</td>
+ <td class="closing">April 27, 2012</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="plantation.html">Plantation Federal Bank</a></td>
+ <td class="city">Pawleys Island</td>
+ <td class="state">SC</td>
+ <td class="cert">32503</td>
+ <td class="ai">First Federal Bank</td>
+ <td class="closing">April 27, 2012</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="interbank.html">Inter Savings Bank, fsb D/B/A InterBank, fsb</a></td>
+ <td class="city">Maple Grove</td>
+ <td class="state">MN</td>
+ <td class="cert">31495</td>
+ <td class="ai">Great Southern Bank</td>
+ <td class="closing">April 27, 2012</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="harvest.html">HarVest Bank of Maryland</a></td>
+ <td class="city">Gaithersburg</td>
+ <td class="state">MD</td>
+ <td class="cert">57766</td>
+ <td class="ai">Sonabank</td>
+ <td class="closing">April 27, 2012</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="easternshore.html">Bank of the Eastern Shore</a></td>
+ <td class="city">Cambridge</td>
+ <td class="state">MD</td>
+ <td class="cert">26759</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">April 27, 2012</td>
+ <td class="updated">October 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fortlee.html">Fort Lee Federal Savings Bank, FSB</a></td>
+ <td class="city">Fort Lee</td>
+ <td class="state">NJ</td>
+ <td class="cert">35527</td>
+ <td class="ai">Alma Bank</td>
+ <td class="closing">April 20, 2012</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fidelity.html">Fidelity Bank</a></td>
+ <td class="city">Dearborn</td>
+ <td class="state">MI</td>
+ <td class="cert">33883</td>
+ <td class="ai">The Huntington National Bank</td>
+ <td class="closing">March 30, 2012</td>
+ <td class="updated">May 16, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="premier-il.html">Premier Bank</a></td>
+ <td class="city">Wilmette</td>
+ <td class="state">IL</td>
+ <td class="cert">35419</td>
+ <td class="ai">International Bank of Chicago</td>
+ <td class="closing">March 23, 2012</td>
+ <td class="updated">October 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="covenant.html">Covenant Bank & Trust</a></td>
+ <td class="city">Rock Spring</td>
+ <td class="state">GA</td>
+ <td class="cert">58068</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">March 23, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newcity.html">New City Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">57597</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 9, 2012</td>
+ <td class="updated">October 29, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="global.html">Global Commerce Bank</a></td>
+ <td class="city">Doraville</td>
+ <td class="state">GA</td>
+ <td class="cert">34046</td>
+ <td class="ai">Metro City Bank</td>
+ <td class="closing">March 2, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="homesvgs.html">Home Savings of America</a></td>
+ <td class="city">Little Falls</td>
+ <td class="state">MN</td>
+ <td class="cert">29178</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">February 24, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cbg.html">Central Bank of Georgia</a></td>
+ <td class="city">Ellaville</td>
+ <td class="state">GA</td>
+ <td class="cert">5687</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">February 24, 2012</td>
+ <td class="updated">August 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="scbbank.html">SCB Bank</a></td>
+ <td class="city">Shelbyville</td>
+ <td class="state">IN</td>
+ <td class="cert">29761</td>
+ <td class="ai">First Merchants Bank, National Association</td>
+ <td class="closing">February 10, 2012</td>
+ <td class="updated">March 25, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cnbt.html">Charter National Bank and Trust</a></td>
+ <td class="city">Hoffman Estates</td>
+ <td class="state">IL</td>
+ <td class="cert">23187</td>
+ <td class="ai">Barrington Bank & Trust Company, National Association</td>
+ <td class="closing">February 10, 2012</td>
+ <td class="updated">March 25, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankeast.html">BankEast</a></td>
+ <td class="city">Knoxville</td>
+ <td class="state">TN</td>
+ <td class="cert">19869</td>
+ <td class="ai">U.S.Bank National Association</td>
+ <td class="closing">January 27, 2012</td>
+ <td class="updated">March 8, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="patriot-mn.html">Patriot Bank Minnesota</a></td>
+ <td class="city">Forest Lake</td>
+ <td class="state">MN</td>
+ <td class="cert">34823</td>
+ <td class="ai">First Resource Bank</td>
+ <td class="closing">January 27, 2012</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="tcb.html">Tennessee Commerce Bank</a></td>
+ <td class="city">Franklin</td>
+ <td class="state">TN</td>
+ <td class="cert">35296</td>
+ <td class="ai">Republic Bank & Trust Company</td>
+ <td class="closing">January 27, 2012</td>
+ <td class="updated">November 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fgbtcj.html">First Guaranty Bank and Trust Company of Jacksonville</a></td>
+ <td class="city">Jacksonville</td>
+ <td class="state">FL</td>
+ <td class="cert">16579</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">January 27, 2012</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americaneagle.html">American Eagle Savings Bank</a></td>
+ <td class="city">Boothwyn</td>
+ <td class="state">PA</td>
+ <td class="cert">31581</td>
+ <td class="ai">Capital Bank, N.A.</td>
+ <td class="closing">January 20, 2012</td>
+ <td class="updated">January 25, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firststatebank-ga.html">The First State Bank</a></td>
+ <td class="city">Stockbridge</td>
+ <td class="state">GA</td>
+ <td class="cert">19252</td>
+ <td class="ai">Hamilton State Bank</td>
+ <td class="closing">January 20, 2012</td>
+ <td class="updated">January 25, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cfsb.html">Central Florida State Bank</a></td>
+ <td class="city">Belleview</td>
+ <td class="state">FL</td>
+ <td class="cert">57186</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">January 20, 2012</td>
+ <td class="updated">January 25, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westernnatl.html">Western National Bank</a></td>
+ <td class="city">Phoenix</td>
+ <td class="state">AZ</td>
+ <td class="cert">57917</td>
+ <td class="ai">Washington Federal</td>
+ <td class="closing">December 16, 2011</td>
+ <td class="updated">August 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="premier-fl.html">Premier Community Bank of the Emerald Coast</a></td>
+ <td class="city">Crestview</td>
+ <td class="state">FL</td>
+ <td class="cert">58343</td>
+ <td class="ai">Summit Bank</td>
+ <td class="closing">December 16, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="centralprog.html">Central Progressive Bank</a></td>
+ <td class="city">Lacombe</td>
+ <td class="state">LA</td>
+ <td class="cert">19657</td>
+ <td class="ai">First NBC Bank</td>
+ <td class="closing">November 18, 2011</td>
+ <td class="updated">August 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="polkcounty.html">Polk County Bank</a></td>
+ <td class="city">Johnston</td>
+ <td class="state">IA</td>
+ <td class="cert">14194</td>
+ <td class="ai">Grinnell State Bank</td>
+ <td class="closing">November 18, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="rockmart.html">Community Bank of Rockmart</a></td>
+ <td class="city">Rockmart</td>
+ <td class="state">GA</td>
+ <td class="cert">57860</td>
+ <td class="ai">Century Bank of Georgia</td>
+ <td class="closing">November 10, 2011</td>
+ <td class="updated">August 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sunfirst.html">SunFirst Bank</a></td>
+ <td class="city">Saint George</td>
+ <td class="state">UT</td>
+ <td class="cert">57087</td>
+ <td class="ai">Cache Valley Bank</td>
+ <td class="closing">November 4, 2011</td>
+ <td class="updated">November 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="midcity.html">Mid City Bank, Inc.</a></td>
+ <td class="city">Omaha</td>
+ <td class="state">NE</td>
+ <td class="cert">19397</td>
+ <td class="ai">Premier Bank</td>
+ <td class="closing">November 4, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="allamerican.html">All American Bank</a></td>
+ <td class="city">Des Plaines</td>
+ <td class="state">IL</td>
+ <td class="cert">57759</td>
+ <td class="ai">International Bank of Chicago</td>
+ <td class="closing">October 28, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commbanksco.html">Community Banks of Colorado</a></td>
+ <td class="city">Greenwood Village</td>
+ <td class="state">CO</td>
+ <td class="cert">21132</td>
+ <td class="ai">Bank Midwest, N.A.</td>
+ <td class="closing">October 21, 2011</td>
+ <td class="updated">January 2, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commcapbk.html">Community Capital Bank</a></td>
+ <td class="city">Jonesboro</td>
+ <td class="state">GA</td>
+ <td class="cert">57036</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">October 21, 2011</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="decatur.html">Decatur First Bank</a></td>
+ <td class="city">Decatur</td>
+ <td class="state">GA</td>
+ <td class="cert">34392</td>
+ <td class="ai">Fidelity Bank</td>
+ <td class="closing">October 21, 2011</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="oldharbor.html">Old Harbor Bank</a></td>
+ <td class="city">Clearwater</td>
+ <td class="state">FL</td>
+ <td class="cert">57537</td>
+ <td class="ai">1st United Bank</td>
+ <td class="closing">October 21, 2011</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="countrybank.html">Country Bank</a></td>
+ <td class="city">Aledo</td>
+ <td class="state">IL</td>
+ <td class="cert">35395</td>
+ <td class="ai">Blackhawk Bank & Trust</td>
+ <td class="closing">October 14, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firststatebank-nj.html">First State Bank</a></td>
+ <td class="city">Cranford</td>
+ <td class="state">NJ</td>
+ <td class="cert">58046</td>
+ <td class="ai">Northfield Bank</td>
+ <td class="closing">October 14, 2011</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="blueridge.html">Blue Ridge Savings Bank, Inc.</a></td>
+ <td class="city">Asheville</td>
+ <td class="state">NC</td>
+ <td class="cert">32347</td>
+ <td class="ai">Bank of North Carolina</td>
+ <td class="closing">October 14, 2011</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="piedmont-ga.html">Piedmont Community Bank</a></td>
+ <td class="city">Gray</td>
+ <td class="state">GA</td>
+ <td class="cert">57256</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">October 14, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sunsecurity.html">Sun Security Bank</a></td>
+ <td class="city">Ellington</td>
+ <td class="state">MO</td>
+ <td class="cert">20115</td>
+ <td class="ai">Great Southern Bank</td>
+ <td class="closing">October 7, 2011</td>
+ <td class="updated">November 7, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="riverbank.html">The RiverBank</a></td>
+ <td class="city">Wyoming</td>
+ <td class="state">MN</td>
+ <td class="cert">10216</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">October 7, 2011</td>
+ <td class="updated">November 7, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstintlbank.html">First International Bank</a></td>
+ <td class="city">Plano</td>
+ <td class="state">TX</td>
+ <td class="cert">33513</td>
+ <td class="ai">American First National Bank</td>
+ <td class="closing">September 30, 2011</td>
+ <td class="updated">October 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cbnc.html">Citizens Bank of Northern California</a></td>
+ <td class="city">Nevada City</td>
+ <td class="state">CA</td>
+ <td class="cert">33983</td>
+ <td class="ai">Tri Counties Bank</td>
+ <td class="closing">September 23, 2011</td>
+ <td class="updated">October 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="boc-va.html">Bank of the Commonwealth</a></td>
+ <td class="city">Norfolk</td>
+ <td class="state">VA</td>
+ <td class="cert">20408</td>
+ <td class="ai">Southern Bank and Trust Company</td>
+ <td class="closing">September 23, 2011</td>
+ <td class="updated">October 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fnbf.html">The First National Bank of Florida</a></td>
+ <td class="city">Milton</td>
+ <td class="state">FL</td>
+ <td class="cert">25155</td>
+ <td class="ai">CharterBank</td>
+ <td class="closing">September 9, 2011</td>
+ <td class="updated">September 6, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="creekside.html">CreekSide Bank</a></td>
+ <td class="city">Woodstock</td>
+ <td class="state">GA</td>
+ <td class="cert">58226</td>
+ <td class="ai">Georgia Commerce Bank</td>
+ <td class="closing">September 2, 2011</td>
+ <td class="updated">September 6, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="patriot.html">Patriot Bank of Georgia</a></td>
+ <td class="city">Cumming</td>
+ <td class="state">GA</td>
+ <td class="cert">58273</td>
+ <td class="ai">Georgia Commerce Bank</td>
+ <td class="closing">September 2, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstchoice-il.html">First Choice Bank</a></td>
+ <td class="city">Geneva</td>
+ <td class="state">IL</td>
+ <td class="cert">57212</td>
+ <td class="ai">Inland Bank & Trust</td>
+ <td class="closing">August 19, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstsouthern-ga.html">First Southern National Bank</a></td>
+ <td class="city">Statesboro</td>
+ <td class="state">GA</td>
+ <td class="cert">57239</td>
+ <td class="ai">Heritage Bank of the South</td>
+ <td class="closing">August 19, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lydian.html">Lydian Private Bank</a></td>
+ <td class="city">Palm Beach</td>
+ <td class="state">FL</td>
+ <td class="cert">35356</td>
+ <td class="ai">Sabadell United Bank, N.A.</td>
+ <td class="closing">August 19, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="publicsvgs.html">Public Savings Bank</a></td>
+ <td class="city">Huntingdon Valley</td>
+ <td class="state">PA</td>
+ <td class="cert">34130</td>
+ <td class="ai">Capital Bank, N.A.</td>
+ <td class="closing">August 18, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fnbo.html">The First National Bank of Olathe</a></td>
+ <td class="city">Olathe</td>
+ <td class="state">KS</td>
+ <td class="cert">4744</td>
+ <td class="ai">Enterprise Bank & Trust</td>
+ <td class="closing">August 12, 2011</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="whitman.html">Bank of Whitman</a></td>
+ <td class="city">Colfax</td>
+ <td class="state">WA</td>
+ <td class="cert">22528</td>
+ <td class="ai">Columbia State Bank</td>
+ <td class="closing">August 5, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="shorewood.html">Bank of Shorewood</a></td>
+ <td class="city">Shorewood</td>
+ <td class="state">IL</td>
+ <td class="cert">22637</td>
+ <td class="ai">Heartland Bank and Trust Company</td>
+ <td class="closing">August 5, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="integra.html">Integra Bank National Association</a></td>
+ <td class="city">Evansville</td>
+ <td class="state">IN</td>
+ <td class="cert">4392</td>
+ <td class="ai">Old National Bank</td>
+ <td class="closing">July 29, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankmeridian.html">BankMeridian, N.A.</a></td>
+ <td class="city">Columbia</td>
+ <td class="state">SC</td>
+ <td class="cert">58222</td>
+ <td class="ai">SCBT National Association</td>
+ <td class="closing">July 29, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="vbb.html">Virginia Business Bank</a></td>
+ <td class="city">Richmond</td>
+ <td class="state">VA</td>
+ <td class="cert">58283</td>
+ <td class="ai">Xenith Bank</td>
+ <td class="closing">July 29, 2011</td>
+ <td class="updated">October 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofchoice.html">Bank of Choice</a></td>
+ <td class="city">Greeley</td>
+ <td class="state">CO</td>
+ <td class="cert">2994</td>
+ <td class="ai">Bank Midwest, N.A.</td>
+ <td class="closing">July 22, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="landmark.html">LandMark Bank of Florida</a></td>
+ <td class="city">Sarasota</td>
+ <td class="state">FL</td>
+ <td class="cert">35244</td>
+ <td class="ai">American Momentum Bank</td>
+ <td class="closing">July 22, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="southshore.html">Southshore Community Bank</a></td>
+ <td class="city">Apollo Beach</td>
+ <td class="state">FL</td>
+ <td class="cert">58056</td>
+ <td class="ai">American Momentum Bank</td>
+ <td class="closing">July 22, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="summitbank.html">Summit Bank</a></td>
+ <td class="city">Prescott</td>
+ <td class="state">AZ</td>
+ <td class="cert">57442</td>
+ <td class="ai">The Foothills Bank</td>
+ <td class="closing">July 15, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstpeoples.html">First Peoples Bank</a></td>
+ <td class="city">Port St. Lucie</td>
+ <td class="state">FL</td>
+ <td class="cert">34870</td>
+ <td class="ai">Premier American Bank, N.A.</td>
+ <td class="closing">July 15, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="hightrust.html">High Trust Bank</a></td>
+ <td class="city">Stockbridge</td>
+ <td class="state">GA</td>
+ <td class="cert">19554</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">July 15, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="onegeorgia.html">One Georgia Bank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">58238</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">July 15, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="signaturebank.html">Signature Bank</a></td>
+ <td class="city">Windsor</td>
+ <td class="state">CO</td>
+ <td class="cert">57835</td>
+ <td class="ai">Points West Community Bank</td>
+ <td class="closing">July 8, 2011</td>
+ <td class="updated">October 26, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="coloradocapital.html">Colorado Capital Bank</a></td>
+ <td class="city">Castle Rock</td>
+ <td class="state">CO</td>
+ <td class="cert">34522</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">July 8, 2011</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstchicago.html">First Chicago Bank & Trust</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">27935</td>
+ <td class="ai">Northbrook Bank & Trust Company</td>
+ <td class="closing">July 8, 2011</td>
+ <td class="updated">September 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mountain.html">Mountain Heritage Bank</a></td>
+ <td class="city">Clayton</td>
+ <td class="state">GA</td>
+ <td class="cert">57593</td>
+ <td class="ai">First American Bank and Trust Company</td>
+ <td class="closing">June 24, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fcbtb.html">First Commercial Bank of Tampa Bay</a></td>
+ <td class="city">Tampa</td>
+ <td class="state">FL</td>
+ <td class="cert">27583</td>
+ <td class="ai">Stonegate Bank</td>
+ <td class="closing">June 17, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mcintoshstate.html">McIntosh State Bank</a></td>
+ <td class="city">Jackson</td>
+ <td class="state">GA</td>
+ <td class="cert">19237</td>
+ <td class="ai">Hamilton State Bank</td>
+ <td class="closing">June 17, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="atlanticbanktrust.html">Atlantic Bank and Trust</a></td>
+ <td class="city">Charleston</td>
+ <td class="state">SC</td>
+ <td class="cert">58420</td>
+ <td class="ai">First Citizens Bank and Trust Company, Inc.</td>
+ <td class="closing">June 3, 2011</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstheritage.html">First Heritage Bank</a></td>
+ <td class="city">Snohomish</td>
+ <td class="state">WA</td>
+ <td class="cert">23626</td>
+ <td class="ai">Columbia State Bank</td>
+ <td class="closing">May 27, 2011</td>
+ <td class="updated">January 28, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="summit.html">Summit Bank</a></td>
+ <td class="city">Burlington</td>
+ <td class="state">WA</td>
+ <td class="cert">513</td>
+ <td class="ai">Columbia State Bank</td>
+ <td class="closing">May 20, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fgbc.html">First Georgia Banking Company</a></td>
+ <td class="city">Franklin</td>
+ <td class="state">GA</td>
+ <td class="cert">57647</td>
+ <td class="ai">CertusBank, National Association</td>
+ <td class="closing">May 20, 2011</td>
+ <td class="updated">November 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="atlanticsthrn.html">Atlantic Southern Bank</a></td>
+ <td class="city">Macon</td>
+ <td class="state">GA</td>
+ <td class="cert">57213</td>
+ <td class="ai">CertusBank, National Association</td>
+ <td class="closing">May 20, 2011</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="coastal_fl.html">Coastal Bank</a></td>
+ <td class="city">Cocoa Beach</td>
+ <td class="state">FL</td>
+ <td class="cert">34898</td>
+ <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td>
+ <td class="closing">May 6, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="communitycentral.html">Community Central Bank</a></td>
+ <td class="city">Mount Clemens</td>
+ <td class="state">MI</td>
+ <td class="cert">34234</td>
+ <td class="ai">Talmer Bank & Trust</td>
+ <td class="closing">April 29, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="parkavenue_ga.html">The Park Avenue Bank</a></td>
+ <td class="city">Valdosta</td>
+ <td class="state">GA</td>
+ <td class="cert">19797</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">April 29, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstchoice.html">First Choice Community Bank</a></td>
+ <td class="city">Dallas</td>
+ <td class="state">GA</td>
+ <td class="cert">58539</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">April 29, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cortez.html">Cortez Community Bank</a></td>
+ <td class="city">Brooksville</td>
+ <td class="state">FL</td>
+ <td class="cert">57625</td>
+ <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td>
+ <td class="closing">April 29, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fnbcf.html">First National Bank of Central Florida</a></td>
+ <td class="city">Winter Park</td>
+ <td class="state">FL</td>
+ <td class="cert">26297</td>
+ <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td>
+ <td class="closing">April 29, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heritage_ms.html">Heritage Banking Group</a></td>
+ <td class="city">Carthage</td>
+ <td class="state">MS</td>
+ <td class="cert">14273</td>
+ <td class="ai">Trustmark National Bank</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="rosemount.html">Rosemount National Bank</a></td>
+ <td class="city">Rosemount</td>
+ <td class="state">MN</td>
+ <td class="cert">24099</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="superior_al.html">Superior Bank</a></td>
+ <td class="city">Birmingham</td>
+ <td class="state">AL</td>
+ <td class="cert">17750</td>
+ <td class="ai">Superior Bank, National Association</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="nexity.html">Nexity Bank</a></td>
+ <td class="city">Birmingham</td>
+ <td class="state">AL</td>
+ <td class="cert">19794</td>
+ <td class="ai">AloStar Bank of Commerce</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newhorizons.html">New Horizons Bank</a></td>
+ <td class="city">East Ellijay</td>
+ <td class="state">GA</td>
+ <td class="cert">57705</td>
+ <td class="ai">Citizens South Bank</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bartow.html">Bartow County Bank</a></td>
+ <td class="city">Cartersville</td>
+ <td class="state">GA</td>
+ <td class="cert">21495</td>
+ <td class="ai">Hamilton State Bank</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="nevadacommerce.html">Nevada Commerce Bank</a></td>
+ <td class="city">Las Vegas</td>
+ <td class="state">NV</td>
+ <td class="cert">35418</td>
+ <td class="ai">City National Bank</td>
+ <td class="closing">April 8, 2011</td>
+ <td class="updated">September 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westernsprings.html">Western Springs National Bank and Trust</a></td>
+ <td class="city">Western Springs</td>
+ <td class="state">IL</td>
+ <td class="cert">10086</td>
+ <td class="ai">Heartland Bank and Trust Company</td>
+ <td class="closing">April 8, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofcommerce.html">The Bank of Commerce</a></td>
+ <td class="city">Wood Dale</td>
+ <td class="state">IL</td>
+ <td class="cert">34292</td>
+ <td class="ai">Advantage National Bank Group</td>
+ <td class="closing">March 25, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="legacy-wi.html">Legacy Bank</a></td>
+ <td class="city">Milwaukee</td>
+ <td class="state">WI</td>
+ <td class="cert">34818</td>
+ <td class="ai">Seaway Bank and Trust Company</td>
+ <td class="closing">March 11, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstnatldavis.html">First National Bank of Davis</a></td>
+ <td class="city">Davis</td>
+ <td class="state">OK</td>
+ <td class="cert">4077</td>
+ <td class="ai">The Pauls Valley National Bank</td>
+ <td class="closing">March 11, 2011</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="valleycomm.html">Valley Community Bank</a></td>
+ <td class="city">St. Charles</td>
+ <td class="state">IL</td>
+ <td class="cert">34187</td>
+ <td class="ai">First State Bank</td>
+ <td class="closing">February 25, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sanluistrust.html">San Luis Trust Bank, FSB</a></td>
+ <td class="city">San Luis Obispo</td>
+ <td class="state">CA</td>
+ <td class="cert">34783</td>
+ <td class="ai">First California Bank</td>
+ <td class="closing">February 18, 2011</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="charteroak.html">Charter Oak Bank</a></td>
+ <td class="city">Napa</td>
+ <td class="state">CA</td>
+ <td class="cert">57855</td>
+ <td class="ai">Bank of Marin</td>
+ <td class="closing">February 18, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizensbk_ga.html">Citizens Bank of Effingham</a></td>
+ <td class="city">Springfield</td>
+ <td class="state">GA</td>
+ <td class="cert">34601</td>
+ <td class="ai">Heritage Bank of the South</td>
+ <td class="closing">February 18, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="habersham.html">Habersham Bank</a></td>
+ <td class="city">Clarkesville</td>
+ <td class="state">GA</td>
+ <td class="cert">151</td>
+ <td class="ai">SCBT National Association</td>
+ <td class="closing">February 18, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="canyonstate.html">Canyon National Bank</a></td>
+ <td class="city">Palm Springs</td>
+ <td class="state">CA</td>
+ <td class="cert">34692</td>
+ <td class="ai">Pacific Premier Bank</td>
+ <td class="closing">February 11, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="badgerstate.html">Badger State Bank</a></td>
+ <td class="city">Cassville</td>
+ <td class="state">WI</td>
+ <td class="cert">13272</td>
+ <td class="ai">Royal Bank</td>
+ <td class="closing">February 11, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peoplesstatebank.html">Peoples State Bank</a></td>
+ <td class="city">Hamtramck</td>
+ <td class="state">MI</td>
+ <td class="cert">14939</td>
+ <td class="ai">First Michigan Bank</td>
+ <td class="closing">February 11, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sunshinestate.html">Sunshine State Community Bank</a></td>
+ <td class="city">Port Orange</td>
+ <td class="state">FL</td>
+ <td class="cert">35478</td>
+ <td class="ai">Premier American Bank, N.A.</td>
+ <td class="closing">February 11, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commfirst_il.html">Community First Bank Chicago</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">57948</td>
+ <td class="ai">Northbrook Bank & Trust Company</td>
+ <td class="closing">February 4, 2011</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="northgabank.html">North Georgia Bank</a></td>
+ <td class="city">Watkinsville</td>
+ <td class="state">GA</td>
+ <td class="cert">35242</td>
+ <td class="ai">BankSouth</td>
+ <td class="closing">February 4, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americantrust.html">American Trust Bank</a></td>
+ <td class="city">Roswell</td>
+ <td class="state">GA</td>
+ <td class="cert">57432</td>
+ <td class="ai">Renasant Bank</td>
+ <td class="closing">February 4, 2011</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcomm_nm.html">First Community Bank</a></td>
+ <td class="city">Taos</td>
+ <td class="state">NM</td>
+ <td class="cert">12261</td>
+ <td class="ai">U.S. Bank, N.A.</td>
+ <td class="closing">January 28, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstier.html">FirsTier Bank</a></td>
+ <td class="city">Louisville</td>
+ <td class="state">CO</td>
+ <td class="cert">57646</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">January 28, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="evergreenstatewi.html">Evergreen State Bank</a></td>
+ <td class="city">Stoughton</td>
+ <td class="state">WI</td>
+ <td class="cert">5328</td>
+ <td class="ai">McFarland State Bank</td>
+ <td class="closing">January 28, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firststatebank_ok.html">The First State Bank</a></td>
+ <td class="city">Camargo</td>
+ <td class="state">OK</td>
+ <td class="cert">2303</td>
+ <td class="ai">Bank 7</td>
+ <td class="closing">January 28, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="unitedwestern.html">United Western Bank</a></td>
+ <td class="city">Denver</td>
+ <td class="state">CO</td>
+ <td class="cert">31293</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">January 21, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofasheville.html">The Bank of Asheville</a></td>
+ <td class="city">Asheville</td>
+ <td class="state">NC</td>
+ <td class="cert">34516</td>
+ <td class="ai">First Bank</td>
+ <td class="closing">January 21, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commsouth.html">CommunitySouth Bank & Trust</a></td>
+ <td class="city">Easley</td>
+ <td class="state">SC</td>
+ <td class="cert">57868</td>
+ <td class="ai">CertusBank, National Association</td>
+ <td class="closing">January 21, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="enterprise.html">Enterprise Banking Company</a></td>
+ <td class="city">McDonough</td>
+ <td class="state">GA</td>
+ <td class="cert">19758</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">January 21, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="oglethorpe.html">Oglethorpe Bank</a></td>
+ <td class="city">Brunswick</td>
+ <td class="state">GA</td>
+ <td class="cert">57440</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">January 14, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="legacybank.html">Legacy Bank</a></td>
+ <td class="city">Scottsdale</td>
+ <td class="state">AZ</td>
+ <td class="cert">57820</td>
+ <td class="ai">Enterprise Bank & Trust</td>
+ <td class="closing">January 7, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcommercial.html">First Commercial Bank of Florida</a></td>
+ <td class="city">Orlando</td>
+ <td class="state">FL</td>
+ <td class="cert">34965</td>
+ <td class="ai">First Southern Bank</td>
+ <td class="closing">January 7, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="communitynatl.html">Community National Bank</a></td>
+ <td class="city">Lino Lakes</td>
+ <td class="state">MN</td>
+ <td class="cert">23306</td>
+ <td class="ai">Farmers & Merchants Savings Bank</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstsouthern.html">First Southern Bank</a></td>
+ <td class="city">Batesville</td>
+ <td class="state">AR</td>
+ <td class="cert">58052</td>
+ <td class="ai">Southern Bank</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="unitedamericas.html">United Americas Bank, N.A.</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">35065</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="appalachianga.html">Appalachian Community Bank, FSB</a></td>
+ <td class="city">McCaysville</td>
+ <td class="state">GA</td>
+ <td class="cert">58495</td>
+ <td class="ai">Peoples Bank of East Tennessee</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="chestatee.html">Chestatee State Bank</a></td>
+ <td class="city">Dawsonville</td>
+ <td class="state">GA</td>
+ <td class="cert">34578</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofmiami.html">The Bank of Miami,N.A.</a></td>
+ <td class="city">Coral Gables</td>
+ <td class="state">FL</td>
+ <td class="cert">19040</td>
+ <td class="ai">1st United Bank</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="earthstar.html">Earthstar Bank</a></td>
+ <td class="city">Southampton</td>
+ <td class="state">PA</td>
+ <td class="cert">35561</td>
+ <td class="ai">Polonia Bank</td>
+ <td class="closing">December 10, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="paramount.html">Paramount Bank</a></td>
+ <td class="city">Farmington Hills</td>
+ <td class="state">MI</td>
+ <td class="cert">34673</td>
+ <td class="ai">Level One Bank</td>
+ <td class="closing">December 10, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstbanking.html">First Banking Center</a></td>
+ <td class="city">Burlington</td>
+ <td class="state">WI</td>
+ <td class="cert">5287</td>
+ <td class="ai">First Michigan Bank</td>
+ <td class="closing">November 19, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="allegbank.html">Allegiance Bank of North America</a></td>
+ <td class="city">Bala Cynwyd</td>
+ <td class="state">PA</td>
+ <td class="cert">35078</td>
+ <td class="ai">VIST Bank</td>
+ <td class="closing">November 19, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="gulfstate.html">Gulf State Community Bank</a></td>
+ <td class="city">Carrabelle</td>
+ <td class="state">FL</td>
+ <td class="cert">20340</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">November 19, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="copperstar.html">Copper Star Bank</a></td>
+ <td class="city">Scottsdale</td>
+ <td class="state">AZ</td>
+ <td class="cert">35463</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">November 12, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="darbybank.html">Darby Bank & Trust Co.</a></td>
+ <td class="city">Vidalia</td>
+ <td class="state">GA</td>
+ <td class="cert">14580</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">November 12, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="tifton.html">Tifton Banking Company</a></td>
+ <td class="city">Tifton</td>
+ <td class="state">GA</td>
+ <td class="cert">57831</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">November 12, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstvietnamese.html">First Vietnamese American Bank</a><br><a href="firstvietnamese_viet.pdf">In Vietnamese</a></td>
+ <td class="city">Westminster</td>
+ <td class="state">CA</td>
+ <td class="cert">57885</td>
+ <td class="ai">Grandpoint Bank</td>
+ <td class="closing">November 5, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="piercecommercial.html">Pierce Commercial Bank</a></td>
+ <td class="city">Tacoma</td>
+ <td class="state">WA</td>
+ <td class="cert">34411</td>
+ <td class="ai">Heritage Bank</td>
+ <td class="closing">November 5, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westerncommercial_ca.html">Western Commercial Bank</a></td>
+ <td class="city">Woodland Hills</td>
+ <td class="state">CA</td>
+ <td class="cert">58087</td>
+ <td class="ai">First California Bank</td>
+ <td class="closing">November 5, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="kbank.html">K Bank</a></td>
+ <td class="city">Randallstown</td>
+ <td class="state">MD</td>
+ <td class="cert">31263</td>
+ <td class="ai">Manufacturers and Traders Trust Company (M&T Bank)</td>
+ <td class="closing">November 5, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstazfsb.html">First Arizona Savings, A FSB</a></td>
+ <td class="city">Scottsdale</td>
+ <td class="state">AZ</td>
+ <td class="cert">32582</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="hillcrest_ks.html">Hillcrest Bank</a></td>
+ <td class="city">Overland Park</td>
+ <td class="state">KS</td>
+ <td class="cert">22173</td>
+ <td class="ai">Hillcrest Bank, N.A.</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstsuburban.html">First Suburban National Bank</a></td>
+ <td class="city">Maywood</td>
+ <td class="state">IL</td>
+ <td class="cert">16089</td>
+ <td class="ai">Seaway Bank and Trust Company</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fnbbarnesville.html">The First National Bank of Barnesville</a></td>
+ <td class="city">Barnesville</td>
+ <td class="state">GA</td>
+ <td class="cert">2119</td>
+ <td class="ai">United Bank</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="gordon.html">The Gordon Bank</a></td>
+ <td class="city">Gordon</td>
+ <td class="state">GA</td>
+ <td class="cert">33904</td>
+ <td class="ai">Morris Bank</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="progress_fl.html">Progress Bank of Florida</a></td>
+ <td class="city">Tampa</td>
+ <td class="state">FL</td>
+ <td class="cert">32251</td>
+ <td class="ai">Bay Cities Bank</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstbankjacksonville.html">First Bank of Jacksonville</a></td>
+ <td class="city">Jacksonville</td>
+ <td class="state">FL</td>
+ <td class="cert">27573</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="premier_mo.html">Premier Bank</a></td>
+ <td class="city">Jefferson City</td>
+ <td class="state">MO</td>
+ <td class="cert">34016</td>
+ <td class="ai">Providence Bank</td>
+ <td class="closing">October 15, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westbridge.html">WestBridge Bank and Trust Company</a></td>
+ <td class="city">Chesterfield</td>
+ <td class="state">MO</td>
+ <td class="cert">58205</td>
+ <td class="ai">Midland States Bank</td>
+ <td class="closing">October 15, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="securitysavingsfsb.html">Security Savings Bank, F.S.B.</a></td>
+ <td class="city">Olathe</td>
+ <td class="state">KS</td>
+ <td class="cert">30898</td>
+ <td class="ai">Simmons First National Bank</td>
+ <td class="closing">October 15, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="shoreline.html">Shoreline Bank</a></td>
+ <td class="city">Shoreline</td>
+ <td class="state">WA</td>
+ <td class="cert">35250</td>
+ <td class="ai">GBC International Bank</td>
+ <td class="closing">October 1, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="wakulla.html">Wakulla Bank</a></td>
+ <td class="city">Crawfordville</td>
+ <td class="state">FL</td>
+ <td class="cert">21777</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">October 1, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="northcounty.html">North County Bank</a></td>
+ <td class="city">Arlington</td>
+ <td class="state">WA</td>
+ <td class="cert">35053</td>
+ <td class="ai">Whidbey Island Bank</td>
+ <td class="closing">September 24, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="haventrust_fl.html">Haven Trust Bank Florida</a></td>
+ <td class="city">Ponte Vedra Beach</td>
+ <td class="state">FL</td>
+ <td class="cert">58308</td>
+ <td class="ai">First Southern Bank</td>
+ <td class="closing">September 24, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="maritimesavings.html">Maritime Savings Bank</a></td>
+ <td class="city">West Allis</td>
+ <td class="state">WI</td>
+ <td class="cert">28612</td>
+ <td class="ai">North Shore Bank, FSB</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bramblesavings.html">Bramble Savings Bank</a></td>
+ <td class="city">Milford</td>
+ <td class="state">OH</td>
+ <td class="cert">27808</td>
+ <td class="ai">Foundation Bank</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peoplesbank_ga.html">The Peoples Bank</a></td>
+ <td class="city">Winder</td>
+ <td class="state">GA</td>
+ <td class="cert">182</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcommerce_ga.html">First Commerce Community Bank</a></td>
+ <td class="city">Douglasville</td>
+ <td class="state">GA</td>
+ <td class="cert">57448</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ellijay.html">Bank of Ellijay</a></td>
+ <td class="city">Ellijay</td>
+ <td class="state">GA</td>
+ <td class="cert">58197</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="isnbank.html">ISN Bank</a></td>
+ <td class="city">Cherry Hill</td>
+ <td class="state">NJ</td>
+ <td class="cert">57107</td>
+ <td class="ai">Customers Bank</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="horizonfl.html">Horizon Bank</a></td>
+ <td class="city">Bradenton</td>
+ <td class="state">FL</td>
+ <td class="cert">35061</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">September 10, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sonoma.html">Sonoma Valley Bank</a></td>
+ <td class="city">Sonoma</td>
+ <td class="state">CA</td>
+ <td class="cert">27259</td>
+ <td class="ai">Westamerica Bank</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lospadres.html">Los Padres Bank</a></td>
+ <td class="city">Solvang</td>
+ <td class="state">CA</td>
+ <td class="cert">32165</td>
+ <td class="ai">Pacific Western Bank</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="butte.html">Butte Community Bank</a></td>
+ <td class="city">Chico</td>
+ <td class="state">CA</td>
+ <td class="cert">33219</td>
+ <td class="ai">Rabobank, N.A.</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pacificbk.html">Pacific State Bank</a></td>
+ <td class="city">Stockton</td>
+ <td class="state">CA</td>
+ <td class="cert">27090</td>
+ <td class="ai">Rabobank, N.A.</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="shorebank.html">ShoreBank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">15640</td>
+ <td class="ai">Urban Partnership Bank</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">May 16, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="imperialsvgs.html">Imperial Savings and Loan Association</a></td>
+ <td class="city">Martinsville</td>
+ <td class="state">VA</td>
+ <td class="cert">31623</td>
+ <td class="ai">River Community Bank, N.A.</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="inatbank.html">Independent National Bank</a></td>
+ <td class="city">Ocala</td>
+ <td class="state">FL</td>
+ <td class="cert">27344</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cnbbartow.html">Community National Bank at Bartow</a></td>
+ <td class="city">Bartow</td>
+ <td class="state">FL</td>
+ <td class="cert">25266</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="palosbank.html">Palos Bank and Trust Company</a></td>
+ <td class="city">Palos Heights</td>
+ <td class="state">IL</td>
+ <td class="cert">17599</td>
+ <td class="ai">First Midwest Bank</td>
+ <td class="closing">August 13, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ravenswood.html">Ravenswood Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">34231</td>
+ <td class="ai">Northbrook Bank & Trust Company</td>
+ <td class="closing">August 6, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="libertyor.html">LibertyBank</a></td>
+ <td class="city">Eugene</td>
+ <td class="state">OR</td>
+ <td class="cert">31964</td>
+ <td class="ai">Home Federal Bank</td>
+ <td class="closing">July 30, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cowlitz.html">The Cowlitz Bank</a></td>
+ <td class="city">Longview</td>
+ <td class="state">WA</td>
+ <td class="cert">22643</td>
+ <td class="ai">Heritage Bank</td>
+ <td class="closing">July 30, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="coastal.html">Coastal Community Bank</a></td>
+ <td class="city">Panama City Beach</td>
+ <td class="state">FL</td>
+ <td class="cert">9619</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">July 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bayside.html">Bayside Savings Bank</a></td>
+ <td class="city">Port Saint Joe</td>
+ <td class="state">FL</td>
+ <td class="cert">57669</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">July 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="northwestga.html">Northwest Bank & Trust</a></td>
+ <td class="city">Acworth</td>
+ <td class="state">GA</td>
+ <td class="cert">57658</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="homevalleyor.html">Home Valley Bank</a></td>
+ <td class="city">Cave Junction</td>
+ <td class="state">OR</td>
+ <td class="cert">23181</td>
+ <td class="ai">South Valley Bank & Trust</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="southwestusanv.html">SouthwestUSA Bank</a></td>
+ <td class="city">Las Vegas</td>
+ <td class="state">NV</td>
+ <td class="cert">35434</td>
+ <td class="ai">Plaza Bank</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="communitysecmn.html">Community Security Bank</a></td>
+ <td class="city">New Prague</td>
+ <td class="state">MN</td>
+ <td class="cert">34486</td>
+ <td class="ai">Roundbank</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="thunderbankks.html">Thunder Bank</a></td>
+ <td class="city">Sylvan Grove</td>
+ <td class="state">KS</td>
+ <td class="cert">10506</td>
+ <td class="ai">The Bennington State Bank</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">September 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="williamsburgsc.html">Williamsburg First National Bank</a></td>
+ <td class="city">Kingstree</td>
+ <td class="state">SC</td>
+ <td class="cert">17837</td>
+ <td class="ai">First Citizens Bank and Trust Company, Inc.</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="crescentga.html">Crescent Bank and Trust Company</a></td>
+ <td class="city">Jasper</td>
+ <td class="state">GA</td>
+ <td class="cert">27559</td>
+ <td class="ai">Renasant Bank</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sterlingfl.html">Sterling Bank</a></td>
+ <td class="city">Lantana</td>
+ <td class="state">FL</td>
+ <td class="cert">32536</td>
+ <td class="ai">IBERIABANK</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mainstsvgs.html">Mainstreet Savings Bank, FSB</a></td>
+ <td class="city">Hastings</td>
+ <td class="state">MI</td>
+ <td class="cert">28136</td>
+ <td class="ai">Commercial Bank</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">September 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="oldecypress.html">Olde Cypress Community Bank</a></td>
+ <td class="city">Clewiston</td>
+ <td class="state">FL</td>
+ <td class="cert">28864</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="turnberry.html">Turnberry Bank</a></td>
+ <td class="city">Aventura</td>
+ <td class="state">FL</td>
+ <td class="cert">32280</td>
+ <td class="ai">NAFH National Bank</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="metrobankfl.html">Metro Bank of Dade County</a></td>
+ <td class="city">Miami</td>
+ <td class="state">FL</td>
+ <td class="cert">25172</td>
+ <td class="ai">NAFH National Bank</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstnatlsc.html">First National Bank of the South</a></td>
+ <td class="city">Spartanburg</td>
+ <td class="state">SC</td>
+ <td class="cert">35383</td>
+ <td class="ai">NAFH National Bank</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="woodlands.html">Woodlands Bank</a></td>
+ <td class="city">Bluffton</td>
+ <td class="state">SC</td>
+ <td class="cert">32571</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="homenatlok.html">Home National Bank</a></td>
+ <td class="city">Blackwell</td>
+ <td class="state">OK</td>
+ <td class="cert">11636</td>
+ <td class="ai">RCB Bank</td>
+ <td class="closing">July 9, 2010</td>
+ <td class="updated">December 10, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="usabankny.html">USA Bank</a></td>
+ <td class="city">Port Chester</td>
+ <td class="state">NY</td>
+ <td class="cert">58072</td>
+ <td class="ai">New Century Bank</td>
+ <td class="closing">July 9, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="idealfedsvngsmd.html">Ideal Federal Savings Bank</a></td>
+ <td class="city">Baltimore</td>
+ <td class="state">MD</td>
+ <td class="cert">32456</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">July 9, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="baynatlmd.html">Bay National Bank</a></td>
+ <td class="city">Baltimore</td>
+ <td class="state">MD</td>
+ <td class="cert">35462</td>
+ <td class="ai">Bay Bank, FSB</td>
+ <td class="closing">July 9, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="highdesertnm.html">High Desert State Bank</a></td>
+ <td class="city">Albuquerque</td>
+ <td class="state">NM</td>
+ <td class="cert">35279</td>
+ <td class="ai">First American Bank</td>
+ <td class="closing">June 25, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstnatga.html">First National Bank</a></td>
+ <td class="city">Savannah</td>
+ <td class="state">GA</td>
+ <td class="cert">34152</td>
+ <td class="ai">The Savannah Bank, N.A.</td>
+ <td class="closing">June 25, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peninsulafl.html">Peninsula Bank</a></td>
+ <td class="city">Englewood</td>
+ <td class="state">FL</td>
+ <td class="cert">26563</td>
+ <td class="ai">Premier American Bank, N.A.</td>
+ <td class="closing">June 25, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="nevsecbank.html">Nevada Security Bank</a></td>
+ <td class="city">Reno</td>
+ <td class="state">NV</td>
+ <td class="cert">57110</td>
+ <td class="ai">Umpqua Bank</td>
+ <td class="closing">June 18, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="washfirstintl.html">Washington First International Bank</a></td>
+ <td class="city">Seattle</td>
+ <td class="state">WA</td>
+ <td class="cert">32955</td>
+ <td class="ai">East West Bank</td>
+ <td class="closing">June 11, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="tieronebankne.html">TierOne Bank</a></td>
+ <td class="city">Lincoln</td>
+ <td class="state">NE</td>
+ <td class="cert">29341</td>
+ <td class="ai">Great Western Bank</td>
+ <td class="closing">June 4, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="arcolail.html">Arcola Homestead Savings Bank</a></td>
+ <td class="city">Arcola</td>
+ <td class="state">IL</td>
+ <td class="cert">31813</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">June 4, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstnatms.html">First National Bank</a></td>
+ <td class="city">Rosedale</td>
+ <td class="state">MS</td>
+ <td class="cert">15814</td>
+ <td class="ai">The Jefferson Bank</td>
+ <td class="closing">June 4, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="swbnevada.html">Sun West Bank</a></td>
+ <td class="city">Las Vegas</td>
+ <td class="state">NV</td>
+ <td class="cert">34785</td>
+ <td class="ai">City National Bank</td>
+ <td class="closing">May 28, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="graniteca.html">Granite Community Bank, NA</a></td>
+ <td class="city">Granite Bay</td>
+ <td class="state">CA</td>
+ <td class="cert">57315</td>
+ <td class="ai">Tri Counties Bank</td>
+ <td class="closing">May 28, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankoffloridatb.html">Bank of Florida - Tampa</a></td>
+ <td class="city">Tampa</td>
+ <td class="state">FL</td>
+ <td class="cert">57814</td>
+ <td class="ai">EverBank</td>
+ <td class="closing">May 28, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankoffloridasw.html">Bank of Florida - Southwest</a></td>
+ <td class="city">Naples</td>
+ <td class="state">FL</td>
+ <td class="cert">35106</td>
+ <td class="ai">EverBank</td>
+ <td class="closing">May 28, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankoffloridase.html">Bank of Florida - Southeast</a></td>
+ <td class="city">Fort Lauderdale</td>
+ <td class="state">FL</td>
+ <td class="cert">57360</td>
+ <td class="ai">EverBank</td>
+ <td class="closing">May 28, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pinehurstmn.html">Pinehurst Bank</a></td>
+ <td class="city">Saint Paul</td>
+ <td class="state">MN</td>
+ <td class="cert">57735</td>
+ <td class="ai">Coulee Bank</td>
+ <td class="closing">May 21, 2010</td>
+ <td class="updated">October 26, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="midwestil.html">Midwest Bank and Trust Company</a></td>
+ <td class="city">Elmwood Park</td>
+ <td class="state">IL</td>
+ <td class="cert">18117</td>
+ <td class="ai">FirstMerit Bank, N.A.</td>
+ <td class="closing">May 14, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="swcmntymo.html">Southwest Community Bank</a></td>
+ <td class="city">Springfield</td>
+ <td class="state">MO</td>
+ <td class="cert">34255</td>
+ <td class="ai">Simmons First National Bank</td>
+ <td class="closing">May 14, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newlibertymi.html">New Liberty Bank</a></td>
+ <td class="city">Plymouth</td>
+ <td class="state">MI</td>
+ <td class="cert">35586</td>
+ <td class="ai">Bank of Ann Arbor</td>
+ <td class="closing">May 14, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="satillacmntyga.html">Satilla Community Bank</a></td>
+ <td class="city">Saint Marys</td>
+ <td class="state">GA</td>
+ <td class="cert">35114</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">May 14, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="1stpacific.html">1st Pacific Bank of California</a></td>
+ <td class="city">San Diego</td>
+ <td class="state">CA</td>
+ <td class="cert">35517</td>
+ <td class="ai">City National Bank</td>
+ <td class="closing">May 7, 2010</td>
+ <td class="updated">December 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="townebank.html">Towne Bank of Arizona</a></td>
+ <td class="city">Mesa</td>
+ <td class="state">AZ</td>
+ <td class="cert">57697</td>
+ <td class="ai">Commerce Bank of Arizona</td>
+ <td class="closing">May 7, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="accessbank.html">Access Bank</a></td>
+ <td class="city">Champlin</td>
+ <td class="state">MN</td>
+ <td class="cert">16476</td>
+ <td class="ai">PrinsBank</td>
+ <td class="closing">May 7, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bonifay.html">The Bank of Bonifay</a></td>
+ <td class="city">Bonifay</td>
+ <td class="state">FL</td>
+ <td class="cert">14246</td>
+ <td class="ai">First Federal Bank of Florida</td>
+ <td class="closing">May 7, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="frontier.html">Frontier Bank</a></td>
+ <td class="city">Everett</td>
+ <td class="state">WA</td>
+ <td class="cert">22710</td>
+ <td class="ai">Union Bank, N.A.</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bc-natl.html">BC National Banks</a></td>
+ <td class="city">Butler</td>
+ <td class="state">MO</td>
+ <td class="cert">17792</td>
+ <td class="ai">Community First Bank</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="champion.html">Champion Bank</a></td>
+ <td class="city">Creve Coeur</td>
+ <td class="state">MO</td>
+ <td class="cert">58362</td>
+ <td class="ai">BankLiberty</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cfbancorp.html">CF Bancorp</a></td>
+ <td class="city">Port Huron</td>
+ <td class="state">MI</td>
+ <td class="cert">30005</td>
+ <td class="ai">First Michigan Bank</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westernbank-puertorico.html">Westernbank Puerto Rico</a><br><a href="westernbank-puertorico_spanish.html">En Espanol</a></td>
+ <td class="city">Mayaguez</td>
+ <td class="state">PR</td>
+ <td class="cert">31027</td>
+ <td class="ai">Banco Popular de Puerto Rico</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="r-gpremier-puertorico.html">R-G Premier Bank of Puerto Rico</a><br><a href="r-gpremier-puertorico_spanish.html">En Espanol</a></td>
+ <td class="city">Hato Rey</td>
+ <td class="state">PR</td>
+ <td class="cert">32185</td>
+ <td class="ai">Scotiabank de Puerto Rico</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="eurobank-puertorico.html">Eurobank</a><br><a href="eurobank-puertorico_spanish.html">En Espanol</a></td>
+ <td class="city">San Juan</td>
+ <td class="state">PR</td>
+ <td class="cert">27150</td>
+ <td class="ai">Oriental Bank and Trust</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="wheatland.html">Wheatland Bank</a></td>
+ <td class="city">Naperville</td>
+ <td class="state">IL</td>
+ <td class="cert">58429</td>
+ <td class="ai">Wheaton Bank & Trust</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peotone.html">Peotone Bank and Trust Company</a></td>
+ <td class="city">Peotone</td>
+ <td class="state">IL</td>
+ <td class="cert">10888</td>
+ <td class="ai">First Midwest Bank</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lincoln-park.html">Lincoln Park Savings Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">30600</td>
+ <td class="ai">Northbrook Bank & Trust Company</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="new-century-il.html">New Century Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">34821</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizens-bank.html">Citizens Bank and Trust Company of Chicago</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">34658</td>
+ <td class="ai">Republic Bank of Chicago</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="broadway.html">Broadway Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">22853</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amcore.html">Amcore Bank, National Association</a></td>
+ <td class="city">Rockford</td>
+ <td class="state">IL</td>
+ <td class="cert">3735</td>
+ <td class="ai">Harris N.A.</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citybank.html">City Bank</a></td>
+ <td class="city">Lynnwood</td>
+ <td class="state">WA</td>
+ <td class="cert">21521</td>
+ <td class="ai">Whidbey Island Bank</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="tamalpais.html">Tamalpais Bank</a></td>
+ <td class="city">San Rafael</td>
+ <td class="state">CA</td>
+ <td class="cert">33493</td>
+ <td class="ai">Union Bank, N.A.</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="innovative.html">Innovative Bank</a></td>
+ <td class="city">Oakland</td>
+ <td class="state">CA</td>
+ <td class="cert">23876</td>
+ <td class="ai">Center Bank</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="butlerbank.html">Butler Bank</a></td>
+ <td class="city">Lowell</td>
+ <td class="state">MA</td>
+ <td class="cert">26619</td>
+ <td class="ai">People's United Bank</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="riverside-natl.html">Riverside National Bank of Florida</a></td>
+ <td class="city">Fort Pierce</td>
+ <td class="state">FL</td>
+ <td class="cert">24067</td>
+ <td class="ai">TD Bank, N.A.</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americanfirst.html">AmericanFirst Bank</a></td>
+ <td class="city">Clermont</td>
+ <td class="state">FL</td>
+ <td class="cert">57724</td>
+ <td class="ai">TD Bank, N.A.</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ffbnf.html">First Federal Bank of North Florida</a></td>
+ <td class="city">Palatka</td>
+ <td class="state">FL</td>
+ <td class="cert">28886</td>
+ <td class="ai">TD Bank, N.A.</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lakeside-comm.html">Lakeside Community Bank</a></td>
+ <td class="city">Sterling Heights</td>
+ <td class="state">MI</td>
+ <td class="cert">34878</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="beachfirst.html">Beach First National Bank</a></td>
+ <td class="city">Myrtle Beach</td>
+ <td class="state">SC</td>
+ <td class="cert">34242</td>
+ <td class="ai">Bank of North Carolina</td>
+ <td class="closing">April 9, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="deserthills.html">Desert Hills Bank</a></td>
+ <td class="city">Phoenix</td>
+ <td class="state">AZ</td>
+ <td class="cert">57060</td>
+ <td class="ai">New York Community Bank</td>
+ <td class="closing">March 26, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="unity-natl.html">Unity National Bank</a></td>
+ <td class="city">Cartersville</td>
+ <td class="state">GA</td>
+ <td class="cert">34678</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">March 26, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="key-west.html">Key West Bank</a></td>
+ <td class="city">Key West</td>
+ <td class="state">FL</td>
+ <td class="cert">34684</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">March 26, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mcintosh.html">McIntosh Commercial Bank</a></td>
+ <td class="city">Carrollton</td>
+ <td class="state">GA</td>
+ <td class="cert">57399</td>
+ <td class="ai">CharterBank</td>
+ <td class="closing">March 26, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="state-aurora.html">State Bank of Aurora</a></td>
+ <td class="city">Aurora</td>
+ <td class="state">MN</td>
+ <td class="cert">8221</td>
+ <td class="ai">Northern State Bank</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstlowndes.html">First Lowndes Bank</a></td>
+ <td class="city">Fort Deposit</td>
+ <td class="state">AL</td>
+ <td class="cert">24957</td>
+ <td class="ai">First Citizens Bank</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofhiawassee.html">Bank of Hiawassee</a></td>
+ <td class="city">Hiawassee</td>
+ <td class="state">GA</td>
+ <td class="cert">10054</td>
+ <td class="ai">Citizens South Bank</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="appalachian.html">Appalachian Community Bank</a></td>
+ <td class="city">Ellijay</td>
+ <td class="state">GA</td>
+ <td class="cert">33989</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="advanta-ut.html">Advanta Bank Corp.</a></td>
+ <td class="city">Draper</td>
+ <td class="state">UT</td>
+ <td class="cert">33535</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cent-security.html">Century Security Bank</a></td>
+ <td class="city">Duluth</td>
+ <td class="state">GA</td>
+ <td class="cert">58104</td>
+ <td class="ai">Bank of Upson</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amer-natl-oh.html">American National Bank</a></td>
+ <td class="city">Parma</td>
+ <td class="state">OH</td>
+ <td class="cert">18806</td>
+ <td class="ai">The National Bank and Trust Company</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="statewide.html">Statewide Bank</a></td>
+ <td class="city">Covington</td>
+ <td class="state">LA</td>
+ <td class="cert">29561</td>
+ <td class="ai">Home Bank</td>
+ <td class="closing">March 12, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="oldsouthern.html">Old Southern Bank</a></td>
+ <td class="city">Orlando</td>
+ <td class="state">FL</td>
+ <td class="cert">58182</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">March 12, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="parkavenue-ny.html">The Park Avenue Bank</a></td>
+ <td class="city">New York</td>
+ <td class="state">NY</td>
+ <td class="cert">27096</td>
+ <td class="ai">Valley National Bank</td>
+ <td class="closing">March 12, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="libertypointe.html">LibertyPointe Bank</a></td>
+ <td class="city">New York</td>
+ <td class="state">NY</td>
+ <td class="cert">58071</td>
+ <td class="ai">Valley National Bank</td>
+ <td class="closing">March 11, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="centennial-ut.html">Centennial Bank</a></td>
+ <td class="city">Ogden</td>
+ <td class="state">UT</td>
+ <td class="cert">34430</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 5, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="waterfield.html">Waterfield Bank</a></td>
+ <td class="city">Germantown</td>
+ <td class="state">MD</td>
+ <td class="cert">34976</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 5, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofillinois.html">Bank of Illinois</a></td>
+ <td class="city">Normal</td>
+ <td class="state">IL</td>
+ <td class="cert">9268</td>
+ <td class="ai">Heartland Bank and Trust Company</td>
+ <td class="closing">March 5, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sunamerican.html">Sun American Bank</a></td>
+ <td class="city">Boca Raton</td>
+ <td class="state">FL</td>
+ <td class="cert">27126</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">March 5, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="rainier.html">Rainier Pacific Bank</a></td>
+ <td class="city">Tacoma</td>
+ <td class="state">WA</td>
+ <td class="cert">38129</td>
+ <td class="ai">Umpqua Bank</td>
+ <td class="closing">February 26, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="carsonriver.html">Carson River Community Bank</a></td>
+ <td class="city">Carson City</td>
+ <td class="state">NV</td>
+ <td class="cert">58352</td>
+ <td class="ai">Heritage Bank of Nevada</td>
+ <td class="closing">February 26, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lajolla.html">La Jolla Bank, FSB</a></td>
+ <td class="city">La Jolla</td>
+ <td class="state">CA</td>
+ <td class="cert">32423</td>
+ <td class="ai">OneWest Bank, FSB</td>
+ <td class="closing">February 19, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="georgewashington.html">George Washington Savings Bank</a></td>
+ <td class="city">Orland Park</td>
+ <td class="state">IL</td>
+ <td class="cert">29952</td>
+ <td class="ai">FirstMerit Bank, N.A.</td>
+ <td class="closing">February 19, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lacoste.html">The La Coste National Bank</a></td>
+ <td class="city">La Coste</td>
+ <td class="state">TX</td>
+ <td class="cert">3287</td>
+ <td class="ai">Community National Bank</td>
+ <td class="closing">February 19, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="marco.html">Marco Community Bank</a></td>
+ <td class="city">Marco Island</td>
+ <td class="state">FL</td>
+ <td class="cert">57586</td>
+ <td class="ai">Mutual of Omaha Bank</td>
+ <td class="closing">February 19, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="1stamerican.html">1st American State Bank of Minnesota</a></td>
+ <td class="city">Hancock</td>
+ <td class="state">MN</td>
+ <td class="cert">15448</td>
+ <td class="ai">Community Development Bank, FSB</td>
+ <td class="closing">February 5, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americanmarine.html">American Marine Bank</a></td>
+ <td class="city">Bainbridge Island</td>
+ <td class="state">WA</td>
+ <td class="cert">16730</td>
+ <td class="ai">Columbia State Bank</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstregional.html">First Regional Bank</a></td>
+ <td class="city">Los Angeles</td>
+ <td class="state">CA</td>
+ <td class="cert">23011</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cbt-cornelia.html">Community Bank and Trust</a></td>
+ <td class="city">Cornelia</td>
+ <td class="state">GA</td>
+ <td class="cert">5702</td>
+ <td class="ai">SCBT National Association</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="marshall-mn.html">Marshall Bank, N.A.</a></td>
+ <td class="city">Hallock</td>
+ <td class="state">MN</td>
+ <td class="cert">16133</td>
+ <td class="ai">United Valley Bank</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="floridacommunity.html">Florida Community Bank</a></td>
+ <td class="city">Immokalee</td>
+ <td class="state">FL</td>
+ <td class="cert">5672</td>
+ <td class="ai">Premier American Bank, N.A.</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstnational-carrollton.html">First National Bank of Georgia</a></td>
+ <td class="city">Carrollton</td>
+ <td class="state">GA</td>
+ <td class="cert">16480</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">December 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="columbiariver.html">Columbia River Bank</a></td>
+ <td class="city">The Dalles</td>
+ <td class="state">OR</td>
+ <td class="cert">22469</td>
+ <td class="ai">Columbia State Bank</td>
+ <td class="closing">January 22, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="evergreen-wa.html">Evergreen Bank</a></td>
+ <td class="city">Seattle</td>
+ <td class="state">WA</td>
+ <td class="cert">20501</td>
+ <td class="ai">Umpqua Bank</td>
+ <td class="closing">January 22, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="charter-nm.html">Charter Bank</a></td>
+ <td class="city">Santa Fe</td>
+ <td class="state">NM</td>
+ <td class="cert">32498</td>
+ <td class="ai">Charter Bank</td>
+ <td class="closing">January 22, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="leeton.html">Bank of Leeton</a></td>
+ <td class="city">Leeton</td>
+ <td class="state">MO</td>
+ <td class="cert">8265</td>
+ <td class="ai">Sunflower Bank, N.A.</td>
+ <td class="closing">January 22, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="premieramerican.html">Premier American Bank</a></td>
+ <td class="city">Miami</td>
+ <td class="state">FL</td>
+ <td class="cert">57147</td>
+ <td class="ai">Premier American Bank, N.A.</td>
+ <td class="closing">January 22, 2010</td>
+ <td class="updated">December 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="barnes.html">Barnes Banking Company</a></td>
+ <td class="city">Kaysville</td>
+ <td class="state">UT</td>
+ <td class="cert">1252</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">January 15, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ststephen.html">St. Stephen State Bank</a></td>
+ <td class="city">St. Stephen</td>
+ <td class="state">MN</td>
+ <td class="cert">17522</td>
+ <td class="ai">First State Bank of St. Joseph</td>
+ <td class="closing">January 15, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="towncommunity.html">Town Community Bank & Trust</a></td>
+ <td class="city">Antioch</td>
+ <td class="state">IL</td>
+ <td class="cert">34705</td>
+ <td class="ai">First American Bank</td>
+ <td class="closing">January 15, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="horizon-wa.html">Horizon Bank</a></td>
+ <td class="city">Bellingham</td>
+ <td class="state">WA</td>
+ <td class="cert">22977</td>
+ <td class="ai">Washington Federal Savings and Loan Association</td>
+ <td class="closing">January 8, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstfederal-ca.html">First Federal Bank of California, F.S.B.</a></td>
+ <td class="city">Santa Monica</td>
+ <td class="state">CA</td>
+ <td class="cert">28536</td>
+ <td class="ai">OneWest Bank, FSB</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="imperialcapital.html">Imperial Capital Bank</a></td>
+ <td class="city">La Jolla</td>
+ <td class="state">CA</td>
+ <td class="cert">26348</td>
+ <td class="ai">City National Bank</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ibb.html">Independent Bankers' Bank</a></td>
+ <td class="city">Springfield</td>
+ <td class="state">IL</td>
+ <td class="cert">26820</td>
+ <td class="ai">The Independent BankersBank (TIB)</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newsouth.html">New South Federal Savings Bank</a></td>
+ <td class="city">Irondale</td>
+ <td class="state">AL</td>
+ <td class="cert">32276</td>
+ <td class="ai">Beal Bank</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizensstate-mi.html">Citizens State Bank</a></td>
+ <td class="city">New Baltimore</td>
+ <td class="state">MI</td>
+ <td class="cert">1006</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peoplesfirst-fl.html">Peoples First Community Bank</a></td>
+ <td class="city">Panama City</td>
+ <td class="state">FL</td>
+ <td class="cert">32167</td>
+ <td class="ai">Hancock Bank</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="rockbridge.html">RockBridge Commercial Bank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">58315</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="solutions.html">SolutionsBank</a></td>
+ <td class="city">Overland Park</td>
+ <td class="state">KS</td>
+ <td class="cert">4731</td>
+ <td class="ai">Arvest Bank</td>
+ <td class="closing">December 11, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="valleycapital.html">Valley Capital Bank, N.A.</a></td>
+ <td class="city">Mesa</td>
+ <td class="state">AZ</td>
+ <td class="cert">58399</td>
+ <td class="ai">Enterprise Bank & Trust</td>
+ <td class="closing">December 11, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="republicfederal.html">Republic Federal Bank, N.A.</a></td>
+ <td class="city">Miami</td>
+ <td class="state">FL</td>
+ <td class="cert">22846</td>
+ <td class="ai">1st United Bank</td>
+ <td class="closing">December 11, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="atlantic-va.html">Greater Atlantic Bank</a></td>
+ <td class="city">Reston</td>
+ <td class="state">VA</td>
+ <td class="cert">32583</td>
+ <td class="ai">Sonabank</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="benchmark-il.html">Benchmark Bank</a></td>
+ <td class="city">Aurora</td>
+ <td class="state">IL</td>
+ <td class="cert">10440</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amtrust.html">AmTrust Bank</a></td>
+ <td class="city">Cleveland</td>
+ <td class="state">OH</td>
+ <td class="cert">29776</td>
+ <td class="ai">New York Community Bank</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="tattnall.html">The Tattnall Bank</a></td>
+ <td class="city">Reidsville</td>
+ <td class="state">GA</td>
+ <td class="cert">12080</td>
+ <td class="ai">Heritage Bank of the South</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstsecurity.html">First Security National Bank</a></td>
+ <td class="city">Norcross</td>
+ <td class="state">GA</td>
+ <td class="cert">26290</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="buckheadcommunity.html">The Buckhead Community Bank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">34663</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commercesw-fl.html">Commerce Bank of Southwest Florida</a></td>
+ <td class="city">Fort Myers</td>
+ <td class="state">FL</td>
+ <td class="cert">58016</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">November 20, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pacificcoastnatl.html">Pacific Coast National Bank</a></td>
+ <td class="city">San Clemente</td>
+ <td class="state">CA</td>
+ <td class="cert">57914</td>
+ <td class="ai">Sunwest Bank</td>
+ <td class="closing">November 13, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="orion-fl.html">Orion Bank</a></td>
+ <td class="city">Naples</td>
+ <td class="state">FL</td>
+ <td class="cert">22427</td>
+ <td class="ai">IBERIABANK</td>
+ <td class="closing">November 13, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="centuryfsb.html">Century Bank, F.S.B.</a></td>
+ <td class="city">Sarasota</td>
+ <td class="state">FL</td>
+ <td class="cert">32267</td>
+ <td class="ai">IBERIABANK</td>
+ <td class="closing">November 13, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ucb.html">United Commercial Bank</a></td>
+ <td class="city">San Francisco</td>
+ <td class="state">CA</td>
+ <td class="cert">32469</td>
+ <td class="ai">East West Bank</td>
+ <td class="closing">November 6, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="gateway-mo.html">Gateway Bank of St. Louis</a></td>
+ <td class="city">St. Louis</td>
+ <td class="state">MO</td>
+ <td class="cert">19450</td>
+ <td class="ai">Central Bank of Kansas City</td>
+ <td class="closing">November 6, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="prosperan.html">Prosperan Bank</a></td>
+ <td class="city">Oakdale</td>
+ <td class="state">MN</td>
+ <td class="cert">35074</td>
+ <td class="ai">Alerus Financial, N.A.</td>
+ <td class="closing">November 6, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="homefsb-mi.html">Home Federal Savings Bank</a></td>
+ <td class="city">Detroit</td>
+ <td class="state">MI</td>
+ <td class="cert">30329</td>
+ <td class="ai">Liberty Bank and Trust Company</td>
+ <td class="closing">November 6, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="unitedsecurity-ga.html">United Security Bank</a></td>
+ <td class="city">Sparta</td>
+ <td class="state">GA</td>
+ <td class="cert">22286</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">November 6, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="northhouston-tx.html">North Houston Bank</a></td>
+ <td class="city">Houston</td>
+ <td class="state">TX</td>
+ <td class="cert">18776</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="madisonville-tx.html">Madisonville State Bank</a></td>
+ <td class="city">Madisonville</td>
+ <td class="state">TX</td>
+ <td class="cert">33782</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizens-teague.html">Citizens National Bank</a></td>
+ <td class="city">Teague</td>
+ <td class="state">TX</td>
+ <td class="cert">25222</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="park-il.html">Park National Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">11677</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pacificnational-ca.html">Pacific National Bank</a></td>
+ <td class="city">San Francisco</td>
+ <td class="state">CA</td>
+ <td class="cert">30006</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="calnational.html">California National Bank</a></td>
+ <td class="city">Los Angeles</td>
+ <td class="state">CA</td>
+ <td class="cert">34659</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sandiegonational.html">San Diego National Bank</a></td>
+ <td class="city">San Diego</td>
+ <td class="state">CA</td>
+ <td class="cert">23594</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community-lemont.html">Community Bank of Lemont</a></td>
+ <td class="city">Lemont</td>
+ <td class="state">IL</td>
+ <td class="cert">35291</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankusa-az.html">Bank USA, N.A.</a></td>
+ <td class="city">Phoenix</td>
+ <td class="state">AZ</td>
+ <td class="cert">32218</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstdupage.html">First DuPage Bank</a></td>
+ <td class="city">Westmont</td>
+ <td class="state">IL</td>
+ <td class="cert">35038</td>
+ <td class="ai">First Midwest Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="riverview-mn.html">Riverview Community Bank</a></td>
+ <td class="city">Otsego</td>
+ <td class="state">MN</td>
+ <td class="cert">57525</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="elmwood.html">Bank of Elmwood</a></td>
+ <td class="city">Racine</td>
+ <td class="state">WI</td>
+ <td class="cert">18321</td>
+ <td class="ai">Tri City National Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="flagship.html">Flagship National Bank</a></td>
+ <td class="city">Bradenton</td>
+ <td class="state">FL</td>
+ <td class="cert">35044</td>
+ <td class="ai">First Federal Bank of Florida</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="hillcrest-fl.html">Hillcrest Bank Florida</a></td>
+ <td class="city">Naples</td>
+ <td class="state">FL</td>
+ <td class="cert">58336</td>
+ <td class="ai">Stonegate Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americanunited.html">American United Bank</a></td>
+ <td class="city">Lawrenceville</td>
+ <td class="state">GA</td>
+ <td class="cert">57794</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="partners-fl.html">Partners Bank</a></td>
+ <td class="city">Naples</td>
+ <td class="state">FL</td>
+ <td class="cert">57959</td>
+ <td class="ai">Stonegate Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sanjoaquin.html">San Joaquin Bank</a></td>
+ <td class="city">Bakersfield</td>
+ <td class="state">CA</td>
+ <td class="cert">23266</td>
+ <td class="ai">Citizens Business Bank</td>
+ <td class="closing">October 16, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="scnb-co.html">Southern Colorado National Bank</a></td>
+ <td class="city">Pueblo</td>
+ <td class="state">CO</td>
+ <td class="cert">57263</td>
+ <td class="ai">Legacy Bank</td>
+ <td class="closing">October 2, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="jennings-mn.html">Jennings State Bank</a></td>
+ <td class="city">Spring Grove</td>
+ <td class="state">MN</td>
+ <td class="cert">11416</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">October 2, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="warren-mi.html">Warren Bank</a></td>
+ <td class="city">Warren</td>
+ <td class="state">MI</td>
+ <td class="cert">34824</td>
+ <td class="ai">The Huntington National Bank</td>
+ <td class="closing">October 2, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="georgian.html">Georgian Bank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">57151</td>
+ <td class="ai">First Citizens Bank and Trust Company, Inc.</td>
+ <td class="closing">September 25, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="irwin-ky.html">Irwin Union Bank, F.S.B.</a></td>
+ <td class="city">Louisville</td>
+ <td class="state">KY</td>
+ <td class="cert">57068</td>
+ <td class="ai">First Financial Bank, N.A.</td>
+ <td class="closing">September 18, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="irwin-in.html">Irwin Union Bank and Trust Company</a></td>
+ <td class="city">Columbus</td>
+ <td class="state">IN</td>
+ <td class="cert">10100</td>
+ <td class="ai">First Financial Bank, N.A.</td>
+ <td class="closing">September 18, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="venture-wa.html">Venture Bank</a></td>
+ <td class="city">Lacey</td>
+ <td class="state">WA</td>
+ <td class="cert">22868</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">September 11, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="brickwell-mn.html">Brickwell Community Bank</a></td>
+ <td class="city">Woodbury</td>
+ <td class="state">MN</td>
+ <td class="cert">57736</td>
+ <td class="ai">CorTrust Bank N.A.</td>
+ <td class="closing">September 11, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="corus.html">Corus Bank, N.A.</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">13693</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">September 11, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firststate-az.html">First State Bank</a></td>
+ <td class="city">Flagstaff</td>
+ <td class="state">AZ</td>
+ <td class="cert">34875</td>
+ <td class="ai">Sunwest Bank</td>
+ <td class="closing">September 4, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="platinum-il.html">Platinum Community Bank</a></td>
+ <td class="city">Rolling Meadows</td>
+ <td class="state">IL</td>
+ <td class="cert">35030</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">September 4, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="vantus.html">Vantus Bank</a></td>
+ <td class="city">Sioux City</td>
+ <td class="state">IA</td>
+ <td class="cert">27732</td>
+ <td class="ai">Great Southern Bank</td>
+ <td class="closing">September 4, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="inbank.html">InBank</a></td>
+ <td class="city">Oak Forest</td>
+ <td class="state">IL</td>
+ <td class="cert">20203</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">September 4, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstbankkc-mo.html">First Bank of Kansas City</a></td>
+ <td class="city">Kansas City</td>
+ <td class="state">MO</td>
+ <td class="cert">25231</td>
+ <td class="ai">Great American Bank</td>
+ <td class="closing">September 4, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="affinity-ca.html">Affinity Bank</a></td>
+ <td class="city">Ventura</td>
+ <td class="state">CA</td>
+ <td class="cert">27197</td>
+ <td class="ai">Pacific Western Bank</td>
+ <td class="closing">August 28, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mainstreet-mn.html">Mainstreet Bank</a></td>
+ <td class="city">Forest Lake</td>
+ <td class="state">MN</td>
+ <td class="cert">1909</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">August 28, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bradford-md.html">Bradford Bank</a></td>
+ <td class="city">Baltimore</td>
+ <td class="state">MD</td>
+ <td class="cert">28312</td>
+ <td class="ai">Manufacturers and Traders Trust Company (M&T Bank)</td>
+ <td class="closing">August 28, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="guaranty-tx.html">Guaranty Bank</a></td>
+ <td class="city">Austin</td>
+ <td class="state">TX</td>
+ <td class="cert">32618</td>
+ <td class="ai">BBVA Compass</td>
+ <td class="closing">August 21, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="capitalsouth.html">CapitalSouth Bank</a></td>
+ <td class="city">Birmingham</td>
+ <td class="state">AL</td>
+ <td class="cert">22130</td>
+ <td class="ai">IBERIABANK</td>
+ <td class="closing">August 21, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="coweta.html">First Coweta Bank</a></td>
+ <td class="city">Newnan</td>
+ <td class="state">GA</td>
+ <td class="cert">57702</td>
+ <td class="ai">United Bank</td>
+ <td class="closing">August 21, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ebank.html">ebank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">34682</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">August 21, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community-nv.html">Community Bank of Nevada</a></td>
+ <td class="city">Las Vegas</td>
+ <td class="state">NV</td>
+ <td class="cert">34043</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">August 14, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community-az.html">Community Bank of Arizona</a></td>
+ <td class="city">Phoenix</td>
+ <td class="state">AZ</td>
+ <td class="cert">57645</td>
+ <td class="ai">MidFirst Bank</td>
+ <td class="closing">August 14, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="union-az.html">Union Bank, National Association</a></td>
+ <td class="city">Gilbert</td>
+ <td class="state">AZ</td>
+ <td class="cert">34485</td>
+ <td class="ai">MidFirst Bank</td>
+ <td class="closing">August 14, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="colonial-al.html">Colonial Bank</a></td>
+ <td class="city">Montgomery</td>
+ <td class="state">AL</td>
+ <td class="cert">9609</td>
+ <td class="ai">Branch Banking & Trust Company, (BB&T)</td>
+ <td class="closing">August 14, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="dwelling.html">Dwelling House Savings and Loan Association</a></td>
+ <td class="city">Pittsburgh</td>
+ <td class="state">PA</td>
+ <td class="cert">31559</td>
+ <td class="ai">PNC Bank, N.A.</td>
+ <td class="closing">August 14, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community-prineville.html">Community First Bank</a></td>
+ <td class="city">Prineville</td>
+ <td class="state">OR</td>
+ <td class="cert">23268</td>
+ <td class="ai">Home Federal Bank</td>
+ <td class="closing">August 7, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community-venice.html">Community National Bank of Sarasota County</a></td>
+ <td class="city">Venice</td>
+ <td class="state">FL</td>
+ <td class="cert">27183</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">August 7, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fsb-sarasota.html">First State Bank</a></td>
+ <td class="city">Sarasota</td>
+ <td class="state">FL</td>
+ <td class="cert">27364</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">August 7, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mutual-harvey.html">Mutual Bank</a></td>
+ <td class="city">Harvey</td>
+ <td class="state">IL</td>
+ <td class="cert">18659</td>
+ <td class="ai">United Central Bank</td>
+ <td class="closing">July 31, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americano.html">First BankAmericano</a></td>
+ <td class="city">Elizabeth</td>
+ <td class="state">NJ</td>
+ <td class="cert">34270</td>
+ <td class="ai">Crown Bank</td>
+ <td class="closing">July 31, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peoplescommunity-oh.html">Peoples Community Bank</a></td>
+ <td class="city">West Chester</td>
+ <td class="state">OH</td>
+ <td class="cert">32288</td>
+ <td class="ai">First Financial Bank, N.A.</td>
+ <td class="closing">July 31, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="integrity-fl.html">Integrity Bank</a></td>
+ <td class="city">Jupiter</td>
+ <td class="state">FL</td>
+ <td class="cert">57604</td>
+ <td class="ai">Stonegate Bank</td>
+ <td class="closing">July 31, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fsb-altus.html">First State Bank of Altus</a></td>
+ <td class="city">Altus</td>
+ <td class="state">OK</td>
+ <td class="cert">9873</td>
+ <td class="ai">Herring Bank</td>
+ <td class="closing">July 31, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-jones.html">Security Bank of Jones County</a></td>
+ <td class="city">Gray</td>
+ <td class="state">GA</td>
+ <td class="cert">8486</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-houston.html">Security Bank of Houston County</a></td>
+ <td class="city">Perry</td>
+ <td class="state">GA</td>
+ <td class="cert">27048</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-bibb.html">Security Bank of Bibb County</a></td>
+ <td class="city">Macon</td>
+ <td class="state">GA</td>
+ <td class="cert">27367</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-metro.html">Security Bank of North Metro</a></td>
+ <td class="city">Woodstock</td>
+ <td class="state">GA</td>
+ <td class="cert">57105</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-fulton.html">Security Bank of North Fulton</a></td>
+ <td class="city">Alpharetta</td>
+ <td class="state">GA</td>
+ <td class="cert">57430</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-gwinnett.html">Security Bank of Gwinnett County</a></td>
+ <td class="city">Suwanee</td>
+ <td class="state">GA</td>
+ <td class="cert">57346</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="waterford.html">Waterford Village Bank</a></td>
+ <td class="city">Williamsville</td>
+ <td class="state">NY</td>
+ <td class="cert">58065</td>
+ <td class="ai">Evans Bank, N.A.</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="temecula.html">Temecula Valley Bank</a></td>
+ <td class="city">Temecula</td>
+ <td class="state">CA</td>
+ <td class="cert">34341</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">July 17, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="vineyard.html">Vineyard Bank</a></td>
+ <td class="city">Rancho Cucamonga</td>
+ <td class="state">CA</td>
+ <td class="cert">23556</td>
+ <td class="ai">California Bank & Trust</td>
+ <td class="closing">July 17, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankfirst.html">BankFirst</a></td>
+ <td class="city">Sioux Falls</td>
+ <td class="state">SD</td>
+ <td class="cert">34103</td>
+ <td class="ai">Alerus Financial, N.A.</td>
+ <td class="closing">July 17, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="piedmont.html">First Piedmont Bank</a></td>
+ <td class="city">Winder</td>
+ <td class="state">GA</td>
+ <td class="cert">34594</td>
+ <td class="ai">First American Bank and Trust Company</td>
+ <td class="closing">July 17, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="wyoming.html">Bank of Wyoming</a></td>
+ <td class="city">Thermopolis</td>
+ <td class="state">WY</td>
+ <td class="cert">22754</td>
+ <td class="ai">Central Bank & Trust</td>
+ <td class="closing">July 10, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="founders.html">Founders Bank</a></td>
+ <td class="city">Worth</td>
+ <td class="state">IL</td>
+ <td class="cert">18390</td>
+ <td class="ai">The PrivateBank and Trust Company</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="millennium.html">Millennium State Bank of Texas</a></td>
+ <td class="city">Dallas</td>
+ <td class="state">TX</td>
+ <td class="cert">57667</td>
+ <td class="ai">State Bank of Texas</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">October 26, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="danville.html">First National Bank of Danville</a></td>
+ <td class="city">Danville</td>
+ <td class="state">IL</td>
+ <td class="cert">3644</td>
+ <td class="ai">First Financial Bank, N.A.</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="elizabeth.html">Elizabeth State Bank</a></td>
+ <td class="city">Elizabeth</td>
+ <td class="state">IL</td>
+ <td class="cert">9262</td>
+ <td class="ai">Galena State Bank and Trust Company</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="rockriver.html">Rock River Bank</a></td>
+ <td class="city">Oregon</td>
+ <td class="state">IL</td>
+ <td class="cert">15302</td>
+ <td class="ai">The Harvard State Bank</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="winchester.html">First State Bank of Winchester</a></td>
+ <td class="city">Winchester</td>
+ <td class="state">IL</td>
+ <td class="cert">11710</td>
+ <td class="ai">The First National Bank of Beardstown</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="warner.html">John Warner Bank</a></td>
+ <td class="city">Clinton</td>
+ <td class="state">IL</td>
+ <td class="cert">12093</td>
+ <td class="ai">State Bank of Lincoln</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mirae.html">Mirae Bank</a></td>
+ <td class="city">Los Angeles</td>
+ <td class="state">CA</td>
+ <td class="cert">57332</td>
+ <td class="ai">Wilshire State Bank</td>
+ <td class="closing">June 26, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="metropacific.html">MetroPacific Bank</a></td>
+ <td class="city">Irvine</td>
+ <td class="state">CA</td>
+ <td class="cert">57893</td>
+ <td class="ai">Sunwest Bank</td>
+ <td class="closing">June 26, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="horizon.html">Horizon Bank</a></td>
+ <td class="city">Pine City</td>
+ <td class="state">MN</td>
+ <td class="cert">9744</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">June 26, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="neighbor.html">Neighborhood Community Bank</a></td>
+ <td class="city">Newnan</td>
+ <td class="state">GA</td>
+ <td class="cert">35285</td>
+ <td class="ai">CharterBank</td>
+ <td class="closing">June 26, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="communityga.html">Community Bank of West Georgia</a></td>
+ <td class="city">Villa Rica</td>
+ <td class="state">GA</td>
+ <td class="cert">57436</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">June 26, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="anthony.html">First National Bank of Anthony</a></td>
+ <td class="city">Anthony</td>
+ <td class="state">KS</td>
+ <td class="cert">4614</td>
+ <td class="ai">Bank of Kansas</td>
+ <td class="closing">June 19, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cooperative.html">Cooperative Bank</a></td>
+ <td class="city">Wilmington</td>
+ <td class="state">NC</td>
+ <td class="cert">27837</td>
+ <td class="ai">First Bank</td>
+ <td class="closing">June 19, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="scb.html">Southern Community Bank</a></td>
+ <td class="city">Fayetteville</td>
+ <td class="state">GA</td>
+ <td class="cert">35251</td>
+ <td class="ai">United Community Bank</td>
+ <td class="closing">June 19, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lincolnwood.html">Bank of Lincolnwood</a></td>
+ <td class="city">Lincolnwood</td>
+ <td class="state">IL</td>
+ <td class="cert">17309</td>
+ <td class="ai">Republic Bank of Chicago</td>
+ <td class="closing">June 5, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizensnational.html">Citizens National Bank</a></td>
+ <td class="city">Macomb</td>
+ <td class="state">IL</td>
+ <td class="cert">5757</td>
+ <td class="ai">Morton Community Bank</td>
+ <td class="closing">May 22, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="strategiccapital.html">Strategic Capital Bank</a></td>
+ <td class="city">Champaign</td>
+ <td class="state">IL</td>
+ <td class="cert">35175</td>
+ <td class="ai">Midland States Bank</td>
+ <td class="closing">May 22, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankunited.html">BankUnited, FSB</a></td>
+ <td class="city">Coral Gables</td>
+ <td class="state">FL</td>
+ <td class="cert">32247</td>
+ <td class="ai">BankUnited</td>
+ <td class="closing">May 21, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westsound.html">Westsound Bank</a></td>
+ <td class="city">Bremerton</td>
+ <td class="state">WA</td>
+ <td class="cert">34843</td>
+ <td class="ai">Kitsap Bank</td>
+ <td class="closing">May 8, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americawest.html">America West Bank</a></td>
+ <td class="city">Layton</td>
+ <td class="state">UT</td>
+ <td class="cert">35461</td>
+ <td class="ai">Cache Valley Bank</td>
+ <td class="closing">May 1, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizens.html">Citizens Community Bank</a></td>
+ <td class="city">Ridgewood</td>
+ <td class="state">NJ</td>
+ <td class="cert">57563</td>
+ <td class="ai">North Jersey Community Bank</td>
+ <td class="closing">May 1, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="silverton.html">Silverton Bank, NA</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">26535</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">May 1, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstbankidaho.html">First Bank of Idaho</a></td>
+ <td class="city">Ketchum</td>
+ <td class="state">ID</td>
+ <td class="cert">34396</td>
+ <td class="ai">U.S. Bank, N.A.</td>
+ <td class="closing">April 24, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="beverlyhills.html">First Bank of Beverly Hills</a></td>
+ <td class="city">Calabasas</td>
+ <td class="state">CA</td>
+ <td class="cert">32069</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">April 24, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="michiganheritage.html">Michigan Heritage Bank</a></td>
+ <td class="city">Farmington Hills</td>
+ <td class="state">MI</td>
+ <td class="cert">34369</td>
+ <td class="ai">Level One Bank</td>
+ <td class="closing">April 24, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amsouthern.html">American Southern Bank</a></td>
+ <td class="city">Kennesaw</td>
+ <td class="state">GA</td>
+ <td class="cert">57943</td>
+ <td class="ai">Bank of North Georgia</td>
+ <td class="closing">April 24, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="greatbasin.html">Great Basin Bank of Nevada</a></td>
+ <td class="city">Elko</td>
+ <td class="state">NV</td>
+ <td class="cert">33824</td>
+ <td class="ai">Nevada State Bank</td>
+ <td class="closing">April 17, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amsterling.html">American Sterling Bank</a></td>
+ <td class="city">Sugar Creek</td>
+ <td class="state">MO</td>
+ <td class="cert">8266</td>
+ <td class="ai">Metcalf Bank</td>
+ <td class="closing">April 17, 2009</td>
+ <td class="updated">August 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newfrontier.html">New Frontier Bank</a></td>
+ <td class="city">Greeley</td>
+ <td class="state">CO</td>
+ <td class="cert">34881</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">April 10, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="capefear.html">Cape Fear Bank</a></td>
+ <td class="city">Wilmington</td>
+ <td class="state">NC</td>
+ <td class="cert">34639</td>
+ <td class="ai">First Federal Savings and Loan Association</td>
+ <td class="closing">April 10, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="omni.html">Omni National Bank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">22238</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 27, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="teambank.html">TeamBank, NA</a></td>
+ <td class="city">Paola</td>
+ <td class="state">KS</td>
+ <td class="cert">4754</td>
+ <td class="ai">Great Southern Bank</td>
+ <td class="closing">March 20, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="coloradonational.html">Colorado National Bank</a></td>
+ <td class="city">Colorado Springs</td>
+ <td class="state">CO</td>
+ <td class="cert">18896</td>
+ <td class="ai">Herring Bank</td>
+ <td class="closing">March 20, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcity.html">FirstCity Bank</a></td>
+ <td class="city">Stockbridge</td>
+ <td class="state">GA</td>
+ <td class="cert">18243</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 20, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="freedomga.html">Freedom Bank of Georgia</a></td>
+ <td class="city">Commerce</td>
+ <td class="state">GA</td>
+ <td class="cert">57558</td>
+ <td class="ai">Northeast Georgia Bank</td>
+ <td class="closing">March 6, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="securitysavings.html">Security Savings Bank</a></td>
+ <td class="city">Henderson</td>
+ <td class="state">NV</td>
+ <td class="cert">34820</td>
+ <td class="ai">Bank of Nevada</td>
+ <td class="closing">February 27, 2009</td>
+ <td class="updated">September 7, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heritagebank.html">Heritage Community Bank</a></td>
+ <td class="city">Glenwood</td>
+ <td class="state">IL</td>
+ <td class="cert">20078</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">February 27, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="silverfalls.html">Silver Falls Bank</a></td>
+ <td class="city">Silverton</td>
+ <td class="state">OR</td>
+ <td class="cert">35399</td>
+ <td class="ai">Citizens Bank</td>
+ <td class="closing">February 20, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pinnacle.html">Pinnacle Bank of Oregon</a></td>
+ <td class="city">Beaverton</td>
+ <td class="state">OR</td>
+ <td class="cert">57342</td>
+ <td class="ai">Washington Trust Bank of Spokane</td>
+ <td class="closing">February 13, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cornbelt.html">Corn Belt Bank & Trust Co.</a></td>
+ <td class="city">Pittsfield</td>
+ <td class="state">IL</td>
+ <td class="cert">16500</td>
+ <td class="ai">The Carlinville National Bank</td>
+ <td class="closing">February 13, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="riverside.html">Riverside Bank of the Gulf Coast</a></td>
+ <td class="city">Cape Coral</td>
+ <td class="state">FL</td>
+ <td class="cert">34563</td>
+ <td class="ai">TIB Bank</td>
+ <td class="closing">February 13, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sherman.html">Sherman County Bank</a></td>
+ <td class="city">Loup City</td>
+ <td class="state">NE</td>
+ <td class="cert">5431</td>
+ <td class="ai">Heritage Bank</td>
+ <td class="closing">February 13, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="county.html">County Bank</a></td>
+ <td class="city">Merced</td>
+ <td class="state">CA</td>
+ <td class="cert">22574</td>
+ <td class="ai">Westamerica Bank</td>
+ <td class="closing">February 6, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="alliance.html">Alliance Bank</a></td>
+ <td class="city">Culver City</td>
+ <td class="state">CA</td>
+ <td class="cert">23124</td>
+ <td class="ai">California Bank & Trust</td>
+ <td class="closing">February 6, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstbank.html">FirstBank Financial Services</a></td>
+ <td class="city">McDonough</td>
+ <td class="state">GA</td>
+ <td class="cert">57017</td>
+ <td class="ai">Regions Bank</td>
+ <td class="closing">February 6, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ocala.html">Ocala National Bank</a></td>
+ <td class="city">Ocala</td>
+ <td class="state">FL</td>
+ <td class="cert">26538</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">January 30, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="suburban.html">Suburban FSB</a></td>
+ <td class="city">Crofton</td>
+ <td class="state">MD</td>
+ <td class="cert">30763</td>
+ <td class="ai">Bank of Essex</td>
+ <td class="closing">January 30, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="magnet.html">MagnetBank</a></td>
+ <td class="city">Salt Lake City</td>
+ <td class="state">UT</td>
+ <td class="cert">58001</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">January 30, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="centennial.html">1st Centennial Bank</a></td>
+ <td class="city">Redlands</td>
+ <td class="state">CA</td>
+ <td class="cert">33025</td>
+ <td class="ai">First California Bank</td>
+ <td class="closing">January 23, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="clark.html">Bank of Clark County</a></td>
+ <td class="city">Vancouver</td>
+ <td class="state">WA</td>
+ <td class="cert">34959</td>
+ <td class="ai">Umpqua Bank</td>
+ <td class="closing">January 16, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commerce.html">National Bank of Commerce</a></td>
+ <td class="city">Berkeley</td>
+ <td class="state">IL</td>
+ <td class="cert">19733</td>
+ <td class="ai">Republic Bank of Chicago</td>
+ <td class="closing">January 16, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sanderson.html">Sanderson State Bank</a><br><a href="sanderson_spanish.html">En Espanol</a></td>
+ <td class="city">Sanderson</td>
+ <td class="state">TX</td>
+ <td class="cert">11568</td>
+ <td class="ai">The Pecos County State Bank</td>
+ <td class="closing">December 12, 2008</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="haventrust.html">Haven Trust Bank</a></td>
+ <td class="city">Duluth</td>
+ <td class="state">GA</td>
+ <td class="cert">35379</td>
+ <td class="ai">Branch Banking & Trust Company, (BB&T)</td>
+ <td class="closing">December 12, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstga.html">First Georgia Community Bank</a></td>
+ <td class="city">Jackson</td>
+ <td class="state">GA</td>
+ <td class="cert">34301</td>
+ <td class="ai">United Bank</td>
+ <td class="closing">December 5, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pff.html">PFF Bank & Trust</a></td>
+ <td class="city">Pomona</td>
+ <td class="state">CA</td>
+ <td class="cert">28344</td>
+ <td class="ai">U.S. Bank, N.A.</td>
+ <td class="closing">November 21, 2008</td>
+ <td class="updated">January 4, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="downey.html">Downey Savings & Loan</a></td>
+ <td class="city">Newport Beach</td>
+ <td class="state">CA</td>
+ <td class="cert">30968</td>
+ <td class="ai">U.S. Bank, N.A.</td>
+ <td class="closing">November 21, 2008</td>
+ <td class="updated">January 4, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community.html">Community Bank</a></td>
+ <td class="city">Loganville</td>
+ <td class="state">GA</td>
+ <td class="cert">16490</td>
+ <td class="ai">Bank of Essex</td>
+ <td class="closing">November 21, 2008</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="securitypacific.html">Security Pacific Bank</a></td>
+ <td class="city">Los Angeles</td>
+ <td class="state">CA</td>
+ <td class="cert">23595</td>
+ <td class="ai">Pacific Western Bank</td>
+ <td class="closing">November 7, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="franklinbank.html">Franklin Bank, SSB</a></td>
+ <td class="city">Houston</td>
+ <td class="state">TX</td>
+ <td class="cert">26870</td>
+ <td class="ai">Prosperity Bank</td>
+ <td class="closing">November 7, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="freedom.html">Freedom Bank</a></td>
+ <td class="city">Bradenton</td>
+ <td class="state">FL</td>
+ <td class="cert">57930</td>
+ <td class="ai">Fifth Third Bank</td>
+ <td class="closing">October 31, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="alpha.html">Alpha Bank & Trust</a></td>
+ <td class="city">Alpharetta</td>
+ <td class="state">GA</td>
+ <td class="cert">58241</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">October 24, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="meridian.html">Meridian Bank</a></td>
+ <td class="city">Eldred</td>
+ <td class="state">IL</td>
+ <td class="cert">13789</td>
+ <td class="ai">National Bank</td>
+ <td class="closing">October 10, 2008</td>
+ <td class="updated">May 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mainstreet.html">Main Street Bank</a></td>
+ <td class="city">Northville</td>
+ <td class="state">MI</td>
+ <td class="cert">57654</td>
+ <td class="ai">Monroe Bank & Trust</td>
+ <td class="closing">October 10, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="wamu.html">Washington Mutual Bank<br>(Including its subsidiary Washington Mutual Bank FSB)</a></td>
+ <td class="city">Henderson</td>
+ <td class="state">NV</td>
+ <td class="cert">32633</td>
+ <td class="ai">JP Morgan Chase Bank</td>
+ <td class="closing">September 25, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ameribank.html">Ameribank</a></td>
+ <td class="city">Northfork</td>
+ <td class="state">WV</td>
+ <td class="cert">6782</td>
+ <td class="ai">The Citizens Savings Bank<br><br>Pioneer Community Bank, Inc.</td>
+ <td class="closing">September 19, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="silverstate.html">Silver State Bank</a><br><a href="silverstatesp.html">En Espanol</a></td>
+ <td class="city">Henderson</td>
+ <td class="state">NV</td>
+ <td class="cert">34194</td>
+ <td class="ai">Nevada State Bank</td>
+ <td class="closing">September 5, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="integrity.html">Integrity Bank</a></td>
+ <td class="city">Alpharetta</td>
+ <td class="state">GA</td>
+ <td class="cert">35469</td>
+ <td class="ai">Regions Bank</td>
+ <td class="closing">August 29, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="columbian.html">Columbian Bank & Trust</a></td>
+ <td class="city">Topeka</td>
+ <td class="state">KS</td>
+ <td class="cert">22728</td>
+ <td class="ai">Citizens Bank & Trust</td>
+ <td class="closing">August 22, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstprioritybank.html">First Priority Bank</a></td>
+ <td class="city">Bradenton</td>
+ <td class="state">FL</td>
+ <td class="cert">57523</td>
+ <td class="ai">SunTrust Bank</td>
+ <td class="closing">August 1, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heritage.html">First Heritage Bank, NA</a></td>
+ <td class="city">Newport Beach</td>
+ <td class="state">CA</td>
+ <td class="cert">57961</td>
+ <td class="ai">Mutual of Omaha Bank</td>
+ <td class="closing">July 25, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fnbnv.html">First National Bank of Nevada</a></td>
+ <td class="city">Reno</td>
+ <td class="state">NV</td>
+ <td class="cert">27011</td>
+ <td class="ai">Mutual of Omaha Bank</td>
+ <td class="closing">July 25, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="IndyMac.html">IndyMac Bank</a></td>
+ <td class="city">Pasadena</td>
+ <td class="state">CA</td>
+ <td class="cert">29730</td>
+ <td class="ai">OneWest Bank, FSB</td>
+ <td class="closing">July 11, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="first_integrity_bank.html">First Integrity Bank, NA</a></td>
+ <td class="city">Staples</td>
+ <td class="state">MN</td>
+ <td class="cert">12736</td>
+ <td class="ai">First International Bank and Trust</td>
+ <td class="closing">May 30, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="anb.html">ANB Financial, NA</a></td>
+ <td class="city">Bentonville</td>
+ <td class="state">AR</td>
+ <td class="cert">33901</td>
+ <td class="ai">Pulaski Bank and Trust Company</td>
+ <td class="closing">May 9, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="Hume.html">Hume Bank</a></td>
+ <td class="city">Hume</td>
+ <td class="state">MO</td>
+ <td class="cert">1971</td>
+ <td class="ai">Security Bank</td>
+ <td class="closing">March 7, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="Douglass.html">Douglass National Bank</a></td>
+ <td class="city">Kansas City</td>
+ <td class="state">MO</td>
+ <td class="cert">24660</td>
+ <td class="ai">Liberty Bank and Trust Company</td>
+ <td class="closing">January 25, 2008</td>
+ <td class="updated">October 26, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="MiamiValley.html">Miami Valley Bank</a></td>
+ <td class="city">Lakeview</td>
+ <td class="state">OH</td>
+ <td class="cert">16848</td>
+ <td class="ai">The Citizens Banking Company</td>
+ <td class="closing">October 4, 2007</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="NetBank.html">NetBank</a></td>
+ <td class="city">Alpharetta</td>
+ <td class="state">GA</td>
+ <td class="cert">32575</td>
+ <td class="ai">ING DIRECT</td>
+ <td class="closing">September 28, 2007</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="MetropolitanSB.html">Metropolitan Savings Bank</a></td>
+ <td class="city">Pittsburgh</td>
+ <td class="state">PA</td>
+ <td class="cert">35353</td>
+ <td class="ai">Allegheny Valley Bank of Pittsburgh</td>
+ <td class="closing">February 2, 2007</td>
+ <td class="updated">October 27, 2010</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ephraim.html">Bank of Ephraim</a></td>
+ <td class="city">Ephraim</td>
+ <td class="state">UT</td>
+ <td class="cert">1249</td>
+ <td class="ai">Far West Bank</td>
+ <td class="closing">June 25, 2004</td>
+ <td class="updated">April 9, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="reliance.html">Reliance Bank</a></td>
+ <td class="city">White Plains</td>
+ <td class="state">NY</td>
+ <td class="cert">26778</td>
+ <td class="ai">Union State Bank</td>
+ <td class="closing">March 19, 2004</td>
+ <td class="updated">April 9, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="gnb.html">Guaranty National Bank of Tallahassee</a></td>
+ <td class="city">Tallahassee</td>
+ <td class="state">FL</td>
+ <td class="cert">26838</td>
+ <td class="ai">Hancock Bank of Florida</td>
+ <td class="closing">March 12, 2004</td>
+ <td class="updated">June 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="dollar.html">Dollar Savings Bank</a></td>
+ <td class="city">Newark</td>
+ <td class="state">NJ</td>
+ <td class="cert">31330</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">February 14, 2004</td>
+ <td class="updated">April 9, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pulaski.html">Pulaski Savings Bank</a></td>
+ <td class="city">Philadelphia</td>
+ <td class="state">PA</td>
+ <td class="cert">27203</td>
+ <td class="ai">Earthstar Bank</td>
+ <td class="closing">November 14, 2003</td>
+ <td class="updated">July 22, 2005</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="blanchardville.html">First National Bank of Blanchardville</a></td>
+ <td class="city">Blanchardville</td>
+ <td class="state">WI</td>
+ <td class="cert">11639</td>
+ <td class="ai">The Park Bank</td>
+ <td class="closing">May 9, 2003</td>
+ <td class="updated">June 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="spbank.html">Southern Pacific Bank</a></td>
+ <td class="city">Torrance</td>
+ <td class="state">CA</td>
+ <td class="cert">27094</td>
+ <td class="ai">Beal Bank</td>
+ <td class="closing">February 7, 2003</td>
+ <td class="updated">October 20, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="farmers.html">Farmers Bank of Cheneyville</a></td>
+ <td class="city">Cheneyville</td>
+ <td class="state">LA</td>
+ <td class="cert">16445</td>
+ <td class="ai">Sabine State Bank & Trust</td>
+ <td class="closing">December 17, 2002</td>
+ <td class="updated">October 20, 2004</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofalamo.html">Bank of Alamo</a></td>
+ <td class="city">Alamo</td>
+ <td class="state">TN</td>
+ <td class="cert">9961</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">November 8, 2002</td>
+ <td class="updated">March 18, 2005</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amtrade.html">AmTrade International Bank</a><br><a href="amtrade-spanish.html">En Espanol</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">33784</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">September 30, 2002</td>
+ <td class="updated">September 11, 2006</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="universal.html">Universal Federal Savings Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">29355</td>
+ <td class="ai">Chicago Community Bank</td>
+ <td class="closing">June 27, 2002</td>
+ <td class="updated">April 9, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cbc.html">Connecticut Bank of Commerce</a></td>
+ <td class="city">Stamford</td>
+ <td class="state">CT</td>
+ <td class="cert">19183</td>
+ <td class="ai">Hudson United Bank</td>
+ <td class="closing">June 26, 2002</td>
+ <td class="updated">February 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newcentury.html">New Century Bank</a></td>
+ <td class="city">Shelby Township</td>
+ <td class="state">MI</td>
+ <td class="cert">34979</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 28, 2002</td>
+ <td class="updated">March 18, 2005</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="netfirst.html">Net 1st National Bank</a></td>
+ <td class="city">Boca Raton</td>
+ <td class="state">FL</td>
+ <td class="cert">26652</td>
+ <td class="ai">Bank Leumi USA</td>
+ <td class="closing">March 1, 2002</td>
+ <td class="updated">April 9, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="nextbank.html">NextBank, NA</a></td>
+ <td class="city">Phoenix</td>
+ <td class="state">AZ</td>
+ <td class="cert">22314</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">February 7, 2002</td>
+ <td class="updated">August 27, 2010</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="Oakwood.html">Oakwood Deposit Bank Co.</a></td>
+ <td class="city">Oakwood</td>
+ <td class="state">OH</td>
+ <td class="cert">8966</td>
+ <td class="ai">The State Bank & Trust Company</td>
+ <td class="closing">February 1, 2002</td>
+ <td class="updated">October 25, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sierrablanca.html">Bank of Sierra Blanca</a></td>
+ <td class="city">Sierra Blanca</td>
+ <td class="state">TX</td>
+ <td class="cert">22002</td>
+ <td class="ai">The Security State Bank of Pecos</td>
+ <td class="closing">January 18, 2002</td>
+ <td class="updated">November 6, 2003</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="hamilton.html">Hamilton Bank, NA</a><br><a href="hamilton-spanish.html">En Espanol</a></td>
+ <td class="city">Miami</td>
+ <td class="state">FL</td>
+ <td class="cert">24382</td>
+ <td class="ai">Israel Discount Bank of New York</td>
+ <td class="closing">January 11, 2002</td>
+ <td class="updated">June 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sinclair.html">Sinclair National Bank</a></td>
+ <td class="city">Gravette</td>
+ <td class="state">AR</td>
+ <td class="cert">34248</td>
+ <td class="ai">Delta Trust & Bank</td>
+ <td class="closing">September 7, 2001</td>
+ <td class="updated">February 10, 2004</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="superior.html">Superior Bank, FSB</a></td>
+ <td class="city">Hinsdale</td>
+ <td class="state">IL</td>
+ <td class="cert">32646</td>
+ <td class="ai">Superior Federal, FSB</td>
+ <td class="closing">July 27, 2001</td>
+ <td class="updated">June 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="Malta.html">Malta National Bank</a></td>
+ <td class="city">Malta</td>
+ <td class="state">OH</td>
+ <td class="cert">6629</td>
+ <td class="ai">North Valley Bank</td>
+ <td class="closing">May 3, 2001</td>
+ <td class="updated">November 18, 2002</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstalliance.html">First Alliance Bank & Trust Co.</a></td>
+ <td class="city">Manchester</td>
+ <td class="state">NH</td>
+ <td class="cert">34264</td>
+ <td class="ai">Southern New Hampshire Bank & Trust</td>
+ <td class="closing">February 2, 2001</td>
+ <td class="updated">February 18, 2003</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="nsb.html">National State Bank of Metropolis</a></td>
+ <td class="city">Metropolis</td>
+ <td class="state">IL</td>
+ <td class="cert">3815</td>
+ <td class="ai">Banterra Bank of Marion</td>
+ <td class="closing">December 14, 2000</td>
+ <td class="updated">March 17, 2005</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="boh.html">Bank of Honolulu</a></td>
+ <td class="city">Honolulu</td>
+ <td class="state">HI</td>
+ <td class="cert">21029</td>
+ <td class="ai">Bank of the Orient</td>
+ <td class="closing">October 13, 2000</td>
+ <td class="updated">March 17, 2005</td>
+ </tr>
+ </tbody>
+ </table>
+ </div>
- <div>
- <ul id="footer-bottom">
- <li><a href="/about/freedom/" title="Freedom of Information Act (FOIA) Service Center">Freedom of Information Act (FOIA) Service Center</a></li>
- <li>|</li>
- <li><a href="/open/" title="FDIC Open Government Webpage">FDIC Open Government Webpage</a><a href="/about/diversity/nofear/" title="No FEAR Act Data"></a></li> <li>|</li>
- <li><a href="/about/diversity/nofear/" title="No FEAR Act Data">No FEAR Act Data</a><a href="/about/diversity/nofear/" title="No FEAR Act Data"></a></li>
- </ul>
- </div>
- </div><!-- end of footer container -->
-<!-- end footer -->
-</div><!-- ends site-container -->
+</div>
+<div id="page_foot">
+ <div class="date">Last Updated 05/31/2013</div>
+ <div class="email"><a href="mailto:cservicefdicdal@fdic.gov">cservicefdicdal@fdic.gov</a></div>
+ <div class="clear"></div>
+</div>
-<script language="JavaScript" type="text/javascript">
+<!-- START of Footer -->
+<footer>
+<link rel="stylesheet" type="text/css" href="/responsive/footer/css/footer.css" />
+<div id="responsive_footer">
+ <div id="responsive_footer-full">
+ <ul>
+ <li><a href="/" title="Home">Home</a></li>
+ <li><a href="/about/contact/ask/" title="Contact Us">Contact Us</a></li>
+ <li><a href="/search/" title="Search">Search</a></li>
+ <li><a href="/help/" title="Help">Help</a></li>
+ <li><a href="/sitemap/" title="SiteMap">SiteMap</a></li>
+ <li><a href="/regulations/laws/forms/" title="Forms">Forms</a></li>
+ <li><a href="/quicklinks/spanish.html" title="En Español">En Español</a></li>
+ </ul>
+ <hr>
+ <ul>
+ <li><a href="/about/policies/" title="Website Policies">Website Policies</a></li>
+ <li><a href="/about/privacy/policy/" title="Privacy Policy">Privacy Policy</a></li>
+ <li><a href="/plainlanguage/" title="Privacy Policy">Plain Writing Act of 2010 </a></li>
+ <li><a href="http://www.usa.gov/" title="USA.gov">USA.gov</a></li>
+ <li><a href="http://www.fdicoig.gov/" title="FDIC Office of Inspector General">FDIC Office of Inspector General</a></li>
+ </ul>
+ <hr>
+ <ul>
+ <li><a href="/about/freedom/" title="Freedom of Information Act (FOIA) Service Center">Freedom of Information Act (FOIA) Service Center</a></li>
+ <li><a href="/open/" title="FDIC Open Government Webpage">FDIC Open Government Webpage</a></li>
+ <li><a href="/about/diversity/nofear/" title="No FEAR Act Data">No FEAR Act Data</a></li>
+ </ul>
+ </div>
+ <div id="responsive_footer-small">
+ <ul>
+ <li><a href="/" title="Home">Home</a></li>
+ <li><a href="/about/contact/ask/" title="Contact Us">Contact Us</a></li>
+ <li><a href="/about/policies/" title="Website Policies">Website Policies</a></li>
+ <li><a href="/search/" title="Search">Search</a></li>
+ </ul>
+ </div>
+</div>
+</footer>
+<!-- START Omniture SiteCatalyst Code -->
+<script language="JavaScript" type="text/javascript" src="/js/s_code_v1.js"></script>
+<script type="text/javascript">
/************* DO NOT ALTER ANYTHING BELOW THIS LINE ! **************/
var s_code=s.t();if(s_code)document.write(s_code)</script>
-<script language="JavaScript" type="text/javascript">
+<script type="text/javascript">
if(navigator.appVersion.indexOf('MSIE')>=0)document.write(unescape('%3C')+'\!-'+'-')
</script>
<noscript>
<a href="http://www.omniture.com" title="Web Analytics">
-<img src="http://fdic.122.2o7.net/b/ss/fdicgovprod/1/H.21--NS/0?[AQB]%26cl=Session%26AQE" height="1" width="1" border="0" alt="" /></a>
+<img src="http://fdic.122.2o7.net/b/ss/fdicgovprod/1/H.21--NS/0?[AQB]%26cl=Session%26AQE" height="1" width="1" border="0" alt="" /></a></li>
</noscript>
-
<!--/DO NOT REMOVE/-->
-<!-- End SiteCatalyst code version: H.21. -->
-<!-- end footer -->
-<!-- END FOOTER INCLUDE -->
+<!-- END Omniture SiteCatalyst Code -->
+<!-- END of Footer -->
+<script type="text/javascript" src="/responsive/js/jquery.tablesorter.js"></script>
+<script type="text/javascript" src="banklist.js"></script>
</body>
</html>
diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py
index 418b5471d0406..ea3c0520de169 100644
--- a/pandas/io/tests/test_html.py
+++ b/pandas/io/tests/test_html.py
@@ -41,7 +41,7 @@ def _skip_if_none(module_names):
if isinstance(module_names, basestring):
_skip_if_no(module_names)
else:
- if not any(_have_module(module_name) for module_name in module_names):
+ if not all(_have_module(module_name) for module_name in module_names):
raise nose.SkipTest
@@ -388,7 +388,7 @@ def test(self):
def run_read_html(self, *args, **kwargs):
kwargs['flavor'] = 'bs4'
- _skip_if_no('lxml')
+ _skip_if_none(('lxml', 'bs4'))
parser = _BeautifulSoupLxmlFrameParser
return _run_read_html(parser, *args, **kwargs)
@@ -400,7 +400,7 @@ def test(self):
def run_read_html(self, *args, **kwargs):
kwargs['flavor'] = 'bs4'
- _skip_if_no('html5lib')
+ _skip_if_none(('html5lib', 'bs4'))
parser = _BeautifulSoupHtml5LibFrameParser
return _run_read_html(parser, *args, **kwargs)
@@ -417,17 +417,16 @@ def try_remove_ws(x):
ground_truth = read_csv(os.path.join(DATA_PATH, 'banklist.csv'),
converters={'Updated Date': Timestamp,
'Closing Date': Timestamp})
- # these will not
self.assertTupleEqual(df.shape, ground_truth.shape)
- old = ['First Vietnamese American Bank In Vietnamese',
- 'Westernbank Puerto Rico En Espanol',
- 'R-G Premier Bank of Puerto Rico En Espanol',
- 'Eurobank En Espanol', 'Sanderson State Bank En Espanol',
- 'Washington Mutual Bank (Including its subsidiary Washington '
+ old = ['First Vietnamese American BankIn Vietnamese',
+ 'Westernbank Puerto RicoEn Espanol',
+ 'R-G Premier Bank of Puerto RicoEn Espanol',
+ 'EurobankEn Espanol', 'Sanderson State BankEn Espanol',
+ 'Washington Mutual Bank(Including its subsidiary Washington '
'Mutual Bank FSB)',
- 'Silver State Bank En Espanol',
+ 'Silver State BankEn Espanol',
'AmTrade International BankEn Espanol',
- 'Hamilton Bank, NA En Espanol',
+ 'Hamilton Bank, NAEn Espanol',
'The Citizens Savings BankPioneer Community Bank, Inc.']
new = ['First Vietnamese American Bank', 'Westernbank Puerto Rico',
'R-G Premier Bank of Puerto Rico', 'Eurobank',
| https://api.github.com/repos/pandas-dev/pandas/pulls/3741 | 2013-06-03T06:13:06Z | 2013-06-04T16:58:59Z | 2013-06-04T16:58:59Z | 2014-06-12T18:25:57Z | |
TST: Fix assert_almost_equal error message | diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 823d2c81bb72c..dd86862a2d551 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -141,7 +141,7 @@ def assert_almost_equal(a, b, check_less_precise = False):
assert_almost_equal(a[i], b[i], check_less_precise)
return True
- err_msg = lambda a, b: 'expected %.5f but got %.5f' % (a, b)
+ err_msg = lambda a, b: 'expected %.5f but got %.5f' % (b, a)
if isnull(a):
np.testing.assert_(isnull(b))
| Most tests (in pandas' test suite and in general) are of form
`assert_almost_equal(result, expected)`. Verbose error message was
treating its first argument as expected, this is now fixed.
Previous message was:
``` python
err_msg = lambda a, b: 'expected %.5f but got %.5f' % (a, b)
```
But a, the first argument, is actually `actual`, not expected.
Everywhere else, it makes sense to display `"%s != %s" % (a,b)`
| https://api.github.com/repos/pandas-dev/pandas/pulls/3737 | 2013-06-02T09:15:07Z | 2013-06-03T17:11:19Z | 2013-06-03T17:11:19Z | 2014-07-16T08:11:22Z |
PERF: speed up where operations when splitting blocks (GH3733) | diff --git a/RELEASE.rst b/RELEASE.rst
index 35741f7eb008f..4573b45ccaf16 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -76,6 +76,7 @@ pandas 0.11.1
GH3572_). This happens before any drawing takes place which elimnates any
spurious plots from showing up.
- Added Faq section on repr display options, to help users customize their setup.
+ - ``where`` operations that result in block splitting are much faster (GH3733_)
**API Changes**
@@ -116,6 +117,8 @@ pandas 0.11.1
- the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
deprecated
- Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_)
+ - ``as_matrix`` with mixed signed and unsigned dtypes will result in 2 x the lcd of the unsigned
+ as an int, maxing with ``int64``, to avoid precision issues (GH3733_)
**Bug Fixes**
@@ -273,6 +276,7 @@ pandas 0.11.1
.. _GH3691: https://github.com/pydata/pandas/issues/3691
.. _GH3696: https://github.com/pydata/pandas/issues/3696
.. _GH3667: https://github.com/pydata/pandas/issues/3667
+.. _GH3733: https://github.com/pydata/pandas/issues/3733
pandas 0.11.0
=============
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 8b711f5e077ce..af1543dad0314 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -558,42 +558,38 @@ def func(c,v,o):
result.fill(np.nan)
return result
- def create_block(result, items, transpose=True):
+ # see if we can operate on the entire block, or need item-by-item
+ result = func(cond,values,other)
+ if self._can_hold_na:
+
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
- if transpose and is_transposed:
+ if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
- return make_block(result, items, self.ref_items)
+ return make_block(result, self.items, self.ref_items)
- # see if we can operate on the entire block, or need item-by-item
- if not self._can_hold_na:
- axis = cond.ndim-1
- result_blocks = []
- for item in self.items:
- loc = self.items.get_loc(item)
- item = self.items.take([loc])
- v = values.take([loc],axis=axis)
- c = cond.take([loc],axis=axis)
- o = other.take([loc],axis=axis) if hasattr(other,'shape') else other
-
- result = func(c,v,o)
- if len(result) == 1:
- result = np.repeat(result,self.shape[1:])
-
- result = _block_shape(result,ndim=self.ndim,shape=self.shape[1:])
- result_blocks.append(create_block(result, item, transpose=False))
-
- return result_blocks
- else:
- result = func(cond,values,other)
- return create_block(result, self.items)
+ # might need to separate out blocks
+ axis = cond.ndim-1
+ cond = cond.swapaxes(axis,0)
+ mask = np.array([ cond[i].all() for i in enumerate(range(cond.shape[0]))],dtype=bool)
+
+ result_blocks = []
+ for m in [mask, ~mask]:
+ if m.any():
+ items = self.items[m]
+ slices = [slice(None)] * cond.ndim
+ slices[axis] = self.items.get_indexer(items)
+ r = self._try_cast_result(result[slices])
+ result_blocks.append(make_block(r.T, items, self.ref_items))
+
+ return result_blocks
class NumericBlock(Block):
is_numeric = True
@@ -2429,7 +2425,22 @@ def _lcd_dtype(l):
elif have_bool:
return np.dtype(bool)
elif have_int and not have_float and not have_complex:
- return _lcd_dtype(counts[IntBlock])
+
+ # if we are mixing unsigned and signed, then return
+ # the next biggest int type (if we can)
+ lcd = _lcd_dtype(counts[IntBlock])
+ kinds = set([ i.dtype.kind for i in counts[IntBlock] ])
+ if len(kinds) == 1:
+ return lcd
+
+ if lcd == 'uint64' or lcd == 'int64':
+ return np.dtype('int64')
+
+ # return 1 bigger on the itemsize if unsinged
+ if lcd.kind == 'u':
+ return np.dtype('int%s' % (lcd.itemsize*8*2))
+ return lcd
+
elif have_dt64 and not have_float and not have_complex:
return np.dtype('M8[ns]')
elif have_complex:
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index fa6579ca61358..8964b21756439 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -271,6 +271,16 @@ def test_getitem_boolean_casting(self):
expected = Series({'float64': 6, 'int32' : 1, 'int64' : 1})
assert_series_equal(result, expected)
+ # where dtype conversions
+ # GH 3733
+ df = DataFrame(data = np.random.randn(100, 50))
+ df = df.where(df > 0) # create nans
+ bools = df > 0
+ mask = isnull(df)
+ expected = bools.astype(float).mask(mask)
+ result = bools.mask(mask)
+ assert_frame_equal(result,expected)
+
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
@@ -7568,8 +7578,10 @@ def test_where(self):
def _safe_add(df):
# only add to the numeric items
- return DataFrame(dict([ (c,s+1) if issubclass(s.dtype.type, (np.integer,np.floating)) else (c,s) for c, s in df.iteritems() ]))
-
+ def is_ok(s):
+ return issubclass(s.dtype.type, (np.integer,np.floating)) and s.dtype != 'uint8'
+ return DataFrame(dict([ (c,s+1) if is_ok(s) else (c,s) for c, s in df.iteritems() ]))
+
def _check_get(df, cond, check_dtypes = True):
other1 = _safe_add(df)
rs = df.where(cond, other1)
@@ -7605,7 +7617,7 @@ def _check_get(df, cond, check_dtypes = True):
def _check_align(df, cond, other, check_dtypes = True):
rs = df.where(cond, other)
for i, k in enumerate(rs.columns):
- v = rs[k]
+ result = rs[k]
d = df[k].values
c = cond[k].reindex(df[k].index).fillna(False).values
@@ -7613,12 +7625,16 @@ def _check_align(df, cond, other, check_dtypes = True):
o = other
else:
if isinstance(other,np.ndarray):
- o = Series(other[:,i],index=v.index).values
+ o = Series(other[:,i],index=result.index).values
else:
o = other[k].values
new_values = d if c.all() else np.where(c, d, o)
- assert_series_equal(v, Series(new_values,index=v.index))
+ expected = Series(new_values,index=result.index)
+
+ # since we can't always have the correct numpy dtype
+ # as numpy doesn't know how to downcast, don't check
+ assert_series_equal(result, expected, check_dtype=False)
# dtypes
# can't check dtype when other is an ndarray
@@ -9894,14 +9910,14 @@ def test_as_matrix_lcd(self):
self.assert_(values.dtype == np.float16)
values = self.mixed_int.as_matrix(['A','B','C','D'])
- self.assert_(values.dtype == np.uint64)
+ self.assert_(values.dtype == np.int64)
values = self.mixed_int.as_matrix(['A','D'])
self.assert_(values.dtype == np.int64)
# guess all ints are cast to uints....
values = self.mixed_int.as_matrix(['A','B','C'])
- self.assert_(values.dtype == np.uint64)
+ self.assert_(values.dtype == np.int64)
values = self.mixed_int.as_matrix(['A','C'])
self.assert_(values.dtype == np.int32)
diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py
index 7745450e5c03b..122851bf91a26 100644
--- a/vb_suite/frame_methods.py
+++ b/vb_suite/frame_methods.py
@@ -177,3 +177,18 @@ def f(K=500):
"""
frame_xs_col = Benchmark('df.xs(50000,axis = 1)', setup)
+
+## masking
+setup = common_setup + """
+data = np.random.randn(1000, 500)
+df = DataFrame(data)
+df = df.where(df > 0) # create nans
+bools = df > 0
+mask = isnull(df)
+"""
+
+mask_bools = Benchmark('bools.mask(mask)', setup,
+ start_date=datetime(2013,1,1))
+
+mask_floats = Benchmark('bools.astype(float).mask(mask)', setup,
+ start_date=datetime(2013,1,1))
| close #3733
| https://api.github.com/repos/pandas-dev/pandas/pulls/3736 | 2013-06-01T20:37:47Z | 2013-06-02T11:58:40Z | 2013-06-02T11:58:40Z | 2014-07-16T08:11:20Z |
BUG/BLD: pytables version checking was incorrect | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 046263a9cb63c..0a86d72a05f16 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -109,15 +109,12 @@ def _tables():
global _table_supports_index
if _table_mod is None:
import tables
+ from distutils.version import LooseVersion
_table_mod = tables
# version requirements
- ver = tables.__version__.split('.')
- try:
- if int(ver[0]) >= 2 and int(ver[1][0]) >= 3:
- _table_supports_index = True
- except:
- pass
+ ver = tables.__version__
+ _table_supports_index = LooseVersion(ver) >= '2.3'
return _table_mod
| https://api.github.com/repos/pandas-dev/pandas/pulls/3735 | 2013-06-01T20:15:39Z | 2013-06-01T20:38:56Z | 2013-06-01T20:38:56Z | 2014-07-16T08:11:19Z | |
BLD: test_perf.py, add --base-pickle --target-pickle options to test_perf | diff --git a/vb_suite/test_perf.py b/vb_suite/test_perf.py
index 72b441d79be84..b0d029de7371a 100755
--- a/vb_suite/test_perf.py
+++ b/vb_suite/test_perf.py
@@ -37,10 +37,18 @@
import random
import numpy as np
+import pandas as pd
from pandas import DataFrame, Series
+try:
+ import git # gitpython
+except Exception:
+ print("Error: Please install the `gitpython` package\n")
+ sys.exit(1)
+
from suite import REPO_PATH
+VB_DIR = os.path.dirname(os.path.abspath(__file__))
DEFAULT_MIN_DURATION = 0.01
HEAD_COL="head[ms]"
BASE_COL="base[ms]"
@@ -57,6 +65,14 @@
parser.add_argument('-t', '--target-commit',
help='The commit to compare against the baseline (default: HEAD).',
type=str)
+parser.add_argument('--base-pickle',
+ help='name of pickle file with timings data generated by a former `-H -d FILE` run. '\
+ 'filename must be of the form <hash>-*.* or specify --base-commit seperately',
+ type=str)
+parser.add_argument('--target-pickle',
+ help='name of pickle file with timings data generated by a former `-H -d FILE` run '\
+ 'filename must be of the form <hash>-*.* or specify --target-commit seperately',
+ type=str)
parser.add_argument('-m', '--min-duration',
help='Minimum duration (in ms) of baseline test for inclusion in report (default: %.3f).' % DEFAULT_MIN_DURATION,
type=float,
@@ -104,8 +120,7 @@
parser.add_argument('-a', '--affinity',
metavar="a",
dest='affinity',
- default=1,
- type=int,
+ default=None,
help='set processor affinity of processm by default bind to cpu/core #1 only'
'requires the "affinity" python module , will raise Warning otherwise' )
@@ -206,21 +221,34 @@ def profile_comparative(benchmarks):
head_res = get_results_df(db, h_head)
baseline_res = get_results_df(db, h_baseline)
- totals = prep_totals(baseline_res, head_res)
-
- h_msg = repo.messages.get(h_head, "")
- b_msg = repo.messages.get(h_baseline, "")
- print_report(totals,h_head=h_head,h_msg=h_msg,
- h_baseline=h_baseline,b_msg=b_msg)
+ report_comparative(head_res,baseline_res)
- if args.outdf:
- prprint("The results DataFrame was written to '%s'\n" % args.outdf)
- totals.save(args.outdf)
finally:
# print("Disposing of TMP_DIR: %s" % TMP_DIR)
shutil.rmtree(TMP_DIR)
+def prep_pickle_for_total(df, agg_name='median'):
+ """
+ accepts a datafram resulting from invocation with -H -d o.pickle
+ If multiple data columns are present (-N was used), the
+ `agg_name` attr of the datafram will be used to reduce
+ them to a single value per vbench, df.median is used by defa
+ ult.
+
+ Returns a datadrame of the form expected by prep_totals
+ """
+ def prep(df):
+ agg = getattr(df,agg_name)
+ df = DataFrame(agg(1))
+ cols = list(df.columns)
+ cols[0]='timing'
+ df.columns=cols
+ df['name'] = list(df.index)
+ return df
+
+ return prep(df)
+
def prep_totals(head_res, baseline_res):
"""
Each argument should be a dataframe with 'timing' and 'name' columns
@@ -241,6 +269,27 @@ def prep_totals(head_res, baseline_res):
).sort("ratio").set_index('name') # sort in ascending order
return totals
+def report_comparative(head_res,baseline_res):
+ try:
+ r=git.Repo(VB_DIR)
+ except:
+ import pdb
+ pdb.set_trace()
+
+ totals = prep_totals(head_res,baseline_res)
+
+ h_head = args.target_commit
+ h_baseline = args.base_commit
+ h_msg = r.commit(h_head).message.strip()
+ b_msg = r.commit(h_baseline).message.strip()
+
+ print_report(totals,h_head=h_head,h_msg=h_msg,
+ h_baseline=h_baseline,b_msg=b_msg)
+
+ if args.outdf:
+ prprint("The results DataFrame was written to '%s'\n" % args.outdf)
+ totals.save(args.outdf)
+
def profile_head_single(benchmark):
import gc
results = []
@@ -398,18 +447,23 @@ def main():
random.seed(args.seed)
np.random.seed(args.seed)
- try:
- import affinity
- affinity.set_process_affinity_mask(0,args.affinity)
- assert affinity.get_process_affinity_mask(0) == args.affinity
- print("CPU affinity set to %d" % args.affinity)
- except ImportError:
- import warnings
- print("\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"+
- "The 'affinity' module is not available, results may be unreliable\n" +
- "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n"
- )
- time.sleep(2)
+ if args.base_pickle and args.target_pickle:
+ baseline_res = prep_pickle_for_total(pd.load(args.base_pickle))
+ target_res = prep_pickle_for_total(pd.load(args.target_pickle))
+
+ report_comparative(target_res, baseline_res)
+ sys.exit(0)
+
+ if args.affinity is not None:
+ try:
+ import affinity
+
+ affinity.set_process_affinity_mask(0,args.affinity)
+ assert affinity.get_process_affinity_mask(0) == args.affinity
+ print("CPU affinity set to %d" % args.affinity)
+ except ImportError:
+ print("-a/--afinity specified, but the 'affinity' module is not available, aborting.\n")
+ sys.exit(1)
print("\n")
prprint("LOG_FILE = %s" % args.log_file)
@@ -489,10 +543,40 @@ def inner(repo_path):
if __name__ == '__main__':
args = parser.parse_args()
- if not args.head and (not args.base_commit and not args.target_commit):
+ if (not args.head
+ and not (args.base_commit and args.target_commit)
+ and not (args.base_pickle and args.target_pickle)):
parser.print_help()
- else:
- import warnings
- warnings.filterwarnings('ignore',category=FutureWarning)
- warnings.filterwarnings('ignore',category=DeprecationWarning)
- main()
+ sys.exit(1)
+ elif ((args.base_pickle or args.target_pickle) and not
+ (args.base_pickle and args.target_pickle)):
+ print("Must specify Both --base-pickle and --target-pickle.")
+ sys.exit(1)
+
+ if ((args.base_pickle or args.target_pickle) and not
+ (args.base_commit and args.target_commit)):
+ if not args.base_commit:
+ print("base_commit not specified, Assuming base_pickle is named <commit>-foo.*")
+ args.base_commit = args.base_pickle.split('-')[0]
+ if not args.target_commit:
+ print("target_commit not specified, Assuming target_pickle is named <commit>-foo.*")
+ print(args.target_pickle.split('-')[0])
+ args.target_commit = args.target_pickle.split('-')[0]
+
+ import warnings
+ warnings.filterwarnings('ignore',category=FutureWarning)
+ warnings.filterwarnings('ignore',category=DeprecationWarning)
+
+ if args.base_commit and args.target_commit:
+ print("Verifying specified commits exist in repo...")
+ r=git.Repo(VB_DIR)
+ for c in [ args.base_commit, args.target_commit ]:
+ try:
+ msg = r.commit(c).message.strip()
+ except git.BadObject:
+ print("The commit '%s' was not found, aborting" % c)
+ sys.exit(1)
+ else:
+ print("%s: %s" % (c,msg))
+
+ main()
| Until now test_perf either generated a single commit report for HEAD,
or fell back to vbench to compare two commits. with this change, can now
compare results between saved results of test_perf -H invocations.
Flow:
- Use build_cache (cdev from #3156) to jump to desired commit using build_cache
- checkout current vb_suite from upstream/master
- use `test_perf -H -d <commit>-foo.pickle` to save timings to file, for
target and base commits
- Use test_perf options `--base-pickle`, `--target-pickle` to generate comparison report.
cc @jreback
```
λ ./test_perf.sh --base-pickle 31ecaa9-0.10.1.pickle --target-pickle f9eea30-0.11.0.pickle
This script compares the performance of two commits.
Make sure the python 'vbench' library is installed.
Setting the BUILD_CACHE_DIR env var to a temp directory will
potentially speed up subsequent runs.
base_commit not specified, Assuming base_pickle is named <commit>-foo.*
target_commit not specified, Assuming target_pickle is named <commit>-foo.*
f9eea30
Verifying specified commits exist in repo...
31ecaa9: RLS: set released to true
f9eea30: RLS: Version 0.11
***
Invoked with :
--ncalls: 3
--repeats: 3
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
frame_reindex_columns | 0.3260 | 0.2900 | 1.1241 |
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
Ratio < 1.0 means the target commit is faster then the baseline.
Seed used: 1234
Target [f9eea30] : RLS: Version 0.11
Base [31ecaa9] : RLS: set released to true
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3734 | 2013-06-01T19:49:33Z | 2013-06-01T19:50:22Z | 2013-06-01T19:50:22Z | 2014-06-24T15:20:16Z |
DOC: fix read_html attribute reading example | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 204dd2c984ba7..0a1f0e74255bb 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1021,9 +1021,9 @@ Specify an HTML attribute
.. ipython:: python
- dfs = read_html(url)
- len(dfs)
- dfs[0]
+ dfs1 = read_html(url, attrs={'id': 'table'})
+ dfs2 = read_html(url, attrs={'class': 'sortable'})
+ np.all(dfs1[0] == dfs2[0])
Use some combination of the above
| https://api.github.com/repos/pandas-dev/pandas/pulls/3732 | 2013-05-31T22:04:01Z | 2013-05-31T22:10:47Z | 2013-05-31T22:10:47Z | 2014-07-16T08:11:14Z | |
API: raise TypeError on most datetime64 reduction ops | diff --git a/RELEASE.rst b/RELEASE.rst
index 3a347246be8dd..8da3b4760c303 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -97,6 +97,12 @@ pandas 0.11.1
in your calls.
- Do not allow astypes on ``datetime64[ns]`` except to ``object``, and
``timedelta64[ns]`` to ``object/int`` (GH3425_)
+ - The behavior of ``datetime64`` dtypes has changed with respect to certain
+ so-called reduction operations (GH3726_). The following operations now
+ raise a ``TypeError`` when perfomed on a ``Series`` and return an *empty*
+ ``Series`` when performed on a ``DataFrame`` similar to performing these
+ operations on, for example, a ``DataFrame`` of ``slice`` objects:
+ - sum, prod, mean, std, var, skew, kurt, corr, and cov
- Do not allow datetimelike/timedeltalike creation except with valid types
(e.g. cannot pass ``datetime64[ms]``) (GH3423_)
- Add ``squeeze`` keyword to ``groupby`` to allow reduction from
@@ -294,6 +300,7 @@ pandas 0.11.1
.. _GH3748: https://github.com/pydata/pandas/issues/3748
.. _GH3741: https://github.com/pydata/pandas/issues/3741
.. _GH3750: https://github.com/pydata/pandas/issues/3750
+.. _GH3726: https://github.com/pydata/pandas/issues/3726
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index badb364d214d1..982b2f9f2eb3b 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -128,6 +128,17 @@ API changes
- ``DataFrame.to_html`` and ``DataFrame.to_latex`` now accept a path for
their first argument (GH3702_)
+ - Do not allow astypes on ``datetime64[ns]`` except to ``object``, and
+ ``timedelta64[ns]`` to ``object/int`` (GH3425_)
+
+ - The behavior of ``datetime64`` dtypes has changed with respect to certain
+ so-called reduction operations (GH3726_). The following operations now
+ raise a ``TypeError`` when perfomed on a ``Series`` and return an *empty*
+ ``Series`` when performed on a ``DataFrame`` similar to performing these
+ operations on, for example, a ``DataFrame`` of ``slice`` objects:
+
+ - sum, prod, mean, std, var, skew, kurt, corr, and cov
+
Enhancements
~~~~~~~~~~~~
@@ -345,3 +356,5 @@ on GitHub for a complete list.
.. _GH3696: https://github.com/pydata/pandas/issues/3696
.. _GH3667: https://github.com/pydata/pandas/issues/3667
.. _GH3741: https://github.com/pydata/pandas/issues/3741
+.. _GH3726: https://github.com/pydata/pandas/issues/3726
+.. _GH3425: https://github.com/pydata/pandas/issues/3425
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index f841c0dbecd8e..0d940dc348dc1 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -1,10 +1,11 @@
import sys
+import itertools
+import functools
import numpy as np
from pandas.core.common import isnull, notnull
import pandas.core.common as com
-import pandas.core.config as cf
import pandas.lib as lib
import pandas.algos as algos
import pandas.hashtable as _hash
@@ -17,41 +18,70 @@
_USE_BOTTLENECK = False
-def _bottleneck_switch(bn_name, alt, zero_value=None, **kwargs):
- try:
- bn_func = getattr(bn, bn_name)
- except (AttributeError, NameError): # pragma: no cover
- bn_func = None
+class disallow(object):
+ def __init__(self, *dtypes):
+ super(disallow, self).__init__()
+ self.dtypes = tuple(np.dtype(dtype).type for dtype in dtypes)
+
+ def check(self, obj):
+ return hasattr(obj, 'dtype') and issubclass(obj.dtype.type,
+ self.dtypes)
+
+ def __call__(self, f):
+ @functools.wraps(f)
+ def _f(*args, **kwargs):
+ obj_iter = itertools.chain(args, kwargs.itervalues())
+ if any(self.check(obj) for obj in obj_iter):
+ raise TypeError('reduction operation {0!r} not allowed for '
+ 'this dtype'.format(f.__name__.replace('nan',
+ '')))
+ return f(*args, **kwargs)
+ return _f
+
+
+class bottleneck_switch(object):
+ def __init__(self, zero_value=None, **kwargs):
+ self.zero_value = zero_value
+ self.kwargs = kwargs
+
+ def __call__(self, alt):
+ bn_name = alt.__name__
- def f(values, axis=None, skipna=True, **kwds):
- if len(kwargs) > 0:
- for k, v in kwargs.iteritems():
- if k not in kwds:
- kwds[k] = v
try:
- if zero_value is not None and values.size == 0:
- if values.ndim == 1:
- return 0
+ bn_func = getattr(bn, bn_name)
+ except (AttributeError, NameError): # pragma: no cover
+ bn_func = None
+
+ @functools.wraps(alt)
+ def f(values, axis=None, skipna=True, **kwds):
+ if len(self.kwargs) > 0:
+ for k, v in self.kwargs.iteritems():
+ if k not in kwds:
+ kwds[k] = v
+ try:
+ if self.zero_value is not None and values.size == 0:
+ if values.ndim == 1:
+ return 0
+ else:
+ result_shape = (values.shape[:axis] +
+ values.shape[axis + 1:])
+ result = np.empty(result_shape)
+ result.fill(0)
+ return result
+
+ if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype):
+ result = bn_func(values, axis=axis, **kwds)
+ # prefer to treat inf/-inf as NA
+ if _has_infs(result):
+ result = alt(values, axis=axis, skipna=skipna, **kwds)
else:
- result_shape = values.shape[:
- axis] + values.shape[axis + 1:]
- result = np.empty(result_shape)
- result.fill(0)
- return result
-
- if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype):
- result = bn_func(values, axis=axis, **kwds)
- # prefer to treat inf/-inf as NA
- if _has_infs(result):
result = alt(values, axis=axis, skipna=skipna, **kwds)
- else:
+ except Exception:
result = alt(values, axis=axis, skipna=skipna, **kwds)
- except Exception:
- result = alt(values, axis=axis, skipna=skipna, **kwds)
- return result
+ return result
- return f
+ return f
def _bn_ok_dtype(dt):
@@ -166,13 +196,17 @@ def nanall(values, axis=None, skipna=True):
values, mask, dtype = _get_values(values, skipna, True, copy=skipna)
return values.all(axis)
-def _nansum(values, axis=None, skipna=True):
+@disallow('M8')
+@bottleneck_switch(zero_value=0)
+def nansum(values, axis=None, skipna=True):
values, mask, dtype = _get_values(values, skipna, 0)
the_sum = values.sum(axis)
the_sum = _maybe_null_out(the_sum, axis, mask)
return the_sum
-def _nanmean(values, axis=None, skipna=True):
+@disallow('M8')
+@bottleneck_switch()
+def nanmean(values, axis=None, skipna=True):
values, mask, dtype = _get_values(values, skipna, 0)
the_sum = _ensure_numeric(values.sum(axis))
count = _get_counts(mask, axis)
@@ -186,8 +220,9 @@ def _nanmean(values, axis=None, skipna=True):
the_mean = the_sum / count if count > 0 else np.nan
return the_mean
-
-def _nanmedian(values, axis=None, skipna=True):
+@disallow('M8')
+@bottleneck_switch()
+def nanmedian(values, axis=None, skipna=True):
def get_median(x):
mask = notnull(x)
if not skipna and not mask.all():
@@ -197,13 +232,31 @@ def get_median(x):
if values.dtype != np.float64:
values = values.astype('f8')
- if values.ndim > 1:
- return np.apply_along_axis(get_median, axis, values)
- else:
- return get_median(values)
+ notempty = values.size
-
-def _nanvar(values, axis=None, skipna=True, ddof=1):
+ # an array from a frame
+ if values.ndim > 1:
+ # there's a non-empty array to apply over otherwise numpy raises
+ if notempty:
+ return np.apply_along_axis(get_median, axis, values)
+
+ # must return the correct shape, but median is not defined for the
+ # empty set so return nans of shape "everything but the passed axis"
+ # since "axis" is where the reduction would occur if we had a nonempty
+ # array
+ shp = np.array(values.shape)
+ dims = np.arange(values.ndim)
+ ret = np.empty(shp[dims != axis])
+ ret.fill(np.nan)
+ return ret
+
+ # otherwise return a scalar value
+ return get_median(values) if notempty else np.nan
+
+
+@disallow('M8')
+@bottleneck_switch(ddof=1)
+def nanvar(values, axis=None, skipna=True, ddof=1):
if not isinstance(values.dtype.type, np.floating):
values = values.astype('f8')
@@ -223,7 +276,8 @@ def _nanvar(values, axis=None, skipna=True, ddof=1):
return np.fabs((XX - X ** 2 / count) / (count - ddof))
-def _nanmin(values, axis=None, skipna=True):
+@bottleneck_switch()
+def nanmin(values, axis=None, skipna=True):
values, mask, dtype = _get_values(values, skipna, fill_value_typ = '+inf')
# numpy 1.6.1 workaround in Python 3.x
@@ -247,7 +301,8 @@ def _nanmin(values, axis=None, skipna=True):
return _maybe_null_out(result, axis, mask)
-def _nanmax(values, axis=None, skipna=True):
+@bottleneck_switch()
+def nanmax(values, axis=None, skipna=True):
values, mask, dtype = _get_values(values, skipna, fill_value_typ ='-inf')
# numpy 1.6.1 workaround in Python 3.x
@@ -291,14 +346,8 @@ def nanargmin(values, axis=None, skipna=True):
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
-nansum = _bottleneck_switch('nansum', _nansum, zero_value=0)
-nanmean = _bottleneck_switch('nanmean', _nanmean)
-nanmedian = _bottleneck_switch('nanmedian', _nanmedian)
-nanvar = _bottleneck_switch('nanvar', _nanvar, ddof=1)
-nanmin = _bottleneck_switch('nanmin', _nanmin)
-nanmax = _bottleneck_switch('nanmax', _nanmax)
-
+@disallow('M8')
def nanskew(values, axis=None, skipna=True):
if not isinstance(values.dtype.type, np.floating):
values = values.astype('f8')
@@ -332,6 +381,7 @@ def nanskew(values, axis=None, skipna=True):
return result
+@disallow('M8')
def nankurt(values, axis=None, skipna=True):
if not isinstance(values.dtype.type, np.floating):
values = values.astype('f8')
@@ -365,6 +415,7 @@ def nankurt(values, axis=None, skipna=True):
return result
+@disallow('M8')
def nanprod(values, axis=None, skipna=True):
mask = isnull(values)
if skipna and not issubclass(values.dtype.type, np.integer):
@@ -423,6 +474,7 @@ def _zero_out_fperr(arg):
return 0 if np.abs(arg) < 1e-14 else arg
+@disallow('M8')
def nancorr(a, b, method='pearson', min_periods=None):
"""
a, b: ndarrays
@@ -469,6 +521,7 @@ def _spearman(a, b):
return _cor_methods[method]
+@disallow('M8')
def nancov(a, b, min_periods=None):
if len(a) != len(b):
raise AssertionError('Operands to nancov must have same size')
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 64a6e9d3bcaaf..3a7a7d0f49b66 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -97,21 +97,15 @@ def convert_to_array(values):
values = np.array([values])
inferred_type = lib.infer_dtype(values)
if inferred_type in set(['datetime64','datetime','date','time']):
- if isinstance(values, pa.Array) and com.is_datetime64_dtype(values):
- pass
- else:
+ if not (isinstance(values, pa.Array) and com.is_datetime64_dtype(values)):
values = tslib.array_to_datetime(values)
elif inferred_type in set(['timedelta','timedelta64']):
# need to convert timedelta to ns here
# safest to convert it to an object arrany to process
- if isinstance(values, pa.Array) and com.is_timedelta64_dtype(values):
- pass
- else:
+ if not (isinstance(values, pa.Array) and com.is_timedelta64_dtype(values)):
values = com._possibly_cast_to_timedelta(values)
elif inferred_type in set(['integer']):
- if values.dtype == 'timedelta64[ns]':
- pass
- elif values.dtype.kind == 'm':
+ if values.dtype.kind == 'm':
values = values.astype('timedelta64[ns]')
else:
values = pa.array(values)
@@ -125,9 +119,9 @@ def convert_to_array(values):
is_datetime_rhs = com.is_datetime64_dtype(rvalues)
# 2 datetimes or 2 timedeltas
- if (is_timedelta_lhs and is_timedelta_rhs) or (is_datetime_lhs and is_datetime_rhs):
-
- if is_datetime_lhs and name not in ['__sub__']:
+ if (is_timedelta_lhs and is_timedelta_rhs) or (is_datetime_lhs and
+ is_datetime_rhs):
+ if is_datetime_lhs and name != '__sub__':
raise TypeError("can only operate on a datetimes for subtraction, "
"but the operator [%s] was passed" % name)
elif is_timedelta_lhs and name not in ['__add__','__sub__']:
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 0b34d4dc46494..d674a2f44ebe1 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -9167,6 +9167,15 @@ def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
f = getattr(frame, name)
+ if not ('max' in name or 'min' in name or 'count' in name):
+ df = DataFrame({'b': date_range('1/1/2001', periods=2)})
+ _f = getattr(df, name)
+ print df
+ self.assertFalse(len(_f()))
+
+ df['a'] = range(len(df))
+ self.assert_(len(getattr(df, name)()))
+
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index cba908f7136a9..e1589b9499757 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1460,10 +1460,6 @@ def test_sum_inf(self):
with cf.option_context("mode.use_inf_as_null", True):
assert_almost_equal(s.sum(), s2.sum())
- res = nanops.nansum(arr, axis=1)
- expected = nanops._nansum(arr, axis=1)
- assert_almost_equal(res, expected)
-
res = nanops.nansum(arr, axis=1)
self.assertTrue(np.isinf(res).all())
@@ -1594,6 +1590,12 @@ def testit():
# add some NaNs
self.series[5:15] = np.NaN
+
+ # idxmax, idxmin, min, and max are valid for dates
+ if not ('max' in name or 'min' in name):
+ ds = Series(date_range('1/1/2001', periods=10))
+ self.assertRaises(TypeError, f, ds)
+
# skipna or no
self.assert_(notnull(f(self.series)))
self.assert_(isnull(f(self.series, skipna=False)))
| closes #3726.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3731 | 2013-05-31T20:10:10Z | 2013-06-06T02:10:51Z | 2013-06-06T02:10:51Z | 2014-06-22T11:11:14Z |
ENH/CLN: give all AssertionErrors and nose.SkipTest raises an informative message | diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 34b65f169b904..697344639c41b 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -528,9 +528,14 @@ def get_value(self, *args):
-------
value : scalar value
"""
+ nargs = len(args)
+ nreq = self._AXIS_LEN
+
# require an arg for each axis
- if not ((len(args) == self._AXIS_LEN)):
- raise AssertionError()
+ if nargs != nreq:
+ raise TypeError('There must be an argument for each axis, you gave'
+ ' {0} args, but {1} are required'.format(nargs,
+ nreq))
# hm, two layers to the onion
frame = self._get_item_cache(args[0])
@@ -554,8 +559,13 @@ def set_value(self, *args):
otherwise a new object
"""
# require an arg for each axis and the value
- if not ((len(args) == self._AXIS_LEN + 1)):
- raise AssertionError()
+ nargs = len(args)
+ nreq = self._AXIS_LEN + 1
+
+ if nargs != nreq:
+ raise TypeError('There must be an argument for each axis plus the '
+ 'value provided, you gave {0} args, but {1} are '
+ 'required'.format(nargs, nreq))
try:
frame = self._get_item_cache(args[0])
@@ -592,8 +602,10 @@ def __setitem__(self, key, value):
**self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
mat = value.values
elif isinstance(value, np.ndarray):
- if not ((value.shape == shape[1:])):
- raise AssertionError()
+ if value.shape != shape[1:]:
+ raise ValueError('shape of value must be {0}, shape of given '
+ 'object was {1}'.format(shape[1:],
+ value.shape))
mat = np.asarray(value)
elif np.isscalar(value):
dtype, value = _infer_dtype_from_scalar(value)
@@ -1144,8 +1156,9 @@ def _extract_axes(self, data, axes, **kwargs):
@staticmethod
def _extract_axes_for_slice(self, axes):
""" return the slice dictionary for these axes """
- return dict([(self._AXIS_SLICEMAP[i], a) for i, a
- in zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)])
+ return dict([(self._AXIS_SLICEMAP[i], a)
+ for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN -
+ len(axes):], axes)])
@staticmethod
def _prep_ndarray(self, values, copy=True):
@@ -1157,8 +1170,11 @@ def _prep_ndarray(self, values, copy=True):
else:
if copy:
values = values.copy()
- if not ((values.ndim == self._AXIS_LEN)):
- raise AssertionError()
+ if values.ndim != self._AXIS_LEN:
+ raise ValueError("The number of dimensions required is {0}, "
+ "but the number of dimensions of the "
+ "ndarray given was {1}".format(self._AXIS_LEN,
+ values.ndim))
return values
@staticmethod
diff --git a/pandas/core/series.py b/pandas/core/series.py
index d9e9a0034b56b..808c959eee629 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1299,9 +1299,6 @@ def __unicode__(self):
dtype=True)
else:
result = u('Series([], dtype: %s)') % self.dtype
-
- if not (isinstance(result, compat.text_type)):
- raise AssertionError()
return result
def _tidy_repr(self, max_vals=20):
@@ -1377,7 +1374,9 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None,
# catch contract violations
if not isinstance(the_repr, compat.text_type):
- raise AssertionError("expected unicode string")
+ raise AssertionError("result must be of type unicode, type"
+ " of result is {0!r}"
+ "".format(the_repr.__class__.__name__))
if buf is None:
return the_repr
@@ -1397,11 +1396,16 @@ def _get_repr(
"""
formatter = fmt.SeriesFormatter(self, name=name, header=print_header,
- length=length, dtype=dtype, na_rep=na_rep,
+ length=length, dtype=dtype,
+ na_rep=na_rep,
float_format=float_format)
result = formatter.to_string()
- if not (isinstance(result, compat.text_type)):
- raise AssertionError()
+
+ # TODO: following check prob. not neces.
+ if not isinstance(result, compat.text_type):
+ raise AssertionError("result must be of type unicode, type"
+ " of result is {0!r}"
+ "".format(result.__class__.__name__))
return result
def __iter__(self):
diff --git a/pandas/io/date_converters.py b/pandas/io/date_converters.py
index 5c99ab4d0a664..ef92b8692c07f 100644
--- a/pandas/io/date_converters.py
+++ b/pandas/io/date_converters.py
@@ -1,5 +1,5 @@
"""This module is designed for community supported date conversion functions"""
-from pandas.compat import range
+from pandas.compat import range, map
import numpy as np
import pandas.lib as lib
@@ -47,12 +47,16 @@ def _maybe_cast(arr):
def _check_columns(cols):
- if not ((len(cols) > 0)):
- raise AssertionError()
+ if not len(cols):
+ raise AssertionError("There must be at least 1 column")
- N = len(cols[0])
- for c in cols[1:]:
- if not ((len(c) == N)):
- raise AssertionError()
+ head, tail = cols[0], cols[1:]
+
+ N = len(head)
+
+ for i, n in enumerate(map(len, tail)):
+ if n != N:
+ raise AssertionError('All columns must have the same length: {0}; '
+ 'column {1} has length {2}'.format(N, i, n))
return N
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 26f15d5ae2aea..e0b12277f4416 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -552,8 +552,10 @@ def _clean_options(self, options, engine):
# type conversion-related
if converters is not None:
- if not (isinstance(converters, dict)):
- raise AssertionError()
+ if not isinstance(converters, dict):
+ raise TypeError('Type converters must be a dict or'
+ ' subclass, input was '
+ 'a {0!r}'.format(type(converters).__name__))
else:
converters = {}
@@ -631,6 +633,7 @@ def get_chunk(self, size=None):
size = self.chunksize
return self.read(nrows=size)
+
def _is_index_col(col):
return col is not None and col is not False
@@ -1174,6 +1177,7 @@ def TextParser(*args, **kwds):
kwds['engine'] = 'python'
return TextFileReader(*args, **kwds)
+
# delimiter=None, dialect=None, names=None, header=0,
# index_col=None,
# na_values=None,
@@ -1653,8 +1657,8 @@ def _rows_to_cols(self, content):
if self._implicit_index:
col_len += len(self.index_col)
- if not ((self.skip_footer >= 0)):
- raise AssertionError()
+ if self.skip_footer < 0:
+ raise ValueError('skip footer cannot be negative')
if col_len != zip_len and self.index_col is not False:
i = 0
@@ -1883,6 +1887,7 @@ def _clean_na_values(na_values, keep_default_na=True):
return na_values, na_fvalues
+
def _clean_index_names(columns, index_col):
if not _is_index_col(index_col):
return None, columns, index_col
@@ -1941,6 +1946,7 @@ def _floatify_na_values(na_values):
pass
return result
+
def _stringify_na_values(na_values):
""" return a stringified and numeric for these values """
result = []
@@ -1965,6 +1971,7 @@ def _stringify_na_values(na_values):
pass
return set(result)
+
def _get_na_values(col, na_values, na_fvalues):
if isinstance(na_values, dict):
if col in na_values:
@@ -2014,15 +2021,17 @@ def __init__(self, f, colspecs, filler, thousands=None, encoding=None):
encoding = get_option('display.encoding')
self.encoding = encoding
- if not ( isinstance(colspecs, (tuple, list))):
- raise AssertionError()
+ if not isinstance(colspecs, (tuple, list)):
+ raise TypeError("column specifications must be a list or tuple, "
+ "input was a %r" % type(colspecs).__name__)
for colspec in colspecs:
- if not ( isinstance(colspec, (tuple, list)) and
- len(colspec) == 2 and
- isinstance(colspec[0], int) and
- isinstance(colspec[1], int) ):
- raise AssertionError()
+ if not (isinstance(colspec, (tuple, list)) and
+ len(colspec) == 2 and
+ isinstance(colspec[0], int) and
+ isinstance(colspec[1], int)):
+ raise TypeError('Each column specification must be '
+ '2 element tuple or list of integers')
def next(self):
line = next(self.f)
diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py
index 091e149ebb1c0..f647b217fb260 100644
--- a/pandas/io/tests/test_data.py
+++ b/pandas/io/tests/test_data.py
@@ -16,6 +16,13 @@
from numpy.testing import assert_array_equal
+def _skip_if_no_lxml():
+ try:
+ import lxml
+ except ImportError:
+ raise nose.SkipTest("no lxml")
+
+
def assert_n_failed_equals_n_null_columns(wngs, obj, cls=SymbolWarning):
all_nan_cols = pd.Series(dict((k, pd.isnull(v).all()) for k, v in
compat.iteritems(obj)))
@@ -88,10 +95,7 @@ def test_get_multi2(self):
class TestYahoo(unittest.TestCase):
@classmethod
def setUpClass(cls):
- try:
- import lxml
- except ImportError:
- raise nose.SkipTest
+ _skip_if_no_lxml()
@network
def test_yahoo(self):
@@ -210,10 +214,7 @@ def test_get_date_ret_index(self):
class TestYahooOptions(unittest.TestCase):
@classmethod
def setUpClass(cls):
- try:
- import lxml
- except ImportError:
- raise nose.SkipTest
+ _skip_if_no_lxml()
# aapl has monthlies
cls.aapl = web.Options('aapl', 'yahoo')
@@ -272,10 +273,7 @@ def test_get_put_data(self):
class TestOptionsWarnings(unittest.TestCase):
@classmethod
def setUpClass(cls):
- try:
- import lxml
- except ImportError:
- raise nose.SkipTest
+ _skip_if_no_lxml()
with assert_produces_warning(FutureWarning):
cls.aapl = web.Options('aapl')
diff --git a/pandas/io/tests/test_ga.py b/pandas/io/tests/test_ga.py
index e33b75c569fef..a0f4dc45725a3 100644
--- a/pandas/io/tests/test_ga.py
+++ b/pandas/io/tests/test_ga.py
@@ -14,7 +14,7 @@
from pandas.io.auth import AuthenticationConfigError, reset_token_store
from pandas.io import auth
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("need httplib2 and auth libs")
class TestGoogle(unittest.TestCase):
@@ -68,7 +68,7 @@ def test_getdata(self):
assert_frame_equal(df, df2)
except AuthenticationConfigError:
- raise nose.SkipTest
+ raise nose.SkipTest("authentication error")
@slow
@with_connectivity_check("http://www.google.com")
@@ -96,7 +96,7 @@ def test_iterator(self):
assert (df2.index > df1.index).all()
except AuthenticationConfigError:
- raise nose.SkipTest
+ raise nose.SkipTest("authentication error")
@slow
@with_connectivity_check("http://www.google.com")
@@ -150,7 +150,8 @@ def test_segment(self):
assert 'pageviewsPerVisit' in df
except AuthenticationConfigError:
- raise nose.SkipTest
+ raise nose.SkipTest("authentication error")
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py
index c32fc08dab297..dea7f2b079cef 100644
--- a/pandas/io/tests/test_json/test_pandas.py
+++ b/pandas/io/tests/test_json/test_pandas.py
@@ -567,17 +567,11 @@ def test_round_trip_exception_(self):
assert_frame_equal(result.reindex(index=df.index,columns=df.columns),df)
@network
- @slow
def test_url(self):
- try:
-
- url = 'https://api.github.com/repos/pydata/pandas/issues?per_page=5'
- result = read_json(url,convert_dates=True)
- for c in ['created_at','closed_at','updated_at']:
- self.assert_(result[c].dtype == 'datetime64[ns]')
-
- url = 'http://search.twitter.com/search.json?q=pandas%20python'
- result = read_json(url)
+ url = 'https://api.github.com/repos/pydata/pandas/issues?per_page=5'
+ result = read_json(url,convert_dates=True)
+ for c in ['created_at','closed_at','updated_at']:
+ self.assert_(result[c].dtype == 'datetime64[ns]')
- except URLError:
- raise nose.SkipTest
+ url = 'http://search.twitter.com/search.json?q=pandas%20python'
+ result = read_json(url)
diff --git a/pandas/io/tests/test_json/test_ujson.py b/pandas/io/tests/test_json/test_ujson.py
index 38a30b8baf459..13ccf0bbd1742 100644
--- a/pandas/io/tests/test_json/test_ujson.py
+++ b/pandas/io/tests/test_json/test_ujson.py
@@ -29,7 +29,7 @@
def _skip_if_python_ver(skip_major, skip_minor=None):
major, minor = sys.version_info[:2]
if major == skip_major and (skip_minor is None or minor == skip_minor):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping Python version %d.%d" % (major, minor))
json_unicode = (json.dumps if sys.version_info[0] >= 3
else partial(json.dumps, encoding="utf-8"))
@@ -363,7 +363,8 @@ def test_nat(self):
def test_npy_nat(self):
from distutils.version import LooseVersion
if LooseVersion(np.__version__) < '1.7.0':
- raise nose.SkipTest
+ raise nose.SkipTest("numpy version < 1.7.0, is "
+ "{0}".format(np.__version__))
input = np.datetime64('NaT')
assert ujson.encode(input) == 'null', "Expected null"
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index fadf70877409f..dea719ce6f397 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -8,6 +8,7 @@
import re
import unittest
import nose
+import platform
from numpy import nan
import numpy as np
@@ -64,6 +65,10 @@ def setUp(self):
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
+ def test_converters_type_must_be_dict(self):
+ with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
+ self.read_csv(StringIO(self.data1), converters=0)
+
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
@@ -755,6 +760,8 @@ def test_deep_skiprows(self):
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
+
+
def test_detect_string_na(self):
data = """A,B
foo,bar
@@ -1492,28 +1499,17 @@ def test_na_value_dict(self):
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
- @slow
@tm.network
def test_url(self):
- try:
- # HTTP(S)
- url = ('https://raw.github.com/pydata/pandas/master/'
- 'pandas/io/tests/data/salary.table')
- url_table = self.read_table(url)
- dirpath = tm.get_data_path()
- localtable = os.path.join(dirpath, 'salary.table')
- local_table = self.read_table(localtable)
- tm.assert_frame_equal(url_table, local_table)
- # TODO: ftp testing
-
- except URLError:
- try:
- with tm.closing(urlopen('http://www.google.com')) as resp:
- pass
- except URLError:
- raise nose.SkipTest
- else:
- raise
+ # HTTP(S)
+ url = ('https://raw.github.com/pydata/pandas/master/'
+ 'pandas/io/tests/data/salary.table')
+ url_table = self.read_table(url)
+ dirpath = tm.get_data_path()
+ localtable = os.path.join(dirpath, 'salary.table')
+ local_table = self.read_table(localtable)
+ tm.assert_frame_equal(url_table, local_table)
+ # TODO: ftp testing
@slow
def test_file(self):
@@ -1529,7 +1525,8 @@ def test_file(self):
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
- raise nose.SkipTest
+ raise nose.SkipTest("failing on %s" %
+ ' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
@@ -1710,7 +1707,8 @@ def test_utf16_example(self):
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
- raise nose.SkipTest
+ raise nose.SkipTest("skipping because of windows hash on Python"
+ " 3.2.2")
csv = """id,score,days
1,2,12
@@ -1893,6 +1891,21 @@ def test_usecols_index_col_conflict(self):
class TestPythonParser(ParserTests, unittest.TestCase):
+ def test_negative_skipfooter_raises(self):
+ text = """#foo,a,b,c
+#foo,a,b,c
+#foo,a,b,c
+#foo,a,b,c
+#foo,a,b,c
+#foo,a,b,c
+1/1/2000,1.,2.,3.
+1/2/2000,4,5,6
+1/3/2000,7,8,9
+"""
+
+ with tm.assertRaisesRegexp(ValueError,
+ 'skip footer cannot be negative'):
+ df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
@@ -2048,6 +2061,18 @@ def test_fwf(self):
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3))
+ def test_fwf_colspecs_is_list_or_tuple(self):
+ with tm.assertRaisesRegexp(TypeError,
+ 'column specifications must be a list or '
+ 'tuple.+'):
+ fwr = pd.io.parsers.FixedWidthReader(StringIO(self.data1),
+ {'a': 1}, ',')
+
+ def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
+ with tm.assertRaisesRegexp(TypeError,
+ 'Each column specification must be.+'):
+ read_fwf(StringIO(self.data1), {'a': 1})
+
def test_fwf_regression(self):
# GH 3594
#### turns out 'T060' is parsable as a datetime slice!
@@ -2155,7 +2180,7 @@ def test_verbose_import(self):
def test_iteration_open_handle(self):
if PY3:
- raise nose.SkipTest
+ raise nose.SkipTest("won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
@@ -2371,7 +2396,7 @@ def test_decompression(self):
import gzip
import bz2
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
expected = self.read_csv(self.csv1)
@@ -2406,7 +2431,7 @@ def test_decompression_regex_sep(self):
import gzip
import bz2
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 35b9dfbdb6f77..35ecef2acf818 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -2295,7 +2295,7 @@ def test_index_types(self):
def test_timeseries_preepoch(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
- raise nose.SkipTest
+ raise nose.SkipTest("won't work on Python < 2.7")
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
@@ -3599,7 +3599,7 @@ def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs):
safe_remove(self.path)
def test_legacy_table_write(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table_%s.h5' % pandas.__version__), 'a')
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 624f16b3207cd..f135a3619e03c 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -431,7 +431,7 @@ def test_tquery(self):
try:
import MySQLdb
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no MySQLdb")
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.db.cursor()
@@ -456,7 +456,7 @@ def test_uquery(self):
try:
import MySQLdb
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no MySQLdb")
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.db.cursor()
diff --git a/pandas/io/tests/test_wb.py b/pandas/io/tests/test_wb.py
index e85c63d7d5999..60b4d8d462723 100644
--- a/pandas/io/tests/test_wb.py
+++ b/pandas/io/tests/test_wb.py
@@ -10,7 +10,7 @@
@slow
@network
def test_wdi_search():
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
expected = {u('id'): {2634: u('GDPPCKD'),
4649: u('NY.GDP.PCAP.KD'),
4651: u('NY.GDP.PCAP.KN'),
@@ -30,7 +30,7 @@ def test_wdi_search():
@slow
@network
def test_wdi_download():
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
expected = {'GDPPCKN': {(u('United States'), u('2003')): u('40800.0735367688'), (u('Canada'), u('2004')): u('37857.1261134552'), (u('United States'), u('2005')): u('42714.8594790102'), (u('Canada'), u('2003')): u('37081.4575704003'), (u('United States'), u('2004')): u('41826.1728310667'), (u('Mexico'), u('2003')): u('72720.0691255285'), (u('Mexico'), u('2004')): u('74751.6003347038'), (u('Mexico'), u('2005')): u('76200.2154469437'), (u('Canada'), u('2005')): u('38617.4563629611')}, 'GDPPCKD': {(u('United States'), u('2003')): u('40800.0735367688'), (u('Canada'), u('2004')): u('34397.055116118'), (u('United States'), u('2005')): u('42714.8594790102'), (u('Canada'), u('2003')): u('33692.2812368928'), (u('United States'), u('2004')): u('41826.1728310667'), (u('Mexico'), u('2003')): u('7608.43848670658'), (u('Mexico'), u('2004')): u('7820.99026814334'), (u('Mexico'), u('2005')): u('7972.55364129367'), (u('Canada'), u('2005')): u('35087.8925933298')}}
expected = pandas.DataFrame(expected)
result = download(country=['CA', 'MX', 'US', 'junk'], indicator=['GDPPCKD',
diff --git a/pandas/sparse/array.py b/pandas/sparse/array.py
index 34823c052a518..8a50a000a9526 100644
--- a/pandas/sparse/array.py
+++ b/pandas/sparse/array.py
@@ -11,12 +11,11 @@
from pandas.core.base import PandasObject
import pandas.core.common as com
-from pandas import compat
+from pandas import compat, lib
from pandas.compat import range
from pandas._sparse import BlockIndex, IntIndex
import pandas._sparse as splib
-import pandas.lib as lib
import pandas.index as _index
@@ -28,8 +27,8 @@ def _sparse_op_wrap(op, name):
def wrapper(self, other):
if isinstance(other, np.ndarray):
- if not ((len(self) == len(other))):
- raise AssertionError()
+ if len(self) != len(other):
+ raise AssertionError("Operands must be of the same size")
if not isinstance(other, SparseArray):
other = SparseArray(other, fill_value=self.fill_value)
return _sparse_array_op(self, other, op, name)
@@ -148,8 +147,10 @@ def __new__(
fill_value=fill_value)
else:
values = data
- if not ((len(values) == sparse_index.npoints)):
- raise AssertionError()
+ if len(values) != sparse_index.npoints:
+ raise AssertionError("Non array-like type {0} must have"
+ " the same length as the"
+ " index".format(type(values)))
# Create array, do *not* copy data by default
if copy:
@@ -329,8 +330,8 @@ def take(self, indices, axis=0):
-------
taken : ndarray
"""
- if not ((axis == 0)):
- raise AssertionError()
+ if axis:
+ raise ValueError("axis must be 0, input was {0}".format(axis))
indices = np.atleast_1d(np.asarray(indices, dtype=int))
# allow -1 to indicate missing values
@@ -339,14 +340,14 @@ def take(self, indices, axis=0):
raise IndexError('out of bounds access')
if self.sp_index.npoints > 0:
- locs = np.array(
- [self.sp_index.lookup(loc) if loc > -1 else -1 for loc in indices])
+ locs = np.array([self.sp_index.lookup(loc) if loc > -1 else -1
+ for loc in indices])
result = self.sp_values.take(locs)
mask = locs == -1
if mask.any():
try:
result[mask] = self.fill_value
- except (ValueError):
+ except ValueError:
# wrong dtype
result = result.astype('float64')
result[mask] = self.fill_value
diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index 53fabb0160a88..93b29cbf91b91 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -25,7 +25,6 @@
from pandas.core.generic import NDFrame
from pandas.sparse.series import SparseSeries, SparseArray
from pandas.util.decorators import Appender
-import pandas.lib as lib
class SparseDataFrame(DataFrame):
@@ -601,20 +600,15 @@ def _reindex_with_indexers(self, reindexers, method=None, fill_value=None, limit
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
- if isinstance(other, Series):
- if other.name is None:
- raise ValueError('Other Series must have a name')
- other = SparseDataFrame({other.name: other},
- default_fill_value=self._default_fill_value)
if on is not None:
- raise NotImplementedError
- else:
- return self._join_index(other, how, lsuffix, rsuffix)
+ raise NotImplementedError("'on' keyword parameter is not yet "
+ "implemented")
+ return self._join_index(other, how, lsuffix, rsuffix)
def _join_index(self, other, how, lsuffix, rsuffix):
if isinstance(other, Series):
- if not (other.name is not None):
- raise AssertionError()
+ if other.name is None:
+ raise ValueError('Other Series must have a name')
other = SparseDataFrame({other.name: other},
default_fill_value=self._default_fill_value)
diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py
index ae981180022c7..286b683b1ea88 100644
--- a/pandas/sparse/panel.py
+++ b/pandas/sparse/panel.py
@@ -77,8 +77,9 @@ def __init__(self, frames, items=None, major_axis=None, minor_axis=None,
default_kind=default_kind)
frames = new_frames
- if not (isinstance(frames, dict)):
- raise AssertionError()
+ if not isinstance(frames, dict):
+ raise TypeError('input must be a dict, a %r was passed' %
+ type(frames).__name__)
self.default_fill_value = fill_value = default_fill_value
self.default_kind = kind = default_kind
@@ -99,7 +100,7 @@ def __init__(self, frames, items=None, major_axis=None, minor_axis=None,
# do we want to fill missing ones?
for item in items:
if item not in clean_frames:
- raise Exception('column %s not found in data' % item)
+ raise ValueError('column %r not found in data' % item)
self._items = items
self.major_axis = major_axis
diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py
index 38003f0096df2..50e80e0c202d5 100644
--- a/pandas/sparse/series.py
+++ b/pandas/sparse/series.py
@@ -555,8 +555,8 @@ def sparse_reindex(self, new_index):
-------
reindexed : SparseSeries
"""
- if not (isinstance(new_index, splib.SparseIndex)):
- raise AssertionError()
+ if not isinstance(new_index, splib.SparseIndex):
+ raise TypeError('new index must be a SparseIndex')
block = self.block.sparse_reindex(new_index)
new_data = SingleBlockManager(block, block.ref_items)
diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py
index a74872c8f193f..b3f2a8b3b8136 100644
--- a/pandas/sparse/tests/test_sparse.py
+++ b/pandas/sparse/tests/test_sparse.py
@@ -488,7 +488,7 @@ def test_operators_corner2(self):
def test_binary_operators(self):
# skipping for now #####
- raise nose.SkipTest
+ raise nose.SkipTest("skipping sparse binary operators test")
def _check_inplace_op(iop, op):
tmp = self.bseries.copy()
@@ -539,7 +539,7 @@ def _compare_with_series(sps, new_index):
reindexed = self.bseries.reindex(self.bseries.index, copy=False)
reindexed.sp_values[:] = 1.
- self.assert_((self.bseries.sp_values == 1.).all())
+ np.testing.assert_array_equal(self.bseries.sp_values, 1.)
def test_sparse_reindex(self):
length = 10
@@ -583,6 +583,13 @@ def _check_all(values, first, second):
_check_all(values1, index1, [0, 1, 7, 8, 9])
_check_all(values1, index1, [])
+ first_series = SparseSeries(values1, sparse_index=IntIndex(length,
+ index1),
+ fill_value=nan)
+ with tm.assertRaisesRegexp(TypeError,
+ 'new index must be a SparseIndex'):
+ reindexed = first_series.sparse_reindex(0)
+
def test_repr(self):
bsrepr = repr(self.bseries)
isrepr = repr(self.iseries)
@@ -1308,6 +1315,10 @@ def test_join(self):
right = self.frame.ix[:, ['B', 'D']]
self.assertRaises(Exception, left.join, right)
+ with tm.assertRaisesRegexp(ValueError, 'Other Series must have a name'):
+ self.frame.join(Series(np.random.randn(len(self.frame)),
+ index=self.frame.index))
+
def test_reindex(self):
def _check_frame(frame):
@@ -1576,8 +1587,11 @@ def _test_op(panel, op):
assert_sp_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_constructor(self):
- self.assertRaises(Exception, SparsePanel, self.data_dict,
+ self.assertRaises(ValueError, SparsePanel, self.data_dict,
items=['Item0', 'ItemA', 'ItemB'])
+ with tm.assertRaisesRegexp(TypeError,
+ "input must be a dict, a 'list' was passed"):
+ SparsePanel(['a', 'b', 'c'])
def test_from_dict(self):
fd = SparsePanel.from_dict(self.data_dict)
diff --git a/pandas/stats/ols.py b/pandas/stats/ols.py
index 2bf366f4dc8cb..9d22068c1612f 100644
--- a/pandas/stats/ols.py
+++ b/pandas/stats/ols.py
@@ -1216,8 +1216,9 @@ def _nobs_raw(self):
return result.astype(int)
def _beta_matrix(self, lag=0):
- if not ((lag >= 0)):
- raise AssertionError()
+ if lag < 0:
+ raise AssertionError("'lag' must be greater than or equal to 0, "
+ "input was {0}".format(lag))
betas = self._beta_raw
@@ -1280,8 +1281,8 @@ def _filter_data(lhs, rhs, weights=None):
Cleaned lhs and rhs
"""
if not isinstance(lhs, Series):
- if not ((len(lhs) == len(rhs))):
- raise AssertionError()
+ if len(lhs) != len(rhs):
+ raise AssertionError("length of lhs must equal length of rhs")
lhs = Series(lhs, index=rhs.index)
rhs = _combine_rhs(rhs)
diff --git a/pandas/stats/plm.py b/pandas/stats/plm.py
index 450ddac78e06a..3c67119427ae0 100644
--- a/pandas/stats/plm.py
+++ b/pandas/stats/plm.py
@@ -103,10 +103,12 @@ def _prepare_data(self):
y_regressor = y
if weights is not None:
- if not ((y_regressor.index.equals(weights.index))):
- raise AssertionError()
- if not ((x_regressor.index.equals(weights.index))):
- raise AssertionError()
+ if not y_regressor.index.equals(weights.index):
+ raise AssertionError("y_regressor and weights must have the "
+ "same index")
+ if not x_regressor.index.equals(weights.index):
+ raise AssertionError("x_regressor and weights must have the "
+ "same index")
rt_weights = np.sqrt(weights)
y_regressor = y_regressor * rt_weights
@@ -173,8 +175,10 @@ def _convert_x(self, x):
# .iteritems
iteritems = getattr(x, 'iteritems', x.items)
for key, df in iteritems():
- if not ((isinstance(df, DataFrame))):
- raise AssertionError()
+ if not isinstance(df, DataFrame):
+ raise AssertionError("all input items must be DataFrames, "
+ "at least one is of "
+ "type {0}".format(type(df)))
if _is_numeric(df):
x_converted[key] = df
@@ -642,8 +646,9 @@ def _y_predict_raw(self):
return (betas * x).sum(1)
def _beta_matrix(self, lag=0):
- if not ((lag >= 0)):
- raise AssertionError()
+ if lag < 0:
+ raise AssertionError("'lag' must be greater than or equal to 0, "
+ "input was {0}".format(lag))
index = self._y_trans.index
major_labels = index.labels[0]
diff --git a/pandas/stats/tests/test_math.py b/pandas/stats/tests/test_math.py
index 92dedb35f4512..008fffdc1db06 100644
--- a/pandas/stats/tests/test_math.py
+++ b/pandas/stats/tests/test_math.py
@@ -49,7 +49,7 @@ def test_rank_1d(self):
def test_solve_rect(self):
if not _have_statsmodels:
- raise nose.SkipTest
+ raise nose.SkipTest("no statsmodels")
b = Series(np.random.randn(N), self.frame.index)
result = pmath.solve(self.frame, b)
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index 24fc04d849c7f..70653d9d96bef 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -22,7 +22,7 @@ def _skip_if_no_scipy():
try:
import scipy.stats
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scipy.stats")
class TestMoments(unittest.TestCase):
@@ -73,7 +73,7 @@ def test_cmov_mean(self):
try:
from scikits.timeseries.lib import cmov_mean
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scikits.timeseries")
vals = np.random.randn(10)
xp = cmov_mean(vals, 5)
@@ -91,7 +91,7 @@ def test_cmov_window(self):
try:
from scikits.timeseries.lib import cmov_window
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scikits.timeseries")
vals = np.random.randn(10)
xp = cmov_window(vals, 5, 'boxcar')
@@ -109,7 +109,7 @@ def test_cmov_window_corner(self):
try:
from scikits.timeseries.lib import cmov_window
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scikits.timeseries")
# all nan
vals = np.empty(10, dtype=float)
@@ -133,7 +133,7 @@ def test_cmov_window_frame(self):
try:
from scikits.timeseries.lib import cmov_window
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scikits.timeseries")
# DataFrame
vals = np.random.randn(10, 2)
@@ -146,7 +146,7 @@ def test_cmov_window_na_min_periods(self):
try:
from scikits.timeseries.lib import cmov_window
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scikits.timeseries")
# min_periods
vals = Series(np.random.randn(10))
@@ -163,7 +163,7 @@ def test_cmov_window_regular(self):
try:
from scikits.timeseries.lib import cmov_window
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scikits.timeseries")
win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
@@ -179,7 +179,7 @@ def test_cmov_window_special(self):
try:
from scikits.timeseries.lib import cmov_window
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scikits.timeseries")
win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']
kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., 'width': 2.},
@@ -319,7 +319,7 @@ def test_rolling_kurt(self):
def test_fperr_robustness(self):
# TODO: remove this once python 2.5 out of picture
if PY3:
- raise nose.SkipTest
+ raise nose.SkipTest("doesn't work on python 3")
# #2114
data = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a@\xaa\xaa\xaa\xaa\xaa\xaa\x02@8\x8e\xe38\x8e\xe3\xe8?z\t\xed%\xb4\x97\xd0?\xa2\x0c<\xdd\x9a\x1f\xb6?\x82\xbb\xfa&y\x7f\x9d?\xac\'\xa7\xc4P\xaa\x83?\x90\xdf\xde\xb0k8j?`\xea\xe9u\xf2zQ?*\xe37\x9d\x98N7?\xe2.\xf5&v\x13\x1f?\xec\xc9\xf8\x19\xa4\xb7\x04?\x90b\xf6w\x85\x9f\xeb>\xb5A\xa4\xfaXj\xd2>F\x02\xdb\xf8\xcb\x8d\xb8>.\xac<\xfb\x87^\xa0>\xe8:\xa6\xf9_\xd3\x85>\xfb?\xe2cUU\xfd?\xfc\x7fA\xed8\x8e\xe3?\xa5\xaa\xac\x91\xf6\x12\xca?n\x1cs\xb6\xf9a\xb1?\xe8%D\xf3L-\x97?5\xddZD\x11\xe7~?#>\xe7\x82\x0b\x9ad?\xd9R4Y\x0fxK?;7x;\nP2?N\xf4JO\xb8j\x18?4\xf81\x8a%G\x00?\x9a\xf5\x97\r2\xb4\xe5>\xcd\x9c\xca\xbcB\xf0\xcc>3\x13\x87(\xd7J\xb3>\x99\x19\xb4\xe0\x1e\xb9\x99>ff\xcd\x95\x14&\x81>\x88\x88\xbc\xc7p\xddf>`\x0b\xa6_\x96|N>@\xb2n\xea\x0eS4>U\x98\x938i\x19\x1b>\x8eeb\xd0\xf0\x10\x02>\xbd\xdc-k\x96\x16\xe8=(\x93\x1e\xf2\x0e\x0f\xd0=\xe0n\xd3Bii\xb5=*\xe9\x19Y\x8c\x8c\x9c=\xc6\xf0\xbb\x90]\x08\x83=]\x96\xfa\xc0|`i=>d\xfc\xd5\xfd\xeaP=R0\xfb\xc7\xa7\x8e6=\xc2\x95\xf9_\x8a\x13\x1e=\xd6c\xa6\xea\x06\r\x04=r\xda\xdd8\t\xbc\xea<\xf6\xe6\x93\xd0\xb0\xd2\xd1<\x9d\xdeok\x96\xc3\xb7<&~\xea9s\xaf\x9f<UUUUUU\x13@q\x1c\xc7q\x1c\xc7\xf9?\xf6\x12\xdaKh/\xe1?\xf2\xc3"e\xe0\xe9\xc6?\xed\xaf\x831+\x8d\xae?\xf3\x1f\xad\xcb\x1c^\x94?\x15\x1e\xdd\xbd>\xb8\x02@\xc6\xd2&\xfd\xa8\xf5\xe8?\xd9\xe1\x19\xfe\xc5\xa3\xd0?v\x82"\xa8\xb2/\xb6?\x9dX\x835\xee\x94\x9d?h\x90W\xce\x9e\xb8\x83?\x8a\xc0th~Kj?\\\x80\xf8\x9a\xa9\x87Q?%\xab\xa0\xce\x8c_7?1\xe4\x80\x13\x11*\x1f? \x98\x00\r\xb6\xc6\x04?\x80u\xabf\x9d\xb3\xeb>UNrD\xbew\xd2>\x1c\x13C[\xa8\x9f\xb8>\x12b\xd7<pj\xa0>m-\x1fQ@\xe3\x85>\xe6\x91)l\x00/m>Da\xc6\xf2\xaatS>\x05\xd7]\xee\xe3\xf09>'
diff --git a/pandas/stats/tests/test_ols.py b/pandas/stats/tests/test_ols.py
index ad9184e698316..df2f545c90b92 100644
--- a/pandas/stats/tests/test_ols.py
+++ b/pandas/stats/tests/test_ols.py
@@ -6,9 +6,9 @@
from __future__ import division
-from distutils.version import LooseVersion
from datetime import datetime
from pandas import compat
+from distutils.version import LooseVersion
import unittest
import nose
import numpy as np
@@ -77,7 +77,7 @@ def setUpClass(cls):
pass
if not _have_statsmodels:
- raise nose.SkipTest
+ raise nose.SkipTest("no statsmodels")
def testOLSWithDatasets_ccard(self):
self.checkDataSet(sm.datasets.ccard.load(), skip_moving=True)
@@ -262,7 +262,7 @@ class TestOLSMisc(unittest.TestCase):
@classmethod
def setupClass(cls):
if not _have_statsmodels:
- raise nose.SkipTest
+ raise nose.SkipTest("no statsmodels")
def test_f_test(self):
x = tm.makeTimeDataFrame()
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index e47ba0c8e1569..f41f6a9858b47 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -35,7 +35,7 @@ def test_getitem(self):
tm.assert_almost_equal(subf.labels, [2, 2, 2])
def test_constructor_unsortable(self):
- raise nose.SkipTest
+ raise nose.SkipTest('skipping for now')
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index f81620b897a4a..56f52447aadfe 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -18,8 +18,13 @@
if not expr._USE_NUMEXPR:
- raise nose.SkipTest("numexpr not available")
-
+ try:
+ import numexpr
+ except ImportError:
+ msg = "don't have"
+ else:
+ msg = "not using"
+ raise nose.SkipTest("{0} numexpr".format(msg))
_frame = DataFrame(randn(10000, 4), columns=list('ABCD'), dtype='float64')
_frame2 = DataFrame(randn(100, 4), columns = list('ABCD'), dtype='float64')
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 723bf022c3f48..9405f3c58bfd7 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -202,7 +202,8 @@ def test_repr_non_interactive(self):
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
- raise nose.SkipTest
+ raise nose.SkipTest("terminal size too small, "
+ "{0} x {1}".format(term_width, term_height))
def mkframe(n):
index = ['%05d' % i for i in range(n)]
@@ -766,7 +767,7 @@ def test_pprint_thing(self):
from pandas.core.common import pprint_thing as pp_t
if PY3:
- raise nose.SkipTest()
+ raise nose.SkipTest("doesn't work on Python 3")
self.assertEquals(pp_t('a') , u('a'))
self.assertEquals(pp_t(u('a')) , u('a'))
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index e5d2bb17ec7a8..51278b82aaedc 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -60,7 +60,7 @@ def _skip_if_no_scipy():
try:
import scipy.stats
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scipy.stats module")
#---------------------------------------------------------------------
# DataFrame test cases
@@ -9498,7 +9498,7 @@ def test_sum(self):
self._check_stat_op('sum', np.sum, has_numeric_only=True)
def test_sum_mixed_numeric(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# mixed types
self._check_stat_op('sum', np.sum, frame = self.mixed_float, has_numeric_only=True)
@@ -10910,10 +10910,7 @@ def test_stale_cached_series_bug_473(self):
self.assert_(isnull(Y['g']['c']))
def test_index_namedtuple(self):
- try:
- from collections import namedtuple
- except ImportError:
- raise nose.SkipTest
+ from collections import namedtuple
IndexType = namedtuple("IndexType", ["a", "b"])
idx1 = IndexType("foo", "bar")
idx2 = IndexType("baz", "bof")
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 49dc31514da7a..bdeb4ca3d0212 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -26,7 +26,7 @@ def _skip_if_no_scipy():
try:
import scipy
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scipy")
@tm.mplskip
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 857836fa698ce..2a9149ef30dab 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -1414,7 +1414,7 @@ def test_pickle(self):
def test_legacy_pickle(self):
if compat.PY3:
- raise nose.SkipTest
+ raise nose.SkipTest("doesn't work on Python 3")
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index 07436236a62de..2a9e7f8642601 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -195,7 +195,7 @@ def test_split_block_at(self):
# with dup column support this method was taken out
# GH3679
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
bs = list(self.fblock.split_block_at('a'))
self.assertEqual(len(bs), 1)
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 2c8394bfde285..5ec97344373a2 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -1419,7 +1419,7 @@ def test_getitem_lowerdim_corner(self):
# AMBIGUOUS CASES!
def test_partial_ix_missing(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
result = self.ymd.ix[2000, 0]
expected = self.ymd.ix[2000]['A']
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index a61212b341fa7..289bcb9db0c7e 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -33,7 +33,7 @@ def _skip_if_no_scipy():
try:
import scipy.stats
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scipy.stats")
class PanelTests(object):
@@ -102,7 +102,7 @@ def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
@@ -426,8 +426,8 @@ def test_delitem_and_pop(self):
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
- self.assertRaises(Exception, self.panel.__setitem__,
- 'ItemE', lp)
+ with tm.assertRaises(ValueError):
+ self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
@@ -456,6 +456,13 @@ def test_setitem(self):
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
+ # bad shape
+ p = Panel(np.random.randn(4, 3, 2))
+ with tm.assertRaisesRegexp(ValueError,
+ "shape of value must be \(3, 2\), "
+ "shape of given object was \(4, 2\)"):
+ p[0] = np.random.randn(4, 2)
+
def test_setitem_ndarray(self):
from pandas import date_range, datetools
@@ -758,6 +765,9 @@ def test_get_value(self):
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
+ with tm.assertRaisesRegexp(TypeError,
+ "There must be an argument for each axis"):
+ self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
@@ -774,6 +784,10 @@ def test_set_value(self):
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assert_(com.is_float_dtype(res3['ItemE'].values))
+ with tm.assertRaisesRegexp(TypeError,
+ "There must be an argument for each axis"
+ " plus the value provided"):
+ self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
@@ -878,6 +892,11 @@ def _check_dtype(panel, dtype):
panel = Panel(np.random.randn(2,10,5),items=lrange(2),major_axis=lrange(10),minor_axis=lrange(5),dtype=dtype)
_check_dtype(panel,dtype)
+ def test_constructor_fails_with_not_3d_input(self):
+ with tm.assertRaisesRegexp(ValueError,
+ "The number of dimensions required is 3"):
+ Panel(np.random.randn(10, 2))
+
def test_consolidate(self):
self.assert_(self.panel._data.is_consolidated())
@@ -1457,14 +1476,14 @@ def test_from_frame_level1_unsorted(self):
assert_frame_equal(p.minor_xs(2), df.xs(2, level=1).sort_index())
def test_to_excel(self):
+ import os
try:
- import os
import xlwt
import xlrd
import openpyxl
from pandas.io.excel import ExcelFile
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("need xlwt xlrd openpyxl")
for ext in ['xls', 'xlsx']:
path = '__tmp__.' + ext
@@ -1473,7 +1492,7 @@ def test_to_excel(self):
try:
reader = ExcelFile(path)
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("need xlwt xlrd openpyxl")
for item, df in compat.iteritems(self.panel):
recdf = reader.parse(str(item), index_col=0)
@@ -1492,8 +1511,8 @@ def test_to_excel_xlsxwriter(self):
self.panel.to_excel(path, engine='xlsxwriter')
try:
reader = ExcelFile(path)
- except ImportError:
- raise nose.SkipTest
+ except ImportError as e:
+ raise nose.SkipTest("cannot write excel file: %s" % e)
for item, df in compat.iteritems(self.panel):
recdf = reader.parse(str(item), index_col=0)
diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py
index 1ce909b57402f..4d5d29e08fa9f 100644
--- a/pandas/tests/test_panel4d.py
+++ b/pandas/tests/test_panel4d.py
@@ -73,7 +73,7 @@ def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
@@ -397,7 +397,7 @@ def test_comp(func):
test_comp(operator.le)
def test_setitem_ndarray(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# from pandas import DateRange, datetools
# timeidx = DateRange(start=datetime(2009,1,1),
@@ -510,7 +510,7 @@ def test_getitem_fancy_ints(self):
pass
def test_getitem_fancy_xs(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# self.assertRaises(NotImplementedError, self.panel4d.major_xs)
# self.assertRaises(NotImplementedError, self.panel4d.minor_xs)
@@ -706,7 +706,7 @@ def test_constructor_resize(self):
assert_panel4d_equal(result, expected)
def test_from_dict_mixed_orient(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# df = tm.makeDataFrame()
# df['foo'] = 'bar'
@@ -798,7 +798,7 @@ def test_reindex_like(self):
assert_panel4d_equal(smaller, smaller_like)
def test_take(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# # axis == 0
# result = self.panel.take([2, 0, 1], axis=0)
@@ -876,7 +876,7 @@ def test_swapaxes(self):
self.assert_(id(self.panel4d) != id(result))
def test_to_frame(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# # filtered
# filtered = self.panel.to_frame()
# expected = self.panel.to_frame().dropna(how='any')
@@ -890,7 +890,7 @@ def test_to_frame(self):
# self.assertEqual(unfiltered.index.names, ('major', 'minor'))
def test_to_frame_mixed(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# panel = self.panel.fillna(0)
# panel['str'] = 'foo'
# panel['bool'] = panel['ItemA'] > 0
@@ -928,20 +928,20 @@ def test_update(self):
assert_panel4d_equal(p4d, expected)
def test_filter(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
def test_apply(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
def test_compound(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# compounded = self.panel.compound()
# assert_series_equal(compounded['ItemA'],
# (1 + self.panel['ItemA']).product(0) - 1)
def test_shift(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# # major
# idx = self.panel.major_axis[0]
# idx_lag = self.panel.major_axis[1]
@@ -963,7 +963,7 @@ def test_shift(self):
# self.assertRaises(Exception, self.panel.shift, 1, axis='items')
def test_multiindex_get(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b',2)],
# names=['first', 'second'])
# wp = Panel(np.random.random((4,5,5)),
@@ -981,7 +981,7 @@ def test_multiindex_get(self):
# names=['first', 'second'])
def test_multiindex_blocks(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
# ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
# names=['first', 'second'])
# wp = Panel(self.panel._data)
@@ -1034,10 +1034,10 @@ def test_group_agg(self):
self.assertRaises(Exception, group_agg, values, bounds, f2)
def test_from_frame_level1_unsorted(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
def test_to_excel(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping for now")
if __name__ == '__main__':
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 98fa5c0a56ccd..f8320149f4ac6 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -5,6 +5,7 @@
import unittest
import string
from itertools import product, starmap
+from distutils.version import LooseVersion
import nose
@@ -37,14 +38,14 @@ def _skip_if_no_scipy():
try:
import scipy.stats
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("scipy not installed")
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("pytz not installed")
#------------------------------------------------------------------------------
# Series test cases
@@ -1772,7 +1773,8 @@ def test_cummax(self):
self.assert_(np.array_equal(result, expected))
def test_npdiff(self):
- raise nose.SkipTest
+ raise nose.SkipTest("skipping due to Series no longer being an "
+ "ndarray")
# no longer works as the return type of np.diff is now nd.array
s = Series(np.arange(5))
@@ -3098,8 +3100,9 @@ def test_corr_rank(self):
self.assertAlmostEqual(result, expected)
# these methods got rewritten in 0.8
- if int(scipy.__version__.split('.')[1]) < 9:
- raise nose.SkipTest
+ if scipy.__version__ < LooseVersion('0.9'):
+ raise nose.SkipTest("skipping corr rank because of scipy version "
+ "{0}".format(scipy.__version__))
# results from R
A = Series([-0.89926396, 0.94209606, -1.03289164, -0.95445587,
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index ba60566a7fc55..d5bd1072f6a3e 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -407,17 +407,19 @@ def _validate_specification(self):
elif self.left_on is not None:
n = len(self.left_on)
if self.right_index:
- if not ((len(self.left_on) == self.right.index.nlevels)):
- raise AssertionError()
+ if len(self.left_on) != self.right.index.nlevels:
+ raise ValueError('len(left_on) must equal the number '
+ 'of levels in the index of "right"')
self.right_on = [None] * n
elif self.right_on is not None:
n = len(self.right_on)
if self.left_index:
- if not ((len(self.right_on) == self.left.index.nlevels)):
- raise AssertionError()
+ if len(self.right_on) != self.left.index.nlevels:
+ raise ValueError('len(right_on) must equal the number '
+ 'of levels in the index of "left"')
self.left_on = [None] * n
- if not ((len(self.right_on) == len(self.left_on))):
- raise AssertionError()
+ if len(self.right_on) != len(self.left_on):
+ raise ValueError("len(right_on) must equal len(left_on)")
def _get_join_indexers(left_keys, right_keys, sort=False, how='inner'):
@@ -430,8 +432,8 @@ def _get_join_indexers(left_keys, right_keys, sort=False, how='inner'):
-------
"""
- if not ((len(left_keys) == len(right_keys))):
- raise AssertionError()
+ if len(left_keys) != len(right_keys):
+ raise AssertionError('left_key and right_keys must be the same length')
left_labels = []
right_labels = []
@@ -545,8 +547,11 @@ def _left_join_on_index(left_ax, right_ax, join_keys, sort=False):
if len(join_keys) > 1:
if not ((isinstance(right_ax, MultiIndex) and
- len(join_keys) == right_ax.nlevels) ):
- raise AssertionError()
+ len(join_keys) == right_ax.nlevels)):
+ raise AssertionError("If more than one join key is given then "
+ "'right_ax' must be a MultiIndex and the "
+ "number of join keys must be the number of "
+ "levels in right_ax")
left_tmp, right_indexer = \
_get_multiindex_indexer(join_keys, right_ax,
@@ -645,8 +650,9 @@ def __init__(self, data_list, join_index, indexers, axis=1, copy=True):
if axis <= 0: # pragma: no cover
raise MergeError('Only axis >= 1 supported for this operation')
- if not ((len(data_list) == len(indexers))):
- raise AssertionError()
+ if len(data_list) != len(indexers):
+ raise AssertionError("data_list and indexers must have the same "
+ "length")
self.units = []
for data, indexer in zip(data_list, indexers):
@@ -977,8 +983,9 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
axis = 1 if axis == 0 else 0
self._is_series = isinstance(sample, ABCSeries)
- if not ((0 <= axis <= sample.ndim)):
- raise AssertionError()
+ if not 0 <= axis <= sample.ndim:
+ raise AssertionError("axis must be between 0 and {0}, "
+ "input was {1}".format(sample.ndim, axis))
# note: this is the BlockManager axis (since DataFrame is transposed)
self.axis = axis
@@ -1202,8 +1209,9 @@ def _concat_single_item(self, objs, item):
to_concat.append(item_values)
# this method only gets called with axis >= 1
- if not ((self.axis >= 1)):
- raise AssertionError()
+ if self.axis < 1:
+ raise AssertionError("axis must be >= 1, input was"
+ " {0}".format(self.axis))
return com._concat_compat(to_concat, axis=self.axis - 1)
def _get_result_dim(self):
@@ -1222,8 +1230,9 @@ def _get_new_axes(self):
continue
new_axes[i] = self._get_comb_axis(i)
else:
- if not ((len(self.join_axes) == ndim - 1)):
- raise AssertionError()
+ if len(self.join_axes) != ndim - 1:
+ raise AssertionError("length of join_axes must not be "
+ "equal to {0}".format(ndim - 1))
# ufff...
indices = lrange(ndim)
diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py
index c11ec9f338f88..a4b229e98ada9 100644
--- a/pandas/tools/pivot.py
+++ b/pandas/tools/pivot.py
@@ -386,8 +386,8 @@ def _get_names(arrs, names, prefix='row'):
else:
names.append('%s_%d' % (prefix, i))
else:
- if not ((len(names) == len(arrs))):
- raise AssertionError()
+ if len(names) != len(arrs):
+ raise AssertionError('arrays and names must have the same length')
if not isinstance(names, list):
names = list(names)
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index d44564db4b830..eec134ebeb990 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -231,6 +231,33 @@ def test_join_on(self):
source_copy['A'] = 0
self.assertRaises(Exception, target.join, source_copy, on='A')
+ def test_join_on_fails_with_different_right_index(self):
+ with tm.assertRaises(ValueError):
+ df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
+ 'b': np.random.randn(3)})
+ df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
+ 'b': np.random.randn(10)},
+ index=tm.makeCustomIndex(10, 2))
+ merge(df, df2, left_on='a', right_index=True)
+
+ def test_join_on_fails_with_different_left_index(self):
+ with tm.assertRaises(ValueError):
+ df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
+ 'b': np.random.randn(3)},
+ index=tm.makeCustomIndex(10, 2))
+ df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
+ 'b': np.random.randn(10)})
+ merge(df, df2, right_on='b', left_index=True)
+
+ def test_join_on_fails_with_different_column_counts(self):
+ with tm.assertRaises(ValueError):
+ df = DataFrame({'a': tm.choice(['m', 'f'], size=3),
+ 'b': np.random.randn(3)})
+ df2 = DataFrame({'a': tm.choice(['m', 'f'], size=10),
+ 'b': np.random.randn(10)},
+ index=tm.makeCustomIndex(10, 2))
+ merge(df, df2, right_on='a', left_on=['a', 'b'])
+
def test_join_on_pass_vector(self):
expected = self.target.join(self.source, on='C')
del expected['C']
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 76433cf0c8f88..847896871045b 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -309,12 +309,12 @@ def _generate(cls, start, end, periods, name, offset,
if tz is not None and inferred_tz is not None:
if not inferred_tz == tz:
- raise AssertionError()
+ raise AssertionError("Inferred time zone not equal to passed "
+ "time zone")
elif inferred_tz is not None:
tz = inferred_tz
-
if start is not None:
if normalize:
start = normalize_date(start)
@@ -456,16 +456,16 @@ def _cached_range(cls, start=None, end=None, periods=None, offset=None,
cachedRange = drc[offset]
if start is None:
- if not (isinstance(end, Timestamp)):
- raise AssertionError()
+ if not isinstance(end, Timestamp):
+ raise AssertionError('end must be an instance of Timestamp')
end = offset.rollback(end)
endLoc = cachedRange.get_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
- if not (isinstance(start, Timestamp)):
- raise AssertionError()
+ if not isinstance(start, Timestamp):
+ raise AssertionError('start must be an instance of Timestamp')
start = offset.rollforward(start)
@@ -601,14 +601,14 @@ def _format_native_types(self, na_rep=u('NaT'), **kwargs):
if d.time() != zero_time or d.tzinfo is not None:
return [u('%s') % x for x in data]
- values = np.array(data,dtype=object)
+ values = np.array(data, dtype=object)
mask = isnull(self.values)
values[mask] = na_rep
imask = -mask
- values[imask] = np.array([u('%d-%.2d-%.2d') % (
- dt.year, dt.month, dt.day)
- for dt in values[imask] ])
+ values[imask] = np.array([u('%d-%.2d-%.2d') % (dt.year, dt.month,
+ dt.day)
+ for dt in values[imask]])
return values.tolist()
def isin(self, values):
@@ -1130,7 +1130,6 @@ def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
else:
raise KeyError
-
stamps = self.asi8
if is_monotonic:
@@ -1147,8 +1146,8 @@ def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
return slice(left, right)
- lhs_mask = (stamps>=t1.value) if use_lhs else True
- rhs_mask = (stamps<=t2.value) if use_rhs else True
+ lhs_mask = (stamps >= t1.value) if use_lhs else True
+ rhs_mask = (stamps <= t2.value) if use_rhs else True
# try to find a the dates
return (lhs_mask & rhs_mask).nonzero()[0]
@@ -1227,7 +1226,8 @@ def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
_, parsed, reso = parse_time_string(key, freq)
- loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs, use_rhs=use_rhs)
+ loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs,
+ use_rhs=use_rhs)
return loc
def slice_indexer(self, start=None, end=None, step=None):
@@ -1274,12 +1274,13 @@ def slice_locs(self, start=None, end=None):
# so create an indexer directly
try:
if start:
- start_loc = self._get_string_slice(start,use_rhs=False)
+ start_loc = self._get_string_slice(start,
+ use_rhs=False)
else:
start_loc = np.arange(len(self))
if end:
- end_loc = self._get_string_slice(end,use_lhs=False)
+ end_loc = self._get_string_slice(end, use_lhs=False)
else:
end_loc = np.arange(len(self))
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index afa267ed5b4e4..b6f3c3c83f3d8 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -5,7 +5,6 @@
import numpy as np
from pandas.core.base import PandasObject
-import pandas.tseries.offsets as offsets
from pandas.tseries.frequencies import (get_freq_code as _gfc,
_month_numbers, FreqGroup)
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
@@ -217,7 +216,7 @@ def end_time(self):
ordinal = (self + 1).start_time.value - 1
return Timestamp(ordinal)
- def to_timestamp(self, freq=None, how='start',tz=None):
+ def to_timestamp(self, freq=None, how='start', tz=None):
"""
Return the Timestamp representation of the Period at the target
frequency at the specified end (how) of the Period
@@ -245,7 +244,7 @@ def to_timestamp(self, freq=None, how='start',tz=None):
val = self.asfreq(freq, how)
dt64 = tslib.period_ordinal_to_dt64(val.ordinal, base)
- return Timestamp(dt64,tz=tz)
+ return Timestamp(dt64, tz=tz)
year = _period_field_accessor('year', 0)
month = _period_field_accessor('month', 3)
@@ -288,7 +287,6 @@ def __unicode__(self):
value = ("%s" % formatted)
return value
-
def strftime(self, fmt):
"""
Returns the string representation of the :class:`Period`, depending
@@ -479,13 +477,13 @@ def _period_index_cmp(opname):
def wrapper(self, other):
if isinstance(other, Period):
func = getattr(self.values, opname)
- if not (other.freq == self.freq):
- raise AssertionError()
+ if other.freq != self.freq:
+ raise AssertionError("Frequencies must be equal")
result = func(other.ordinal)
elif isinstance(other, PeriodIndex):
- if not (other.freq == self.freq):
- raise AssertionError()
+ if other.freq != self.freq:
+ raise AssertionError("Frequencies must be equal")
return getattr(self.values, opname)(other.values)
else:
other = Period(other, freq=self.freq)
@@ -701,7 +699,6 @@ def asof_locs(self, where, mask):
@property
def asobject(self):
- from pandas.core.index import Index
return Index(self._box_values(self.values), dtype=object)
def _array_values(self):
@@ -940,7 +937,7 @@ def get_loc(self, key):
key = Period(key, self.freq)
try:
return self._engine.get_loc(key.ordinal)
- except KeyError as inst:
+ except KeyError:
raise KeyError(key)
def slice_locs(self, start=None, end=None):
@@ -1062,7 +1059,7 @@ def _format_with_header(self, header, **kwargs):
def _format_native_types(self, na_rep=u('NaT'), **kwargs):
- values = np.array(list(self),dtype=object)
+ values = np.array(list(self), dtype=object)
mask = isnull(self.values)
values[mask] = na_rep
@@ -1169,7 +1166,7 @@ def __setstate__(self, state):
nd_state, own_state = state
np.ndarray.__setstate__(self, nd_state)
self.name = own_state[0]
- try: # backcompat
+ try: # backcompat
self.freq = own_state[1]
except:
pass
@@ -1235,8 +1232,8 @@ def _range_from_fields(year=None, month=None, quarter=None, day=None,
base, mult = _gfc(freq)
if mult != 1:
raise ValueError('Only mult == 1 supported')
- if not (base == FreqGroup.FR_QTR):
- raise AssertionError()
+ if base != FreqGroup.FR_QTR:
+ raise AssertionError("base must equal FR_QTR")
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py
index 20d42f7211f55..96ff8c47abc1e 100644
--- a/pandas/tseries/resample.py
+++ b/pandas/tseries/resample.py
@@ -120,8 +120,9 @@ def _get_time_grouper(self, obj):
return binner, grouper
def _get_time_bins(self, axis):
- if not (isinstance(axis, DatetimeIndex)):
- raise AssertionError()
+ if not isinstance(axis, DatetimeIndex):
+ raise TypeError('axis must be a DatetimeIndex, but got '
+ 'an instance of %r' % type(axis).__name__)
if len(axis) == 0:
binner = labels = DatetimeIndex(data=[], freq=self.freq)
@@ -180,10 +181,11 @@ def _adjust_bin_edges(self, binner, ax_values):
return binner, bin_edges
def _get_time_period_bins(self, axis):
- if not(isinstance(axis, DatetimeIndex)):
- raise AssertionError()
+ if not isinstance(axis, DatetimeIndex):
+ raise TypeError('axis must be a DatetimeIndex, but got '
+ 'an instance of %r' % type(axis).__name__)
- if len(axis) == 0:
+ if not len(axis):
binner = labels = PeriodIndex(data=[], freq=self.freq)
return binner, [], labels
@@ -211,8 +213,8 @@ def _resample_timestamps(self, obj):
result = grouped.aggregate(self._agg_method)
else:
# upsampling shortcut
- if not (self.axis == 0):
- raise AssertionError()
+ if self.axis:
+ raise AssertionError('axis must be 0')
if self.closed == 'right':
res_index = binner[1:]
@@ -278,7 +280,6 @@ def _resample_periods(self, obj):
def _take_new_index(obj, indexer, new_index, axis=0):
from pandas.core.api import Series, DataFrame
- from pandas.core.internals import BlockManager
if isinstance(obj, Series):
new_values = com.take_1d(obj.values, indexer)
@@ -286,7 +287,7 @@ def _take_new_index(obj, indexer, new_index, axis=0):
elif isinstance(obj, DataFrame):
if axis == 1:
raise NotImplementedError
- return DataFrame(obj._data.take(indexer,new_index=new_index,axis=1))
+ return DataFrame(obj._data.take(indexer, new_index=new_index, axis=1))
else:
raise NotImplementedError
diff --git a/pandas/tseries/tests/test_converter.py b/pandas/tseries/tests/test_converter.py
index c3bb7d82dfb6d..7cb84b5134a9a 100644
--- a/pandas/tseries/tests/test_converter.py
+++ b/pandas/tseries/tests/test_converter.py
@@ -11,7 +11,7 @@
try:
import pandas.tseries.converter as converter
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("no pandas.tseries.converter, skipping")
def test_timtetonum_accepts_unicode():
diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py
index d17b42c1e23c9..cb17375266edf 100644
--- a/pandas/tseries/tests/test_daterange.py
+++ b/pandas/tseries/tests/test_daterange.py
@@ -23,7 +23,7 @@ def _skip_if_no_pytz():
try:
import pytz
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("pytz not installed")
def _skip_if_no_cday():
diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py
index 96888df114950..cfbde75f6ae21 100644
--- a/pandas/tseries/tests/test_plotting.py
+++ b/pandas/tseries/tests/test_plotting.py
@@ -23,7 +23,7 @@ def _skip_if_no_scipy():
try:
import scipy
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("scipy not installed")
@tm.mplskip
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index 620310e32ffcc..c60d4b3fd48d1 100644
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -11,6 +11,8 @@
from pandas.tseries.offsets import Minute, BDay
from pandas.tseries.period import period_range, PeriodIndex, Period
from pandas.tseries.resample import DatetimeIndex, TimeGrouper
+from pandas.tseries.frequencies import MONTHS, DAYS
+
import pandas.tseries.offsets as offsets
import pandas as pd
@@ -28,7 +30,7 @@ def _skip_if_no_pytz():
try:
import pytz
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("pytz not installed")
class TestResample(unittest.TestCase):
@@ -660,9 +662,6 @@ def _simple_pts(start, end, freq='D'):
return TimeSeries(np.random.randn(len(rng)), index=rng)
-from pandas.tseries.frequencies import MONTHS, DAYS
-
-
class TestResamplePeriodIndex(unittest.TestCase):
_multiprocess_can_split_ = True
@@ -1055,6 +1054,7 @@ def test_resample_doesnt_truncate(self):
result = series.resample('D')
self.assertEquals(result.index[0], dates[0])
+
class TestTimeGrouper(unittest.TestCase):
def setUp(self):
@@ -1129,6 +1129,21 @@ def f(x):
result = bingrouped.agg(f)
tm.assert_panel_equal(result, binagg)
+ def test_fails_on_no_datetime_index(self):
+ index_names = ('Int64Index', 'PeriodIndex', 'Index', 'Float64Index',
+ 'MultiIndex')
+ index_funcs = (tm.makeIntIndex, tm.makePeriodIndex,
+ tm.makeUnicodeIndex, tm.makeFloatIndex,
+ lambda m: tm.makeCustomIndex(m, 2))
+ n = 2
+ for name, func in zip(index_names, index_funcs):
+ index = func(n)
+ df = DataFrame({'a': np.random.randn(n)}, index=index)
+ with tm.assertRaisesRegexp(TypeError,
+ "axis must be a DatetimeIndex, "
+ "but got an instance of %r" % name):
+ df.groupby(TimeGrouper('D'))
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 0e5e3d1922ec4..28725a6a9ac56 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -5,6 +5,8 @@
import unittest
import operator
+from distutils.version import LooseVersion
+
import nose
import numpy as np
@@ -49,7 +51,7 @@ def _skip_if_no_pytz():
try:
import pytz
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("pytz not installed")
class TestTimeSeriesDuplicates(unittest.TestCase):
@@ -661,8 +663,8 @@ def test_index_cast_datetime64_other_units(self):
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
- if np.__version__ >= '1.7':
- raise nose.SkipTest("Test requires numpy < 1.7")
+ if np.__version__ >= LooseVersion('1.7'):
+ raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
diff --git a/pandas/tseries/tests/test_timeseries_legacy.py b/pandas/tseries/tests/test_timeseries_legacy.py
index babf60758f751..d1f4f647db0e1 100644
--- a/pandas/tseries/tests/test_timeseries_legacy.py
+++ b/pandas/tseries/tests/test_timeseries_legacy.py
@@ -48,7 +48,7 @@ def _skip_if_no_pytz():
try:
import pytz
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("pytz not installed")
# infortunately, too much has changed to handle these legacy pickles
# class TestLegacySupport(unittest.TestCase):
@@ -59,7 +59,7 @@ class LegacySupport(object):
@classmethod
def setUpClass(cls):
if compat.PY3:
- raise nose.SkipTest
+ raise nose.SkipTest("not compatible with Python >= 3")
pth, _ = os.path.split(os.path.abspath(__file__))
filepath = os.path.join(pth, 'data', 'frame.pickle')
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index 883025bee1ba1..80d85241ae0ff 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -37,7 +37,7 @@ def _skip_if_no_pytz():
try:
import pytz
except ImportError:
- raise nose.SkipTest
+ raise nose.SkipTest("pytz not installed")
try:
import pytz
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 5dda1a9b352d9..39364d21d4aa1 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -30,7 +30,8 @@ def _infer(a, b):
tz = a.tzinfo
if b and b.tzinfo:
if not (tslib.get_timezone(tz) == tslib.get_timezone(b.tzinfo)):
- raise AssertionError()
+ raise AssertionError('Inputs must both have the same timezone,'
+ ' {0} != {1}'.format(tz, b.tzinfo))
return tz
tz = None
if start is not None:
| continuation of #3519 because of a branch rename.
### UPDATE: The exception parsing script has been moved to [a Gist](https://gist.github.com/cpcloud/6745173)
This PR partially addresses #3024.
- all `AssertionError` exceptions now have an informative error message in them
- some `AssertionErrors` have been converted to different `Exception` subclasses, where it makes sense, and there's a corresponding test wherever these were changed.
- all `nose.SkipTest` exceptions now have an informative message
| https://api.github.com/repos/pandas-dev/pandas/pulls/3730 | 2013-05-31T16:42:37Z | 2013-09-28T20:44:42Z | 2013-09-28T20:44:42Z | 2014-06-26T21:36:32Z |
DOC/BLD: squash as many doc-build warnings as possible | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index f428579b78570..4100c4404ece6 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1369,7 +1369,7 @@ For instance:
.. ipython:: python
:suppress:
- reset_printoptions()
+ reset_option('^display\.')
The ``set_printoptions`` function has a number of options for controlling how
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index 2b42288f670bd..7870bdbeb97d3 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -17,7 +17,7 @@ objects. To get started, import numpy and load pandas into your namespace:
from pandas import *
randn = np.random.randn
np.set_printoptions(precision=4, suppress=True)
- set_printoptions(precision=4, max_columns=8)
+ set_option('display.precision', 4, 'display.max_columns', 8)
.. ipython:: python
@@ -571,7 +571,7 @@ R package):
:suppress:
# force a summary to be printed
- set_printoptions(max_rows=5)
+ pd.set_option('display.max_rows', 5)
.. ipython:: python
@@ -582,7 +582,7 @@ R package):
:suppress:
# restore GlobalPrintConfig
- reset_printoptions()
+ pd.reset_option('^display\.')
However, using ``to_string`` will return a string representation of the
DataFrame in tabular form, though it won't always fit the console width:
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 43b512a934558..1c4f5db9a45d0 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -991,9 +991,9 @@ Note that how the index is displayed by be controlled using the
.. ipython:: python
- pd.set_printoptions(multi_sparse=False)
+ pd.set_option('display.multi_sparse', False)
df
- pd.set_printoptions(multi_sparse=True)
+ pd.set_option('display.multi_sparse', True)
Reconstructing the level labels
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/v0.10.1.txt b/doc/source/v0.10.1.txt
index e8435df7b2b0c..3c22e9552c3a2 100644
--- a/doc/source/v0.10.1.txt
+++ b/doc/source/v0.10.1.txt
@@ -67,8 +67,11 @@ Retrieving unique values in an indexable or data column.
.. ipython:: python
- store.unique('df','index')
- store.unique('df','string')
+ import warnings
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore', category=DeprecationWarning)
+ store.unique('df','index')
+ store.unique('df','string')
You can now store ``datetime64`` in data columns
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 9e8a69a32d454..bca38ba55e205 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -81,7 +81,7 @@ def test_eng_float_formatter(self):
fmt.set_eng_float_format(accuracy=0)
repr(self.frame)
- fmt.reset_printoptions()
+ fmt.reset_option('^display.')
def test_repr_tuples(self):
buf = StringIO()
@@ -719,11 +719,11 @@ def test_repr_corner(self):
def test_frame_info_encoding(self):
index = ['\'Til There Was You (1997)',
'ldum klaka (Cold Fever) (1994)']
- fmt.set_printoptions(max_rows=1)
+ fmt.set_option('display.max_rows', 1)
df = DataFrame(columns=['a', 'b', 'c'], index=index)
repr(df)
repr(df.T)
- fmt.set_printoptions(max_rows=200)
+ fmt.set_option('display.max_rows', 200)
def test_large_frame_repr(self):
def wrap_rows_options(f):
@@ -1026,9 +1026,9 @@ def test_to_string_no_index(self):
assert(df_s == expected)
def test_to_string_float_formatting(self):
- fmt.reset_printoptions()
- fmt.set_printoptions(precision=6, column_space=12,
- notebook_repr_html=False)
+ fmt.reset_option('^display.')
+ fmt.set_option('display.precision', 6, 'display.column_space',
+ 12, 'display.notebook_repr_html', False)
df = DataFrame({'x': [0, 0.25, 3456.000, 12e+45, 1.64e+6,
1.7e+8, 1.253456, np.pi, -1e6]})
@@ -1057,7 +1057,7 @@ def test_to_string_float_formatting(self):
'1 0.253')
assert(df_s == expected)
- fmt.reset_printoptions()
+ fmt.reset_option('^display.')
self.assertEqual(get_option("display.precision"), 7)
df = DataFrame({'x': [1e9, 0.2512]})
@@ -1149,7 +1149,7 @@ def test_to_string_index_formatter(self):
self.assertEqual(rs, xp)
def test_to_string_left_justify_cols(self):
- fmt.reset_printoptions()
+ fmt.reset_option('^display.')
df = DataFrame({'x': [3234, 0.253]})
df_s = df.to_string(justify='left')
expected = (' x \n'
@@ -1158,7 +1158,7 @@ def test_to_string_left_justify_cols(self):
assert(df_s == expected)
def test_to_string_format_na(self):
- fmt.reset_printoptions()
+ fmt.reset_option('^display.')
df = DataFrame({'A': [np.nan, -1, -2.1234, 3, 4],
'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})
result = df.to_string()
@@ -1420,13 +1420,13 @@ def test_to_html_index(self):
def test_repr_html(self):
self.frame._repr_html_()
- fmt.set_printoptions(max_rows=1, max_columns=1)
+ fmt.set_option('display.max_rows', 1, 'display.max_columns', 1)
self.frame._repr_html_()
- fmt.set_printoptions(notebook_repr_html=False)
+ fmt.set_option('display.notebook_repr_html', False)
self.frame._repr_html_()
- fmt.reset_printoptions()
+ fmt.reset_option('^display.')
def test_fake_qtconsole_repr_html(self):
def get_ipython():
@@ -1437,11 +1437,11 @@ def get_ipython():
repstr = self.frame._repr_html_()
self.assert_(repstr is not None)
- fmt.set_printoptions(max_rows=5, max_columns=2)
+ fmt.set_option('display.max_rows', 5, 'display.max_columns', 2)
repstr = self.frame._repr_html_()
self.assert_('class' in repstr) # info fallback
- fmt.reset_printoptions()
+ fmt.reset_option('^display.')
def test_to_html_with_classes(self):
df = pandas.DataFrame()
@@ -1751,7 +1751,7 @@ def test_eng_float_formatter(self):
'3 1E+06')
self.assertEqual(result, expected)
- fmt.reset_printoptions()
+ fmt.reset_option('^display.')
def compare(self, formatter, input, output):
formatted_input = formatter(input)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 39452ece7a33d..fa6579ca61358 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -3927,16 +3927,16 @@ def test_repr_unsortable(self):
index=np.arange(50))
foo = repr(unsortable)
- fmt.set_printoptions(precision=3, column_space=10)
+ fmt.set_option('display.precision', 3, 'display.column_space', 10)
repr(self.frame)
- fmt.set_printoptions(max_rows=10, max_columns=2)
+ fmt.set_option('display.max_rows', 10, 'display.max_columns', 2)
repr(self.frame)
- fmt.set_printoptions(max_rows=1000, max_columns=1000)
+ fmt.set_option('display.max_rows', 1000, 'display.max_columns', 1000)
repr(self.frame)
- fmt.reset_printoptions()
+ fmt.reset_option('^display\.')
warnings.filters = warn_filters
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index da8c900e903c2..5926f5d51abfd 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -1371,12 +1371,12 @@ def test_format_sparse_config(self):
category=FutureWarning,
module=".*format")
# #1538
- pd.set_printoptions(multi_sparse=False)
+ pd.set_option('display.multi_sparse', False)
result = self.index.format()
self.assertEqual(result[1], 'foo two')
- pd.reset_printoptions()
+ pd.reset_option("^display\.")
warnings.filters = warn_filters
| https://api.github.com/repos/pandas-dev/pandas/pulls/3725 | 2013-05-31T01:00:22Z | 2013-05-31T13:21:02Z | 2013-05-31T13:21:02Z | 2014-07-12T14:11:36Z | |
DOC: older version of bs4 for 64bit as well | diff --git a/doc/source/install.rst b/doc/source/install.rst
index 407746e3cb000..2a0f67fe8d9e6 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -75,14 +75,14 @@ Dependencies
Recommended Dependencies
~~~~~~~~~~~~~~~~~~~~~~~~
- * `numexpr <http://code.google.com/p/numexpr/>`__: for accelerating certain numerical operations.
+ * `numexpr <http://code.google.com/p/numexpr/>`__: for accelerating certain numerical operations.
``numexpr`` uses multiple cores as well as smart chunking and caching to achieve large speedups.
* `bottleneck <http://berkeleyanalytics.com/bottleneck>`__: for accelerating certain types of ``nan``
evaluations. ``bottleneck`` uses specialized cython routines to achieve large speedups.
.. note::
- You are highly encouraged to install these libraries, as they provide large speedups, especially
+ You are highly encouraged to install these libraries, as they provide large speedups, especially
if working with large data sets.
@@ -105,9 +105,9 @@ Optional Dependencies
.. warning::
- If you are on a 32-bit machine you need to install an older version of
- Beautiful Soup. Version 4.0.2 of BeautifulSoup has been tested on Ubuntu
- 12.04.02 32-bit.
+ You need to install an older version of Beautiful Soup:
+ - Version 4.1.3 and 4.0.2 have been confirmed for 64-bit Ubuntu/Debian
+ - Version 4.0.2 have been confirmed for 32-bit Ubuntu
* Any recent version of ``html5lib`` is okay.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3724 | 2013-05-31T00:08:29Z | 2013-05-31T00:10:59Z | 2013-05-31T00:10:59Z | 2014-07-16T08:10:52Z | |
DOC/BLD: fix annoying sphinx bugs | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 994a57247e50b..69f38bf0c7c61 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1348,8 +1348,8 @@ def iterpairs(seq):
-------
iterator returning overlapping pairs of elements
- Example
- -------
+ Examples
+ --------
>>> iterpairs([1, 2, 3, 4])
[(1, 2), (2, 3), (3, 4)
"""
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f590585eea9fa..9c0a2843370f4 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1374,9 +1374,8 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
mode='w', nanRep=None, encoding=None, quoting=None,
line_terminator='\n', chunksize=None,
tupleize_cols=True, **kwds):
- """
- Write DataFrame to a comma-separated values (csv) file
-
+ r"""Write DataFrame to a comma-separated values (csv) file
+
Parameters
----------
path_or_buf : string or file handle / StringIO
@@ -1390,8 +1389,8 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
cols : sequence, optional
Columns to write
header : boolean or list of string, default True
- Write out column names. If a list of string is given it is
- assumed to be aliases for the column names
+ Write out column names. If a list of string is given it is assumed
+ to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, or False, default None
@@ -1400,21 +1399,23 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
sequence should be given if the DataFrame uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R
- nanRep : deprecated, use na_rep
- mode : Python write mode, default 'w'
+ nanRep : None
+ deprecated, use na_rep
+ mode : str
+ Python write mode, default 'w'
encoding : string, optional
a string representing the encoding to use if the contents are
non-ascii, for python versions prior to 3
- line_terminator: string, default '\n'
+ line_terminator : string, default '\\n'
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL
- chunksize : rows to write at a time
+ chunksize : int or None
+ rows to write at a time
tupleize_cols : boolean, default True
write multi_index columns as a list of tuples (if True)
or new (expanded format) if False)
-
"""
if nanRep is not None: # pragma: no cover
import warnings
@@ -2401,27 +2402,31 @@ def xs(self, key, axis=0, level=None, copy=True):
_xs = xs
def lookup(self, row_labels, col_labels):
- """
- Label-based "fancy indexing" function for DataFrame. Given equal-length
- arrays of row and column labels, return an array of the values
- corresponding to each (row, col) pair.
+ """Label-based "fancy indexing" function for DataFrame. Given
+ equal-length arrays of row and column labels, return an array of the
+ values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
+ The row labels to use for lookup
col_labels : sequence
+ The column labels to use for lookup
Notes
-----
Akin to
- result = []
- for row, col in zip(row_labels, col_labels):
- result.append(df.get_value(row, col))
+ .. code-block:: python
- Example
- -------
+ result = []
+ for row, col in zip(row_labels, col_labels):
+ result.append(df.get_value(row, col))
+
+ Examples
+ --------
values : ndarray
+ The found values
"""
from itertools import izip
@@ -3483,12 +3488,16 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
Parameters
----------
to_replace : str, regex, list, dict, Series, numeric, or None
+
* str or regex:
+
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
+
* list of str, regex, or numeric:
+
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
@@ -3496,7 +3505,9 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str and regex rules apply as above.
+
* dict:
+
- Nested dictionaries, e.g., {'a': {'b': nan}}, are read as
follows: look in column 'a' for the value 'b' and replace it
with nan. You can nest regular expressions as well. Note that
@@ -3505,11 +3516,14 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
- Keys map to column names and values map to substitution
values. You can treat this as a special case of passing two
lists except that you are specifying the column to search in.
+
* None:
+
- This means that the ``regex`` argument must be a string,
compiled regular expression, or list, dict, ndarray or Series
of such elements. If `value` is also ``None`` then this
**must** be a nested dictionary or ``Series``.
+
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to use to fill holes (e.g. 0), alternately a dict of values
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 122355581956d..d409adfd71158 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1374,8 +1374,8 @@ def aggregate(self, func_or_funcs, *args, **kwargs):
-----
agg is an alias for aggregate. Use it.
- Example
- -------
+ Examples
+ --------
>>> series
bar 1.0
baz 2.0
@@ -1523,8 +1523,8 @@ def transform(self, func, *args, **kwargs):
func : function
To apply to each group. Should return a Series with the same index
- Example
- -------
+ Examples
+ --------
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
Returns
@@ -1906,7 +1906,7 @@ def transform(self, func, *args, **kwargs):
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
- Example
+ Examples
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index ea102cb6803d7..c23056ce76a62 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -9,10 +9,6 @@
from itertools import izip
import numpy as np
-from pandas.core.index import Index, MultiIndex
-from pandas.core.frame import DataFrame
-import pandas.core.common as com
-from pandas.util import py3compat
from pandas.io.parsers import TextParser
from pandas.tseries.period import Period
import json
@@ -21,8 +17,7 @@ def read_excel(path_or_buf, sheetname, header=0, skiprows=None, skip_footer=0,
index_col=None, parse_cols=None, parse_dates=False,
date_parser=None, na_values=None, thousands=None, chunksize=None,
kind=None, **kwds):
- """
- Read Excel table into DataFrame
+ """Read an Excel table into a pandas DataFrame
Parameters
----------
@@ -38,23 +33,30 @@ def read_excel(path_or_buf, sheetname, header=0, skiprows=None, skip_footer=0,
Column to use as the row labels of the DataFrame. Pass None if
there is no such column
parse_cols : int or list, default None
- If None then parse all columns,
- If int then indicates last column to be parsed
- If list of ints then indicates list of column numbers to be parsed
- If string then indicates comma separated list of column names and
- column ranges (e.g. "A:E" or "A,C,E:F")
+ * If None then parse all columns,
+ * If int then indicates last column to be parsed
+ * If list of ints then indicates list of column numbers to be parsed
+ * If string then indicates comma separated list of column names and
+ column ranges (e.g. "A:E" or "A,C,E:F")
na_values : list-like, default None
List of additional strings to recognize as NA/NaN
Returns
-------
parsed : DataFrame
+ DataFrame from the passed in Excel file
"""
return ExcelFile(path_or_buf,kind=kind).parse(sheetname=sheetname,
- header=0, skiprows=None, skip_footer=0,
- index_col=None, parse_cols=None, parse_dates=False,
- date_parser=None, na_values=None, thousands=None, chunksize=None,
- kind=None, **kwds)
+ header=0, skiprows=None,
+ skip_footer=0,
+ index_col=None,
+ parse_cols=None,
+ parse_dates=False,
+ date_parser=None,
+ na_values=None,
+ thousands=None,
+ chunksize=None, kind=None,
+ **kwds)
class ExcelFile(object):
"""
@@ -90,8 +92,7 @@ def parse(self, sheetname, header=0, skiprows=None, skip_footer=0,
index_col=None, parse_cols=None, parse_dates=False,
date_parser=None, na_values=None, thousands=None, chunksize=None,
**kwds):
- """
- Read Excel table into DataFrame
+ """Read an Excel table into DataFrame
Parameters
----------
@@ -107,17 +108,19 @@ def parse(self, sheetname, header=0, skiprows=None, skip_footer=0,
Column to use as the row labels of the DataFrame. Pass None if
there is no such column
parse_cols : int or list, default None
- If None then parse all columns,
- If int then indicates last column to be parsed
- If list of ints then indicates list of column numbers to be parsed
- If string then indicates comma separated list of column names and
- column ranges (e.g. "A:E" or "A,C,E:F")
+ * If None then parse all columns
+ * If int then indicates last column to be parsed
+ * If list of ints then indicates list of column numbers to be
+ parsed
+ * If string then indicates comma separated list of column names and
+ column ranges (e.g. "A:E" or "A,C,E:F")
na_values : list-like, default None
List of additional strings to recognize as NA/NaN
Returns
-------
parsed : DataFrame
+ DataFrame parsed from the Excel file
"""
# has_index_names: boolean, default False
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 0ae835c81d870..046263a9cb63c 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -386,9 +386,6 @@ def select(self, key, where=None, start=None, stop=None, columns=None, iterator=
Parameters
----------
key : object
-
- Optional Parameters
- -------------------
where : list of Term (or convertable) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
@@ -421,9 +418,6 @@ def select_as_coordinates(self, key, where=None, start=None, stop=None, **kwargs
Parameters
----------
key : object
-
- Optional Parameters
- -------------------
where : list of Term (or convertable) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
@@ -551,9 +545,6 @@ def remove(self, key, where=None, start=None, stop=None):
----------
key : string
Node to remove or delete rows from
-
- Optional Parameters
- -------------------
where : list of Term (or convertable) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
@@ -602,9 +593,6 @@ def append(self, key, value, columns=None, **kwargs):
----------
key : object
value : {Series, DataFrame, Panel, Panel4D}
-
- Optional Parameters
- -------------------
data_columns : list of columns to create as data columns, or True to use all columns
min_itemsize : dict of columns that specify minimum string sizes
nan_rep : string to use as string nan represenation
@@ -3276,30 +3264,29 @@ def _need_convert(kind):
return False
class Term(object):
- """ create a term object that holds a field, op, and value
+ """create a term object that holds a field, op, and value
- Parameters
- ----------
- field : dict, string term expression, or the field to operate (must be a valid index/column type of DataFrame/Panel)
- op : a valid op (defaults to '=') (optional)
- >, >=, <, <=, =, != (not equal) are allowed
- value : a value or list of values (required)
- queryables : a kinds map (dict of column name -> kind), or None i column is non-indexable
+ Parameters
+ ----------
+ field : dict, string term expression, or the field to operate (must be a valid index/column type of DataFrame/Panel)
+ op : a valid op (defaults to '=') (optional)
+ >, >=, <, <=, =, != (not equal) are allowed
+ value : a value or list of values (required)
+ queryables : a kinds map (dict of column name -> kind), or None i column is non-indexable
- Returns
- -------
- a Term object
-
- Examples
- --------
- Term(dict(field = 'index', op = '>', value = '20121114'))
- Term('index', '20121114')
- Term('index', '>', '20121114')
- Term('index', ['20121114','20121114'])
- Term('index', datetime(2012,11,14))
- Term('major_axis>20121114')
- Term('minor_axis', ['A','B'])
+ Returns
+ -------
+ a Term object
+ Examples
+ --------
+ >>> Term(dict(field = 'index', op = '>', value = '20121114'))
+ >>> Term('index', '20121114')
+ >>> Term('index', '>', '20121114')
+ >>> Term('index', ['20121114','20121114'])
+ >>> Term('index', datetime(2012,11,14))
+ >>> Term('major_axis>20121114')
+ >>> Term('minor_axis', ['A','B'])
"""
_ops = ['<=', '<', '>=', '>', '!=', '==', '=']
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 3b66eba31fca1..5985a8a898b27 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -394,9 +394,10 @@ def to_offset(freqstr):
"""
Return DateOffset object from string representation
- Example
- -------
- to_offset('5Min') -> Minute(5)
+ Examples
+ --------
+ >>> to_offset('5Min')
+ Minute(5)
"""
if freqstr is None:
return None
@@ -444,8 +445,8 @@ def _base_and_stride(freqstr):
"""
Return base freq and stride info from string representation
- Example
- -------
+ Examples
+ --------
_freq_and_stride('5Min') -> 'Min', 5
"""
groups = opattern.match(freqstr)
@@ -478,8 +479,8 @@ def get_offset(name):
"""
Return DateOffset object associated with rule name
- Example
- -------
+ Examples
+ --------
get_offset('EOM') --> BMonthEnd(1)
"""
if name not in _dont_uppercase:
@@ -512,8 +513,8 @@ def get_offset_name(offset):
"""
Return rule name associated with a DateOffset object
- Example
- -------
+ Examples
+ --------
get_offset_name(BMonthEnd(1)) --> 'EOM'
"""
name = _offset_names.get(offset)
| closes #3721. cc @jreback
| https://api.github.com/repos/pandas-dev/pandas/pulls/3722 | 2013-05-30T23:13:59Z | 2013-05-30T23:56:42Z | 2013-05-30T23:56:42Z | 2014-07-17T00:18:56Z |
DOC: clean up io docs and fix up links | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 2e59bf6533205..fea8b95bb2bcf 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -38,9 +38,8 @@ Pickling
load
save
-File IO
-~~~~~~~
-
+Flat File IO
+^^^^^^^^^^^^
.. currentmodule:: pandas.io.parsers
.. autosummary::
@@ -62,9 +61,13 @@ File IO
:toctree: generated/
read_stata
+ read_fwf
+ read_clipboard
.. currentmodule:: pandas.io.html
+HTML IO
+^^^^^^^
.. autosummary::
:toctree: generated/
@@ -80,6 +83,46 @@ SQL
read_sql
+Excel IO
+^^^^^^^^
+.. currentmodule:: pandas.io.parsers
+
+.. autosummary::
+ :toctree: generated/
+
+ ExcelFile.parse
+
+SQL IO
+^^^^^^
+.. currentmodule:: pandas.io.sql
+
+.. autosummary::
+ :toctree: generated/
+
+ read_frame
+ write_frame
+
+.. currentmodule:: pandas.io
+
+.. autosummary::
+ :toctree: generated/
+
+ sql
+
+STATA IO
+^^^^^^^^
+.. currentmodule:: pandas.io.stata
+
+.. autosummary::
+ :toctree: generated/
+
+ read_stata
+ StataReader.data
+ StataReader.data_label
+ StataReader.value_labels
+ StataReader.variable_labels
+ StataWriter.write_file
+
HDFStore: PyTables (HDF5)
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -94,6 +137,17 @@ HDFStore: PyTables (HDF5)
HDFStore.get
HDFStore.select
+Top-level Missing Data
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. currentmodule:: pandas.core.common
+
+.. autosummary::
+ :toctree: generated/
+
+ isnull
+ notnull
+
Standard moving window functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 5bf3075f2688e..a8d5cf4ab2f60 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -667,9 +667,9 @@ should pass the ``escapechar`` option:
Files with Fixed Width Columns
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-While `read_csv` reads delimited data, the :func:`~pandas.io.parsers.read_fwf`
+While ``read_csv`` reads delimited data, the :func:`~pandas.io.parsers.read_fwf`
function works with data files that have known and fixed column widths.
-The function parameters to `read_fwf` are largely the same as `read_csv` with
+The function parameters to ``read_fwf`` are largely the same as `read_csv` with
two extra parameters:
- ``colspecs``: a list of pairs (tuples), giving the extents of the
@@ -2123,23 +2123,30 @@ Writing to STATA format
.. _io.StataWriter:
-The method ``to_stata`` will write a DataFrame into a .dta file.
+The method :func:`~pandas.io.stata.StataWriter.write_file` of
+:class:`~pandas.io.stata.StataWriter` will write a DataFrame into a .dta file.
The format version of this file is always the latest one, 115.
.. ipython:: python
- df = DataFrame(randn(10,2),columns=list('AB'))
- df.to_stata('stata.dta')
+ from pandas.io.stata import StataWriter
+ df = DataFrame(randn(10, 2), columns=list('AB'))
+ writer = StataWriter('stata.dta', df)
+ writer.write_file()
Reading from STATA format
~~~~~~~~~~~~~~~~~~~~~~~~~
-.. _io.StataReader:
+.. _io.statareader:
.. versionadded:: 0.11.1
The top-level function ``read_stata`` will read a dta format file
and return a DataFrame:
+The class :class:`~pandas.io.stata.StataReader` will read the header of the
+given dta file at initialization. Its method
+:func:`~pandas.io.stata.StataReader.data` will read the observations,
+converting them to a DataFrame which is returned:
.. ipython:: python
@@ -2153,6 +2160,7 @@ also be retrieved by the function ``variable_labels``, which requires data to be
called before (see ``pandas.io.stata.StataReader``).
The StataReader supports .dta Formats 104, 105, 108, 113-115.
+Alternatively, the function :func:`~pandas.io.stata.read_stata` can be used
.. ipython:: python
:suppress:
diff --git a/pandas/core/common.py b/pandas/core/common.py
index ee8b3bbbda647..994a57247e50b 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -48,17 +48,19 @@ class AmbiguousIndexError(PandasError, KeyError):
_INT64_DTYPE = np.dtype(np.int64)
def isnull(obj):
- '''
- Detect missing values (NaN in numeric arrays, None/NaN in object arrays)
+ """Detect missing values (NaN in numeric arrays, None/NaN in object arrays)
Parameters
----------
- arr: ndarray or object value
+ arr : ndarray or object value
+ Object to check for null-ness
Returns
-------
- boolean ndarray or boolean
- '''
+ isnulled : array-like of bool or bool
+ Array or bool indicating whether an object is null or if an array is
+ given which of the element is null.
+ """
return _isnull(obj)
@@ -187,18 +189,20 @@ def _isnull_ndarraylike_old(obj):
def notnull(obj):
- '''
- Replacement for numpy.isfinite / -numpy.isnan which is suitable
- for use on object arrays.
+ """Replacement for numpy.isfinite / -numpy.isnan which is suitable for use
+ on object arrays.
Parameters
----------
- arr: ndarray or object value
+ arr : ndarray or object value
+ Object to check for *not*-null-ness
Returns
-------
- boolean ndarray or boolean
- '''
+ isnulled : array-like of bool or bool
+ Array or bool indicating whether an object is *not* null or if an array
+ is given which of the element is *not* null.
+ """
res = isnull(obj)
if np.isscalar(res):
return not res
diff --git a/pandas/io/__init__.py b/pandas/io/__init__.py
index e69de29bb2d1d..a984c40cdc098 100644
--- a/pandas/io/__init__.py
+++ b/pandas/io/__init__.py
@@ -0,0 +1,2 @@
+import sql
+import stata
| closes #3718.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3720 | 2013-05-30T16:09:36Z | 2013-05-30T16:41:20Z | 2013-05-30T16:41:20Z | 2014-07-16T08:10:50Z |
BLD: switch MAJOR and MICRO | diff --git a/setup.py b/setup.py
index bbad3e87a3fa0..2e7fd778578fd 100755
--- a/setup.py
+++ b/setup.py
@@ -183,9 +183,9 @@ def build_extensions(self):
'Topic :: Scientific/Engineering',
]
-MAJOR = 1
+MAJOR = 0
MINOR = 11
-MICRO = 0
+MICRO = 1
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
| cc @y-p
| https://api.github.com/repos/pandas-dev/pandas/pulls/3716 | 2013-05-30T14:47:20Z | 2013-05-30T14:56:07Z | 2013-05-30T14:56:07Z | 2014-07-16T08:10:48Z |
BUG: restore 10.1 expand_repr behaviour, only for < max_cols, if wider then term GH3706 | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 27aa68ee39d8e..9e276e01dd723 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -703,10 +703,13 @@ def __unicode__(self):
self.to_string(buf=buf)
else:
width, _ = fmt.get_console_size()
- max_rows = get_option("display.max_rows")
- if (get_option("display.expand_frame_repr")
- and fits_vertical):
- # and len(self.columns) < max_rows)
+ max_columns = get_option("display.max_columns")
+ expand_repr = get_option("display.expand_frame_repr")
+ # within max_cols and max_rows, but cols exceed width
+ # of terminal, then use expand_repr
+ if (fits_vertical and
+ expand_repr and
+ len(self.columns) <= max_columns):
self.to_string(buf=buf, line_width=width)
else:
max_info_rows = get_option('display.max_info_rows')
| #3706
| https://api.github.com/repos/pandas-dev/pandas/pulls/3713 | 2013-05-30T05:09:21Z | 2013-05-30T05:10:56Z | 2013-05-30T05:10:56Z | 2014-07-16T08:10:45Z |
API: deprecate unused DataFrame.replace arguments | diff --git a/RELEASE.rst b/RELEASE.rst
index 1d63a7f53954d..e611b330b08f0 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -110,6 +110,8 @@ pandas 0.11.1
- added ``pandas.io.api`` for i/o imports
- removed ``Excel`` support to ``pandas.io.excel``
- added top-level ``pd.read_sql`` and ``to_sql`` DataFrame methods
+ - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
+ deprecated
**Bug Fixes**
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 9113c74c6813b..c025450c44cca 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -83,6 +83,8 @@ API changes
- ``DataFrame.interpolate()`` is now deprecated. Please use
``DataFrame.fillna()`` and ``DataFrame.replace()`` instead. (GH3582_,
GH3675_, GH3676_)
+ - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
+ deprecated
- Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column
to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3ad8de077f1ea..1dfeae997451a 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3481,9 +3481,9 @@ def bfill(self, axis=0, inplace=False, limit=None):
return self.fillna(method='bfill', axis=axis, inplace=inplace,
limit=limit)
- def replace(self, to_replace=None, value=None, method='pad', axis=0,
- inplace=False, limit=None, regex=False, infer_types=False):
- """Replace values given in 'to_replace' with 'value' or using 'method'.
+ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
+ regex=False, infer_types=False, method=None, axis=None):
+ """Replace values given in 'to_replace' with 'value'.
Parameters
----------
@@ -3521,13 +3521,6 @@ def replace(self, to_replace=None, value=None, method='pad', axis=0,
specifying which value to use for each column (columns not in the
dict will not be filled). Regular expressions, strings and lists or
dicts of such objects are also allowed.
- method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad'
- Method to use for filling holes in reindexed Series
- pad / ffill: propagate last valid observation forward to next valid
- backfill / bfill: use NEXT valid observation to fill gap
- axis : {0, 1}, default 0
- 0: fill column-by-column
- 1: fill row-by-row
inplace : boolean, default False
If True, fill the DataFrame in place. Note: this will modify any
other views on this DataFrame, like if you took a no-copy slice of
@@ -3580,10 +3573,17 @@ def replace(self, to_replace=None, value=None, method='pad', axis=0,
if not isinstance(regex, bool) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is "
"not a bool")
- self._consolidate_inplace()
+ if method is not None:
+ from warnings import warn
+ warn('the "method" argument is deprecated and will be removed in'
+ 'v0.12; this argument has no effect')
- axis = self._get_axis_number(axis)
- method = com._clean_fill_method(method)
+ if axis is not None:
+ from warnings import warn
+ warn('the "axis" argument is deprecated and will be removed in'
+ 'v0.12; this argument has no effect')
+
+ self._consolidate_inplace()
if value is None:
if not isinstance(to_replace, (dict, Series)):
@@ -3615,8 +3615,8 @@ def replace(self, to_replace=None, value=None, method='pad', axis=0,
else:
to_replace, value = keys, values
- return self.replace(to_replace, value, method=method, axis=axis,
- inplace=inplace, limit=limit, regex=regex,
+ return self.replace(to_replace, value, inplace=inplace,
+ limit=limit, regex=regex,
infer_types=infer_types)
else:
if not len(self.columns):
@@ -3629,7 +3629,7 @@ def replace(self, to_replace=None, value=None, method='pad', axis=0,
for c, src in to_replace.iteritems():
if c in value and c in self:
new_data = new_data.replace(src, value[c],
- filter=[ c ],
+ filter=[c],
inplace=inplace,
regex=regex)
@@ -3638,7 +3638,7 @@ def replace(self, to_replace=None, value=None, method='pad', axis=0,
for k, src in to_replace.iteritems():
if k in self:
new_data = new_data.replace(src, value,
- filter = [ k ],
+ filter=[k],
inplace=inplace,
regex=regex)
else:
@@ -3667,9 +3667,8 @@ def replace(self, to_replace=None, value=None, method='pad', axis=0,
"regular expression or a list or dict of "
"strings or regular expressions, you "
"passed a {0}".format(type(regex)))
- return self.replace(regex, value, method=method, axis=axis,
- inplace=inplace, limit=limit, regex=True,
- infer_types=infer_types)
+ return self.replace(regex, value, inplace=inplace, limit=limit,
+ regex=True, infer_types=infer_types)
else:
# dest iterable dict-like
@@ -3679,7 +3678,7 @@ def replace(self, to_replace=None, value=None, method='pad', axis=0,
for k, v in value.iteritems():
if k in self:
new_data = new_data.replace(to_replace, v,
- filter=[ k ],
+ filter=[k],
inplace=inplace,
regex=regex)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 4e892f884e541..1de643985d893 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -6360,8 +6360,7 @@ def test_replace_inplace(self):
res = tsframe.replace(nan, 0, inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
- self.assertRaises(TypeError, self.tsframe.replace, nan, method='pad',
- inplace=True)
+ self.assertRaises(TypeError, self.tsframe.replace, nan, inplace=True)
# mixed type
self.mixed_frame['foo'][5:20] = nan
@@ -6953,21 +6952,18 @@ def test_interpolate(self):
pass
def test_replace_value_is_none(self):
- self.assertRaises(TypeError, self.tsframe.replace, nan, method='pad')
+ self.assertRaises(TypeError, self.tsframe.replace, nan)
orig_value = self.tsframe.iloc[0, 0]
orig2 = self.tsframe.iloc[1, 0]
self.tsframe.iloc[0, 0] = nan
self.tsframe.iloc[1, 0] = 1
- result = self.tsframe.replace(to_replace={nan: 0}, method='pad',
- axis=1)
- expected = self.tsframe.T.replace(
- to_replace={nan: 0}, method='pad').T
+ result = self.tsframe.replace(to_replace={nan: 0})
+ expected = self.tsframe.T.replace(to_replace={nan: 0}).T
assert_frame_equal(result, expected)
- result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8},
- method='bfill')
+ result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8})
tsframe = self.tsframe.copy()
tsframe.iloc[0, 0] = 0
tsframe.iloc[1, 0] = -1e8
@@ -7088,25 +7084,6 @@ def test_replace_input_formats(self):
expected.replace(to_rep[i], -1, inplace=True)
assert_frame_equal(result, expected)
- def test_replace_axis(self):
- self.tsframe['A'][:5] = nan
- self.tsframe['A'][-5:] = nan
-
- zero_filled = self.tsframe.replace(nan, 0, axis=1)
- assert_frame_equal(zero_filled, self.tsframe.fillna(0, axis=1))
-
- self.assertRaises(TypeError, self.tsframe.replace, method='pad',
- axis=1)
-
- # mixed type
- self.mixed_frame['foo'][5:20] = nan
- self.mixed_frame['A'][-10:] = nan
-
- result = self.mixed_frame.replace(np.nan, -1e8, axis=1)
- expected = self.mixed_frame.fillna(value=-1e8, axis=1)
- assert_frame_equal(result, expected)
-
-
def test_replace_limit(self):
pass
| @jreback i forgot to do this as per our conversation
| https://api.github.com/repos/pandas-dev/pandas/pulls/3712 | 2013-05-30T00:11:09Z | 2013-05-30T16:41:40Z | 2013-05-30T16:41:40Z | 2014-07-16T08:10:43Z |
improve error message when xlrd import fails | diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 68db97b7a3c53..0dde47e6065e4 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2000,7 +2000,7 @@ def __init__(self, path_or_buf, kind=None, **kwds):
import xlrd # throw an ImportError if we need to
ver = tuple(map(int,xlrd.__VERSION__.split(".")[:2]))
if ver < (0, 9):
- raise ImportError("pandas requires xlrd >= 0.9.0 for excel support")
+ raise ImportError("pandas requires xlrd >= 0.9.0 for excel support, current version "+xlrd.__VERSION__)
self.path_or_buf = path_or_buf
self.tmpfile = None
| https://api.github.com/repos/pandas-dev/pandas/pulls/3709 | 2013-05-29T23:02:12Z | 2013-05-29T23:47:53Z | 2013-05-29T23:47:53Z | 2013-06-10T19:20:35Z | |
BUG: allow DataFrame.from_records to accept empty recarrays | diff --git a/RELEASE.rst b/RELEASE.rst
index 76aac0d73466c..38a8b42fcde6f 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -173,6 +173,7 @@ pandas 0.11.1
- ``read_html()`` now only allows a single backend: ``html5lib`` (GH3616_)
- ``convert_objects`` with ``convert_dates='coerce'`` was parsing some single-letter strings
into today's date
+ - ``DataFrame.from_records`` did not accept empty recarrays (GH3682_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -246,6 +247,7 @@ pandas 0.11.1
.. _GH3582: https://github.com/pydata/pandas/issues/3582
.. _GH3676: https://github.com/pydata/pandas/issues/3676
.. _GH3675: https://github.com/pydata/pandas/issues/3675
+.. _GH3682: https://github.com/pydata/pandas/issues/3682
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index ffa2cc6dc7cab..c7f590d6ebbe8 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -208,6 +208,8 @@ Bug Fixes
to replace all occurrences of the string ``'.'`` with ``NaN``.
+ - ``DataFrame.from_records`` did not accept empty recarrays (GH3682_)
+
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
on GitHub for a complete list.
@@ -247,3 +249,4 @@ on GitHub for a complete list.
.. _GH3582: https://github.com/pydata/pandas/issues/3582
.. _GH3676: https://github.com/pydata/pandas/issues/3676
.. _GH3675: https://github.com/pydata/pandas/issues/3675
+.. _GH3682: https://github.com/pydata/pandas/issues/3682
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 68a6c9e261c97..68edceb29e6b2 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5756,7 +5756,11 @@ def _to_arrays(data, columns, coerce_float=False, dtype=None):
return arrays, columns
- if len(data) == 0:
+ if not len(data):
+ if isinstance(data, np.ndarray):
+ columns = data.dtype.names
+ if columns is not None:
+ return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float,
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index fddbbf93552b3..3711a814cc273 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -3522,6 +3522,18 @@ def test_from_records_empty(self):
expected = DataFrame(columns=['a','b','b'])
assert_frame_equal(result, expected)
+ def test_from_records_empty_with_nonempty_fields_gh3682(self):
+ a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])
+ df = DataFrame.from_records(a, index='id')
+ assert_array_equal(df.index, Index([1], name='id'))
+ self.assertEqual(df.index.name, 'id')
+ assert_array_equal(df.columns, Index(['value']))
+
+ b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])
+ df = DataFrame.from_records(b, index='id')
+ assert_array_equal(df.index, Index([], name='id'))
+ self.assertEqual(df.index.name, 'id')
+
def test_to_records_floats(self):
df = DataFrame(np.random.rand(10, 10))
df.to_records()
| closes #3682.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3708 | 2013-05-29T14:23:51Z | 2013-05-30T00:46:35Z | 2013-05-30T00:46:35Z | 2014-07-06T13:17:47Z |
DOC: document read_html and to_html | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 92747f9906da2..5bf3075f2688e 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -938,8 +938,106 @@ Reading HTML Content
.. versionadded:: 0.11.1
-The toplevel :func:`~pandas.io.parsers.read_html` function can accept an HTML
+The toplevel :func:`~pandas.io.html.read_html` function can accept an HTML
string/file/url and will parse HTML tables into list of pandas DataFrames.
+Let's look at a few examples.
+
+Read a URL with no options
+
+.. ipython:: python
+
+ url = 'http://www.fdic.gov/bank/individual/failed/banklist.html'
+ dfs = read_html(url)
+ dfs
+
+.. note::
+
+ ``read_html`` returns a ``list`` of ``DataFrame`` objects, even if there is
+ only a single table contained in the HTML content
+
+Read a URL and match a table that contains specific text
+
+.. ipython:: python
+
+ match = 'Metcalf Bank'
+ dfs = read_html(url, match=match)
+ len(dfs)
+ dfs[0]
+
+Specify a header row (by default ``<th>`` elements are used to form the column
+index); if specified, the header row is taken from the data minus the parsed
+header elements (``<th>`` elements).
+
+.. ipython:: python
+
+ dfs = read_html(url, header=0)
+ len(dfs)
+ dfs[0]
+
+Specify an index column
+
+.. ipython:: python
+
+ dfs = read_html(url, index_col=0)
+ len(dfs)
+ dfs[0]
+ dfs[0].index.name
+
+Specify a number of rows to skip
+
+.. ipython:: python
+
+ dfs = read_html(url, skiprows=0)
+ len(dfs)
+ dfs[0]
+
+Specify a number of rows to skip using a list (``xrange`` (Python 2 only) works
+as well)
+
+.. ipython:: python
+
+ dfs = read_html(url, skiprows=range(2))
+ len(dfs)
+ dfs[0]
+
+Don't infer numeric and date types
+
+.. ipython:: python
+
+ dfs = read_html(url, infer_types=False)
+ len(dfs)
+ dfs[0]
+
+Specify an HTML attribute
+
+.. ipython:: python
+
+ dfs = read_html(url)
+ len(dfs)
+ dfs[0]
+
+Use some combination of the above
+
+.. ipython:: python
+
+ dfs = read_html(url, match='Metcalf Bank', index_col=0)
+ len(dfs)
+ dfs[0]
+
+Read in pandas ``to_html`` output (with some loss of floating point precision)
+
+.. ipython:: python
+
+ df = DataFrame(randn(2, 2))
+ s = df.to_html(float_format='{0:.40g}'.format)
+ dfin = read_html(s, index_col=0)
+ df
+ dfin[0]
+ df.index
+ df.columns
+ dfin[0].index
+ dfin[0].columns
+ np.allclose(df, dfin[0])
Writing to HTML files
@@ -947,9 +1045,134 @@ Writing to HTML files
.. _io.html:
-DataFrame object has an instance method ``to_html`` which renders the contents
-of the DataFrame as an html table. The function arguments are as in the method
-``to_string`` described above.
+``DataFrame`` objects have an instance method ``to_html`` which renders the
+contents of the ``DataFrame`` as an HTML table. The function arguments are as
+in the method ``to_string`` described above.
+
+.. note::
+
+ Not all of the possible options for ``DataFrame.to_html`` are shown here for
+ brevity's sake. See :func:`~pandas.DataFrame.to_html` for the full set of
+ options.
+
+.. ipython:: python
+ :suppress:
+
+ def write_html(df, filename, *args, **kwargs):
+ static = os.path.abspath(os.path.join('source', '_static'))
+ with open(os.path.join(static, filename + '.html'), 'w') as f:
+ df.to_html(f, *args, **kwargs)
+
+.. ipython:: python
+
+ df = DataFrame(randn(2, 2))
+ df
+ print df.to_html() # raw html
+
+.. ipython:: python
+ :suppress:
+
+ write_html(df, 'basic')
+
+HTML:
+
+.. raw:: html
+ :file: _static/basic.html
+
+The ``columns`` argument will limit the columns shown
+
+.. ipython:: python
+
+ print df.to_html(columns=[0])
+
+.. ipython:: python
+ :suppress:
+
+ write_html(df, 'columns', columns=[0])
+
+HTML:
+
+.. raw:: html
+ :file: _static/columns.html
+
+``float_format`` takes a Python callable to control the precision of floating
+point values
+
+.. ipython:: python
+
+ print df.to_html(float_format='{0:.10f}'.format)
+
+.. ipython:: python
+ :suppress:
+
+ write_html(df, 'float_format', float_format='{0:.10f}'.format)
+
+HTML:
+
+.. raw:: html
+ :file: _static/float_format.html
+
+``bold_rows`` will make the row labels bold by default, but you can turn that
+off
+
+.. ipython:: python
+
+ print df.to_html(bold_rows=False)
+
+.. ipython:: python
+ :suppress:
+
+ write_html(df, 'nobold', bold_rows=False)
+
+.. raw:: html
+ :file: _static/nobold.html
+
+The ``classes`` argument provides the ability to give the resulting HTML
+table CSS classes. Note that these classes are *appended* to the existing
+``'dataframe'`` class.
+
+.. ipython:: python
+
+ print df.to_html(classes=['awesome_table_class', 'even_more_awesome_class'])
+
+Finally, the ``escape`` argument allows you to control whether the
+"<", ">" and "&" characters are escaped in the resulting HTML (by default it is
+``True``). So to get the HTML without escaped characters pass ``escape=False``
+
+.. ipython:: python
+
+ df = DataFrame({'a': list('&<>'), 'b': randn(3)})
+
+
+.. ipython:: python
+ :suppress:
+
+ write_html(df, 'escape')
+ write_html(df, 'noescape', escape=False)
+
+Escaped:
+
+.. ipython:: python
+
+ print df.to_html()
+
+.. raw:: html
+ :file: _static/escape.html
+
+Not escaped:
+
+.. ipython:: python
+
+ print df.to_html(escape=False)
+
+.. raw:: html
+ :file: _static/noescape.html
+
+.. note::
+
+ Some browsers may not show a difference in the rendering of the previous two
+ HTML tables.
+
Clipboard
---------
diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index a9fc412a6b8e3..5ff436f6d0d50 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -357,7 +357,7 @@ Replace the '.' with ``nan`` (str -> str)
:suppress:
from numpy.random import rand, randn
- nan = np.nan
+ from numpy import nan
from pandas import DataFrame
.. ipython:: python
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ea8dee51565ac..3ad8de077f1ea 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1598,6 +1598,7 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
classes=None, escape=True):
"""
to_html-specific options
+
bold_rows : boolean, default True
Make the row labels bold in the output
classes : str or list or tuple, default None
@@ -1605,7 +1606,7 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
escape : boolean, default True
Convert the characters <, >, and & to HTML-safe sequences.
- Render a DataFrame to an html table.
+ Render a DataFrame as an HTML table.
"""
import warnings
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 915c30ecc3c40..9b2f292d30f47 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -18,7 +18,7 @@
import numpy as np
-from pandas import DataFrame, MultiIndex
+from pandas import DataFrame, MultiIndex, Index, Series, isnull
from pandas.io.parsers import _is_url
@@ -398,7 +398,6 @@ def _parse_tables(self, doc, match, attrs):
if not tables:
raise AssertionError("No tables found matching "
"'{0}'".format(match.pattern))
- #import ipdb; ipdb.set_trace()
return tables
def _setup_build_doc(self):
@@ -560,6 +559,17 @@ def _parse_raw_tfoot(self, table):
table.xpath(expr)]
+def _maybe_convert_index_type(index):
+ try:
+ index = index.astype(int)
+ except (TypeError, ValueError):
+ if not isinstance(index, MultiIndex):
+ s = Series(index, name=index.name)
+ index = Index(s.convert_objects(convert_numeric=True),
+ name=index.name)
+ return index
+
+
def _data_to_frame(data, header, index_col, infer_types, skiprows):
"""Parse a BeautifulSoup table into a DataFrame.
@@ -620,6 +630,12 @@ def _data_to_frame(data, header, index_col, infer_types, skiprows):
raise ValueError('Labels {0} not found when trying to skip'
' rows'.format(it))
+ # convert to numbers/dates where possible
+ # must be sequential since dates trump numbers if both args are given
+ if infer_types:
+ df = df.convert_objects(convert_numeric=True)
+ df = df.convert_objects(convert_dates='coerce')
+
if header is not None:
header_rows = df.iloc[header]
@@ -632,11 +648,6 @@ def _data_to_frame(data, header, index_col, infer_types, skiprows):
df = df.drop(df.index[header])
- # convert to numbers/dates where possible
- # must be sequential since dates trump numbers if both args are given
- if infer_types:
- df = df.convert_objects(convert_numeric=True)
-
if index_col is not None:
cols = df.columns[index_col]
@@ -648,12 +659,16 @@ def _data_to_frame(data, header, index_col, infer_types, skiprows):
# drop by default
df.set_index(cols, inplace=True)
if df.index.nlevels == 1:
- if not (df.index.name or df.index.name is None):
+ if isnull(df.index.name) or not df.index.name:
df.index.name = None
else:
names = [name or None for name in df.index.names]
df.index = MultiIndex.from_tuples(df.index.values, names=names)
+ if infer_types:
+ df.index = _maybe_convert_index_type(df.index)
+ df.columns = _maybe_convert_index_type(df.columns)
+
return df
| https://api.github.com/repos/pandas-dev/pandas/pulls/3704 | 2013-05-28T21:56:08Z | 2013-05-30T16:36:50Z | 2013-05-30T16:36:50Z | 2014-07-16T08:10:33Z | |
ENH: allow to_html and to_latex to take a path for their first argument | diff --git a/RELEASE.rst b/RELEASE.rst
index 5293b858b72a3..71d8054283b57 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -179,6 +179,8 @@ pandas 0.11.1
into today's date
- ``DataFrame.from_records`` did not accept empty recarrays (GH3682_)
- ``DataFrame.to_csv`` will succeed with the deprecated option ``nanRep``, @tdsmith
+ - ``DataFrame.to_html`` and ``DataFrame.to_latex`` now accept a path for
+ their first argument (GH3702_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -255,6 +257,7 @@ pandas 0.11.1
.. _GH3676: https://github.com/pydata/pandas/issues/3676
.. _GH3675: https://github.com/pydata/pandas/issues/3675
.. _GH3682: https://github.com/pydata/pandas/issues/3682
+.. _GH3702: https://github.com/pydata/pandas/issues/3702
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index bd4a7c49fbb4d..ae400a199b372 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -233,6 +233,9 @@ Bug Fixes
- ``DataFrame.from_records`` did not accept empty recarrays (GH3682_)
+ - ``DataFrame.to_html`` and ``DataFrame.to_latex`` now accept a path for
+ their first argument (GH3702_)
+
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
on GitHub for a complete list.
@@ -274,3 +277,4 @@ on GitHub for a complete list.
.. _GH3675: https://github.com/pydata/pandas/issues/3675
.. _GH3682: https://github.com/pydata/pandas/issues/3682
.. _GH3679: https://github.com/pydata/pandas/issues/3679
+.. _GH3702: https://github.com/pydata/pandas/issues/3702
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 7327f3b1b2175..40d80e91f0264 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -364,21 +364,31 @@ def get_col_type(dtype):
raise AssertionError(('column_format must be str or unicode, not %s'
% type(column_format)))
- self.buf.write('\\begin{tabular}{%s}\n' % column_format)
- self.buf.write('\\toprule\n')
-
- nlevels = frame.index.nlevels
- for i, row in enumerate(izip(*strcols)):
- if i == nlevels:
- self.buf.write('\\midrule\n') # End of header
- crow = [(x.replace('_', '\\_')
- .replace('%', '\\%')
- .replace('&', '\\&') if x else '{}') for x in row]
- self.buf.write(' & '.join(crow))
- self.buf.write(' \\\\\n')
-
- self.buf.write('\\bottomrule\n')
- self.buf.write('\\end{tabular}\n')
+ def write(buf, frame, column_format, strcols):
+ buf.write('\\begin{tabular}{%s}\n' % column_format)
+ buf.write('\\toprule\n')
+
+ nlevels = frame.index.nlevels
+ for i, row in enumerate(izip(*strcols)):
+ if i == nlevels:
+ buf.write('\\midrule\n') # End of header
+ crow = [(x.replace('_', '\\_')
+ .replace('%', '\\%')
+ .replace('&', '\\&') if x else '{}') for x in row]
+ buf.write(' & '.join(crow))
+ buf.write(' \\\\\n')
+
+ buf.write('\\bottomrule\n')
+ buf.write('\\end{tabular}\n')
+
+ if hasattr(self.buf, 'write'):
+ write(self.buf, frame, column_format, strcols)
+ elif isinstance(self.buf, basestring):
+ with open(self.buf, 'w') as f:
+ write(f, frame, column_format, strcols)
+ else:
+ raise TypeError('buf is not a file name and it has no write '
+ 'method')
def _format_col(self, i):
formatter = self._get_formatter(i)
@@ -392,7 +402,14 @@ def to_html(self, classes=None):
Render a DataFrame to a html table.
"""
html_renderer = HTMLFormatter(self, classes=classes)
- html_renderer.write_result(self.buf)
+ if hasattr(self.buf, 'write'):
+ html_renderer.write_result(self.buf)
+ elif isinstance(self.buf, basestring):
+ with open(self.buf, 'w') as f:
+ html_renderer.write_result(f)
+ else:
+ raise TypeError('buf is not a file name and it has no write '
+ ' method')
def _get_formatted_column_labels(self):
from pandas.core.index import _sparsify
@@ -574,7 +591,6 @@ def write_result(self, buf):
indent = self._write_body(indent)
self.write('</table>', indent)
-
_put_lines(buf, self.elements)
def _write_header(self, indent):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index a04e931cf07e3..ab8a48f4b8eb9 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1222,7 +1222,11 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None,
if buf is None:
return the_repr
else:
- print >> buf, the_repr
+ try:
+ buf.write(the_repr)
+ except AttributeError:
+ with open(buf, 'w') as f:
+ f.write(the_repr)
def _get_repr(self, name=False, print_header=False, length=True, dtype=True,
na_rep='NaN', float_format=None):
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index fb1465f3cdc7b..9e8a69a32d454 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -1216,6 +1216,26 @@ def test_to_html(self):
frame = DataFrame(index=np.arange(200))
frame.to_html()
+ def test_to_html_filename(self):
+ biggie = DataFrame({'A': randn(200),
+ 'B': tm.makeStringIndex(200)},
+ index=range(200))
+
+ biggie['A'][:20] = nan
+ biggie['B'][:20] = nan
+ with tm.ensure_clean('test.html') as path:
+ biggie.to_html(path)
+ with open(path, 'r') as f:
+ s = biggie.to_html()
+ s2 = f.read()
+ self.assertEqual(s, s2)
+
+ frame = DataFrame(index=np.arange(200))
+ with tm.ensure_clean('test.html') as path:
+ frame.to_html(path)
+ with open(path, 'r') as f:
+ self.assertEqual(frame.to_html(), f.read())
+
def test_to_html_with_no_bold(self):
x = DataFrame({'x': randn(5)})
ashtml = x.to_html(bold_rows=False)
@@ -1474,6 +1494,13 @@ def test_dict_entries(self):
self.assertTrue("'a': 1" in val)
self.assertTrue("'b': 2" in val)
+ def test_to_latex_filename(self):
+ with tm.ensure_clean('test.tex') as path:
+ self.frame.to_latex(path)
+
+ with open(path, 'r') as f:
+ self.assertEqual(self.frame.to_latex(), f.read())
+
def test_to_latex(self):
# it works!
self.frame.to_latex()
| https://api.github.com/repos/pandas-dev/pandas/pulls/3702 | 2013-05-28T17:33:25Z | 2013-05-30T16:34:48Z | 2013-05-30T16:34:48Z | 2014-06-20T04:17:09Z | |
DOC: use na_rep not nanRep in .to_csv() | diff --git a/doc/source/io.rst b/doc/source/io.rst
index e192eea0d2b12..a1ba88c0d798b 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -871,7 +871,7 @@ allows storing the contents of the object as a comma-separated-values file. The
function takes a number of arguments. Only the first is required.
- ``path``: A string path to the file to write
- - ``nanRep``: A string representation of a missing value (default '')
+ - ``na_rep``: A string representation of a missing value (default '')
- ``cols``: Columns to write (default None)
- ``header``: Whether to write out the column names (default True)
- ``index``: whether to write row (index) names (default True)
| nanRep works but is deprecated and emits a FutureWarning.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3700 | 2013-05-28T05:03:16Z | 2013-05-30T00:53:56Z | 2013-05-30T00:53:56Z | 2015-08-15T22:13:32Z |
ENH/API: implement __nonzero__ for NDFrame | diff --git a/RELEASE.rst b/RELEASE.rst
index e611b330b08f0..9283bada2d720 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -112,6 +112,7 @@ pandas 0.11.1
- added top-level ``pd.read_sql`` and ``to_sql`` DataFrame methods
- the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
deprecated
+ - Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_)
**Bug Fixes**
@@ -266,6 +267,8 @@ pandas 0.11.1
.. _GH3675: https://github.com/pydata/pandas/issues/3675
.. _GH3682: https://github.com/pydata/pandas/issues/3682
.. _GH3702: https://github.com/pydata/pandas/issues/3702
+.. _GH3691: https://github.com/pydata/pandas/issues/3691
+.. _GH3696: https://github.com/pydata/pandas/issues/3696
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index c025450c44cca..289c011f7a7a9 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -88,6 +88,8 @@ API changes
- Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column
to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_)
+ - Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_)
+
- IO api
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1dfeae997451a..8dc1a921eecad 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -595,14 +595,6 @@ def shape(self):
#----------------------------------------------------------------------
# Class behavior
-
- @property
- def empty(self):
- return not (len(self.columns) > 0 and len(self.index) > 0)
-
- def __nonzero__(self):
- raise ValueError("Cannot call bool() on DataFrame.")
-
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index aa574219a259e..7dd0315d7d90e 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -559,6 +559,13 @@ def __repr__(self):
def values(self):
return self._data.as_matrix()
+ @property
+ def empty(self):
+ return not all(len(ax) > 0 for ax in self.axes)
+
+ def __nonzero__(self):
+ return not self.empty
+
@property
def ndim(self):
return self._data.ndim
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 1de643985d893..39452ece7a33d 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -10379,9 +10379,15 @@ def test_index_namedtuple(self):
df = DataFrame([(1, 2), (3, 4)], index=index, columns=["A", "B"])
self.assertEqual(df.ix[IndexType("foo", "bar")]["A"], 1)
- def test_bool_raises_value_error_1069(self):
+ def test_bool_empty_nonzero(self):
df = DataFrame([1, 2, 3])
- self.failUnlessRaises(ValueError, lambda: bool(df))
+ self.assertTrue(bool(df))
+ self.assertFalse(df.empty)
+ df = DataFrame(index=['a', 'b'], columns=['c', 'd']).dropna()
+ self.assertFalse(bool(df))
+ self.assertFalse(bool(df.T))
+ self.assertTrue(df.empty)
+ self.assertTrue(df.T.empty)
def test_any_all(self):
self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)
| closes #3691.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3696 | 2013-05-27T01:42:23Z | 2013-05-30T16:45:00Z | 2013-05-30T16:45:00Z | 2014-06-25T21:08:44Z |
Test to verify/fix behavior in #3503 | diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index fddbbf93552b3..207d08e795e37 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -23,6 +23,7 @@
from pandas.core.api import (DataFrame, Index, Series, notnull, isnull,
MultiIndex, DatetimeIndex, Timestamp, Period)
from pandas import date_range
+import pandas as pd
from pandas.io.parsers import read_csv
from pandas.util.testing import (assert_almost_equal,
@@ -4037,7 +4038,7 @@ def test_div(self):
### this is technically wrong as the integer portion is coerced to float ###
expected = DataFrame({ 'first' : Series([1,1,1,1],dtype='float64'), 'second' : Series([np.inf,np.inf,np.inf,1]) })
assert_frame_equal(result,expected)
-
+
result2 = DataFrame(p.values.astype('float64')/p.values,index=p.index,columns=p.columns).fillna(np.inf)
assert_frame_equal(result2,expected)
@@ -4723,7 +4724,7 @@ def _check_df(df,cols=None):
if isinstance(obj_df,Series):
assert_series_equal(obj_df,obj_rs)
else:
- assert_frame_equal(obj_df,obj_rs,check_names=False)
+ assert_frame_equal(obj_df,obj_rs,check_names=False)
# wrote in the same order
else:
@@ -4990,9 +4991,9 @@ def test_to_csv_multiindex(self):
def _make_frame(names=None):
if names is True:
names = ['first','second']
- return DataFrame(np.random.randint(0,10,size=(3,3)),
- columns=MultiIndex.from_tuples([('bah', 'foo'),
- ('bah', 'bar'),
+ return DataFrame(np.random.randint(0,10,size=(3,3)),
+ columns=MultiIndex.from_tuples([('bah', 'foo'),
+ ('bah', 'bar'),
('ban', 'baz')],
names=names),
dtype='int64')
@@ -5069,12 +5070,12 @@ def _make_frame(names=None):
raise AssertionError("failure in read_csv header=range(3)")
try:
- read_csv(path,tupleize_cols=False,header=range(7),index_col=0)
+ read_csv(path,tupleize_cols=False,header=range(7),index_col=0)
except (Exception), detail:
if not str(detail).startswith('Passed header=[0,1,2,3,4,5,6], len of 7, but only 6 lines in file'):
raise AssertionError("failure in read_csv header=range(7)")
- for i in [3,4,5,6,7]:
+ for i in [3,4,5,6,7]:
self.assertRaises(Exception, read_csv, path, tupleize_cols=False, header=range(i), index_col=0)
self.assertRaises(Exception, read_csv, path, tupleize_cols=False, header=[0,2], index_col=0)
@@ -5168,7 +5169,7 @@ def test_to_csv_dups_cols(self):
with ensure_clean() as filename:
df.to_csv(filename)
result = read_csv(filename,index_col=0)
-
+
# date cols
for i in ['0.4','1.4','2.4']:
result[i] = to_datetime(result[i])
@@ -5281,6 +5282,14 @@ def test_to_csv_quoting(self):
self.assertEqual(result, expected)
+ # quoting windows line terminators, presents with encoding?
+ # #3503
+ text = 'a,b,c\n1,"test \r\n",3\n'
+ df = pd.read_csv(StringIO(text))
+ buf = StringIO()
+ df.to_csv(buf, encoding='utf-8', index=False)
+ self.assertEqual(buf.getvalue(), text)
+
def test_to_csv_unicodewriter_quoting(self):
import csv
@@ -8540,7 +8549,7 @@ def test_combine_first_mixed_bug(self):
result = df1.combine_first(df2)[2]
expected = Series([True,True,False])
- assert_series_equal(result,expected)
+ assert_series_equal(result,expected)
 # GH 3593, converting datetime64[ns] incorrectly
df0 = DataFrame({"a":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})
@@ -9942,11 +9951,11 @@ def test_columns_with_dups(self):
df.iloc[:,i]
# dup columns across dtype GH 2079/2194
- vals = [[1, -1, 2.], [2, -2, 3.]]
- rs = DataFrame(vals, columns=['A', 'A', 'B'])
- xp = DataFrame(vals)
- xp.columns = ['A', 'A', 'B']
- assert_frame_equal(rs, xp)
+ vals = [[1, -1, 2.], [2, -2, 3.]]
+ rs = DataFrame(vals, columns=['A', 'A', 'B'])
+ xp = DataFrame(vals)
+ xp.columns = ['A', 'A', 'B']
+ assert_frame_equal(rs, xp)
def test_cast_internals(self):
casted = DataFrame(self.frame._data, dtype=int)
| Was not able to reproduce the issue. #3503
| https://api.github.com/repos/pandas-dev/pandas/pulls/3694 | 2013-05-25T23:36:33Z | 2013-06-02T20:38:25Z | 2013-06-02T20:38:25Z | 2014-07-16T08:10:25Z |
CLN: added io.api for i/o importing functions | diff --git a/RELEASE.rst b/RELEASE.rst
index 5293b858b72a3..5b512814d0fec 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -35,6 +35,7 @@ pandas 0.11.1
GH3606_)
- Support for reading Amazon S3 files. (GH3504_)
- Added module for reading and writing Stata files: pandas.io.stata (GH1512_)
+ includes ``to_stata`` DataFrame method, and a ``read_stata`` top-level reader
- Added support for writing in ``to_csv`` and reading in ``read_csv``,
multi-index columns. The ``header`` option in ``read_csv`` now accepts a
list of the rows from which to read the index. Added the option,
@@ -104,6 +105,11 @@ pandas 0.11.1
does not control triggering of summary, similar to < 0.11.0.
- Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column
to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_)
+ - io API changes
+
+ - added ``pandas.io.api`` for i/o imports
+ - removed ``Excel`` support to ``pandas.io.excel``
+ - added top-level ``pd.read_sql`` and ``to_sql`` DataFrame methods
**Bug Fixes**
diff --git a/doc/source/10min.rst b/doc/source/10min.rst
index 2eda474d7954f..19bacdc81bdf9 100644
--- a/doc/source/10min.rst
+++ b/doc/source/10min.rst
@@ -699,8 +699,7 @@ Reading from an excel file
.. ipython:: python
- xls = ExcelFile('foo.xlsx')
- xls.parse('sheet1', index_col=None, na_values=['NA'])
+ read_excel('foo.xlsx', 'sheet1', index_col=None, na_values=['NA'])
.. ipython:: python
:suppress:
diff --git a/doc/source/api.rst b/doc/source/api.rst
index c5b83e4af6999..2e59bf6533205 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -48,7 +48,20 @@ File IO
read_table
read_csv
- ExcelFile.parse
+
+.. currentmodule:: pandas.io.excel
+
+.. autosummary::
+ :toctree: generated/
+
+ read_excel
+
+.. currentmodule:: pandas.io.stata
+
+.. autosummary::
+ :toctree: generated/
+
+ read_stata
.. currentmodule:: pandas.io.html
@@ -57,15 +70,29 @@ File IO
read_html
+SQL
+~~~
+
+.. currentmodule:: pandas.io.sql
+
+.. autosummary::
+ :toctree: generated/
+
+ read_sql
+
HDFStore: PyTables (HDF5)
~~~~~~~~~~~~~~~~~~~~~~~~~
+
.. currentmodule:: pandas.io.pytables
.. autosummary::
:toctree: generated/
+ read_hdf
HDFStore.put
+ HDFStore.append
HDFStore.get
+ HDFStore.select
Standard moving window functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -532,9 +559,11 @@ Serialization / IO / Conversion
DataFrame.load
DataFrame.save
DataFrame.to_csv
+ DataFrame.to_hdf
DataFrame.to_dict
DataFrame.to_excel
DataFrame.to_html
+ DataFrame.to_stata
DataFrame.to_records
DataFrame.to_sparse
DataFrame.to_string
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 8aac415721f9a..7f6b54667765d 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -32,25 +32,25 @@ Selection
The :ref:`indexing <indexing>` docs.
-`Boolean Rows Indexing
+Indexing using both row labels and conditionals, see
+`here
<http://stackoverflow.com/questions/14725068/pandas-using-row-labels-in-boolean-indexing>`__
- Indexing using both row labels and conditionals
-`Using loc and iloc in selections
+Use loc for label-oriented slicing and iloc positional slicing, see
+`here
<https://github.com/pydata/pandas/issues/2904>`__
- Use loc for label-oriented slicing and iloc positional slicing
-`Extending a panel along the minor axis
+Extend a panel frame by transposing, adding a new dimension, and transposing back to the original dimensions, see
+`here
<http://stackoverflow.com/questions/15364050/extending-a-pandas-panel-frame-along-the-minor-axis>`__
- Extend a panel frame by transposing, adding a new dimension, and transposing back to the original dimensions
-`Boolean masking in a panel
+Mask a panel by using ``np.where`` and then reconstructing the panel with the new masked values, see
+`here
<http://stackoverflow.com/questions/14650341/boolean-mask-in-pandas-panel>`__
- Mask a panel by using ``np.where`` and then reconstructing the panel with the new masked values
-`Selecting via the complement
+Using ``~`` to take the complement of a boolean array, see
+`here
<http://stackoverflow.com/questions/14986510/picking-out-elements-based-on-complement-of-indices-in-python-pandas>`__
- ``~`` can be used to take the complement of a boolean array
`Efficiently creating columns using applymap
<http://stackoverflow.com/questions/16575868/efficiently-creating-additional-columns-in-a-pandas-dataframe-using-map>`__
diff --git a/doc/source/io.rst b/doc/source/io.rst
index a1ba88c0d798b..92747f9906da2 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -9,6 +9,7 @@
import csv
from StringIO import StringIO
import pandas as pd
+ ExcelWriter = pd.ExcelWriter
import numpy as np
np.random.seed(123456)
@@ -27,6 +28,18 @@
IO Tools (Text, CSV, HDF5, ...)
*******************************
+The Pandas I/O api is a set of top level ``reader`` functions accessed like ``pd.read_csv()`` that generally return a ``pandas``
+object. The corresponding ``writer`` functions are object methods that are accessed like ``df.to_csv()``
+
+.. csv-table::
+ :widths: 12, 15, 15, 15, 15
+ :delim: ;
+
+ Reader; ``read_csv``; ``read_excel``; ``read_hdf``; ``read_sql``
+ Writer; ``to_csv``; ``to_excel``; ``to_hdf``; ``to_sql``
+ Reader; ``read_html``; ``read_stata``; ``read_clipboard`` ;
+ Writer; ``to_html``; ``to_stata``; ``to_clipboard`` ;
+
.. _io.read_csv_table:
CSV & Text files
@@ -971,29 +984,33 @@ And then import the data directly to a DataFrame by calling:
Excel files
-----------
-The ``ExcelFile`` class can read an Excel 2003 file using the ``xlrd`` Python
+The ``read_excel`` method can read an Excel 2003 file using the ``xlrd`` Python
module and use the same parsing code as the above to convert tabular data into
a DataFrame. See the :ref:`cookbook<cookbook.excel>` for some
advanced strategies
-To use it, create the ``ExcelFile`` object:
+.. note::
-.. code-block:: python
+ The prior method of accessing Excel is now deprecated as of 0.11.1,
+ this will work but will be removed in a future version.
- xls = ExcelFile('path_to_file.xls')
+ .. code-block:: python
-Then use the ``parse`` instance method with a sheetname, then use the same
-additional arguments as the parsers above:
+ from pandas.io.parsers import ExcelFile
+ xls = ExcelFile('path_to_file.xls')
+ xls.parse('Sheet1', index_col=None, na_values=['NA'])
-.. code-block:: python
+ Replaced by
+
+ .. code-block:: python
- xls.parse('Sheet1', index_col=None, na_values=['NA'])
+ read_excel('path_to_file.xls', 'Sheet1', index_col=None, na_values=['NA'])
To read sheets from an Excel 2007 file, you can pass a filename with a ``.xlsx``
extension, in which case the ``openpyxl`` module will be used to read the file.
It is often the case that users will insert columns to do temporary computations
-in Excel and you may not want to read in those columns. `ExcelFile.parse` takes
+in Excel and you may not want to read in those columns. `read_excel` takes
a `parse_cols` keyword to allow you to specify a subset of columns to parse.
If `parse_cols` is an integer, then it is assumed to indicate the last column
@@ -1001,14 +1018,14 @@ to be parsed.
.. code-block:: python
- xls.parse('Sheet1', parse_cols=2, index_col=None, na_values=['NA'])
+ read_excel('path_to_file.xls', 'Sheet1', parse_cols=2, index_col=None, na_values=['NA'])
If `parse_cols` is a list of integers, then it is assumed to be the file column
indices to be parsed.
.. code-block:: python
- xls.parse('Sheet1', parse_cols=[0, 2, 3], index_col=None, na_values=['NA'])
+ read_excel('path_to_file.xls', 'Sheet1', parse_cols=[0, 2, 3], index_col=None, na_values=['NA'])
To write a DataFrame object to a sheet of an Excel file, you can use the
``to_excel`` instance method. The arguments are largely the same as ``to_csv``
@@ -1883,16 +1900,13 @@ Writing to STATA format
.. _io.StataWriter:
-The function :func:'~pandas.io.StataWriter.write_file' will write a DataFrame
-into a .dta file. The format version of this file is always the latest one,
-115.
+The method ``to_stata`` will write a DataFrame into a .dta file.
+The format version of this file is always the latest one, 115.
.. ipython:: python
- from pandas.io.stata import StataWriter
df = DataFrame(randn(10,2),columns=list('AB'))
- writer = StataWriter('stata.dta',df)
- writer.write_file()
+ df.to_stata('stata.dta')
Reading from STATA format
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1901,24 +1915,21 @@ Reading from STATA format
.. versionadded:: 0.11.1
-The class StataReader will read the header of the given dta file at
-initialization. Its function :func:'~pandas.io.StataReader.data' will
-read the observations, converting them to a DataFrame which is returned:
+The top-level function ``read_stata`` will read a dta format file
+and return a DataFrame:
.. ipython:: python
- from pandas.io.stata import StataReader
- reader = StataReader('stata.dta')
- reader.data()
+ pd.read_stata('stata.dta')
-The parameter convert_categoricals indicates wheter value labels should be
-read and used to create a Categorical variable from them. Value labels can
-also be retrieved by the function variable_labels, which requires data to be
-called before.
+Currently the ``index`` is retrieved as a column on read back.
-The StataReader supports .dta Formats 104, 105, 108, 113-115.
+The parameter ``convert_categoricals`` indicates wheter value labels should be
+read and used to create a ``Categorical`` variable from them. Value labels can
+also be retrieved by the function ``variable_labels``, which requires data to be
+called before (see ``pandas.io.stata.StataReader``).
-Alternatively, the function :func:'~pandas.io.read_stata' can be used
+The StataReader supports .dta Formats 104, 105, 108, 113-115.
.. ipython:: python
:suppress:
diff --git a/doc/source/v0.10.0.txt b/doc/source/v0.10.0.txt
index 0c5497868efe2..51075a61bec4d 100644
--- a/doc/source/v0.10.0.txt
+++ b/doc/source/v0.10.0.txt
@@ -1,5 +1,10 @@
.. _whatsnew_0100:
+.. ipython:: python
+ :suppress:
+
+ from StringIO import StringIO
+
v0.10.0 (December 17, 2012)
---------------------------
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index bd4a7c49fbb4d..7cedb62693c73 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -6,6 +6,19 @@ v0.11.1 (??)
This is a minor release from 0.11.0 and includes several new features and
enhancements along with a large number of bug fixes.
+The I/O api is now much more consistent with the following top-level reading
+functions available, e.g. ``pd.read_csv``, and the counterpart writers are
+available as object methods, e.g. ``df.to_csv``
+
+.. csv-table::
+ :widths: 12, 15, 15, 15, 15
+ :delim: ;
+
+ Reader; ``read_csv``; ``read_excel``; ``read_hdf``; ``read_sql``
+ Writer; ``to_csv``; ``to_excel``; ``to_hdf``; ``to_sql``
+ Reader; ``read_html``; ``read_stata``; ``read_clipboard`` ;
+ Writer; ``to_html``; ``to_stata``; ``to_clipboard`` ;
+
API changes
~~~~~~~~~~~
@@ -74,6 +87,31 @@ API changes
- Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column
to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_)
+ - IO api
+
+ - added top-level function ``read_excel`` to replace the following,
+ The original API is deprecated and will be removed in a future version
+
+ .. code-block:: python
+
+ from pandas.io.parsers import ExcelFile
+ xls = ExcelFile('path_to_file.xls')
+ xls.parse('Sheet1', index_col=None, na_values=['NA'])
+
+ With
+
+ .. code-block:: python
+
+ import pandas as pd
+ pd.read_excel('path_to_file.xls', 'Sheet1', index_col=None, na_values=['NA'])
+
+ - added top-level function ``read_sql`` that is equivalent to the following
+
+ .. code-block:: python
+
+ from pandas.io.sql import read_frame
+ read_frame(....)
+
Enhancements
~~~~~~~~~~~~
@@ -109,6 +147,8 @@ Enhancements
a list or tuple.
- Added module for reading and writing Stata files: pandas.io.stata (GH1512_)
+ accessable via ``read_stata`` top-level function for reading,
+ and ``to_stata`` DataFrame method for writing
- ``DataFrame.replace()`` now allows regular expressions on contained
``Series`` with object dtype. See the examples section in the regular docs
@@ -218,7 +258,7 @@ Bug Fixes
.. ipython :: python
df = DataFrame({'a': list('ab..'), 'b': [1, 2, 3, 4]})
- df.replace(regex=r'\s*\.\s*', value=nan)
+ df.replace(regex=r'\s*\.\s*', value=np.nan)
to replace all occurrences of the string ``'.'`` with zero or more
instances of surrounding whitespace with ``NaN``.
@@ -227,7 +267,7 @@ Bug Fixes
.. ipython :: python
- df.replace('.', nan)
+ df.replace('.', np.nan)
to replace all occurrences of the string ``'.'`` with ``NaN``.
diff --git a/pandas/__init__.py b/pandas/__init__.py
index bf5bcc81bc21e..da4c146da3cfd 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -28,12 +28,8 @@
from pandas.sparse.api import *
from pandas.stats.api import *
from pandas.tseries.api import *
+from pandas.io.api import *
-from pandas.io.parsers import (read_csv, read_table, read_clipboard,
- read_fwf, to_clipboard, ExcelFile,
- ExcelWriter)
-from pandas.io.pytables import HDFStore, Term, get_store, read_hdf
-from pandas.io.html import read_html
from pandas.util.testing import debug
from pandas.tools.describe import value_range
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 9e276e01dd723..ea8dee51565ac 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1300,35 +1300,6 @@ def from_csv(cls, path, header=0, sep=',', index_col=0,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding,tupleize_cols=False)
- @classmethod
- def from_dta(dta, path, parse_dates=True, convert_categoricals=True, encoding=None, index_col=None):
- """
- Read Stata file into DataFrame
-
- Parameters
- ----------
- path : string file path or file handle / StringIO
- parse_dates : boolean, default True
- Convert date variables to DataFrame time values
- convert_categoricals : boolean, default True
- Read value labels and convert columns to Categorical/Factor variables
- encoding : string, None or encoding, default None
- Encoding used to parse the files. Note that Stata doesn't
- support unicode. None defaults to cp1252.
- index_col : int or sequence, default None
- Column to use for index. If a sequence is given, a MultiIndex
- is used. Different default from read_table
-
- Notes
- -----
-
- Returns
- -------
- y : DataFrame
- """
- from pandas.io.stata import read_stata
- return read_stata(path, parse_dates=parse_dates, convert_categoricals=convert_categoricals, encoding=encoding, index=index_col)
-
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame
@@ -1510,7 +1481,7 @@ def to_excel(self, excel_writer, sheet_name='sheet1', na_rep='',
>>> df2.to_excel(writer,'sheet2')
>>> writer.save()
"""
- from pandas.io.parsers import ExcelWriter
+ from pandas.io.excel import ExcelWriter
need_save = False
if isinstance(excel_writer, basestring):
excel_writer = ExcelWriter(excel_writer)
@@ -1529,6 +1500,57 @@ def to_excel(self, excel_writer, sheet_name='sheet1', na_rep='',
if need_save:
excel_writer.save()
+ def to_stata(self, fname, convert_dates=None, write_index=True, encoding="latin-1",
+ byteorder=None):
+ """
+ A class for writing Stata binary dta files from array-like objects
+
+ Parameters
+ ----------
+ fname : file path or buffer
+ Where to save the dta file.
+ convert_dates : dict
+ Dictionary mapping column of datetime types to the stata internal
+ format that you want to use for the dates. Options are
+ 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either a
+ number or a name.
+ encoding : str
+ Default is latin-1. Note that Stata does not support unicode.
+ byteorder : str
+ Can be ">", "<", "little", or "big". The default is None which uses
+ `sys.byteorder`
+
+ Examples
+ --------
+ >>> writer = StataWriter('./data_file.dta', data)
+ >>> writer.write_file()
+
+ Or with dates
+
+ >>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})
+ >>> writer.write_file()
+ """
+ from pandas.io.stata import StataWriter
+ writer = StataWriter(fname,self,convert_dates=convert_dates, encoding=encoding, byteorder=byteorder)
+ writer.write_file()
+
+ def to_sql(self, name, con, flavor='sqlite', if_exists='fail', **kwargs):
+ """
+ Write records stored in a DataFrame to a SQL database.
+
+ Parameters
+ ----------
+ name: name of SQL table
+ conn: an open SQL database connection object
+ flavor: {'sqlite', 'mysql', 'oracle'}, default 'sqlite'
+ if_exists: {'fail', 'replace', 'append'}, default 'fail'
+ fail: If table exists, do nothing.
+ replace: If table exists, drop it, recreate it, and insert data.
+ append: If table exists, insert data. Create if does not exist.
+ """
+ from pandas.io.sql import write_frame
+ write_frame(self, name, con, flavor=flavor, if_exists=if_exists, **kwargs)
+
@Appender(fmt.docstring_to_string, indents=1)
def to_string(self, buf=None, columns=None, col_space=None, colSpace=None,
header=True, index=True, na_rep='NaN', formatters=None,
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 4a80e2f65fd71..aa574219a259e 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -491,6 +491,10 @@ def to_hdf(self, path_or_buf, key, **kwargs):
from pandas.io import pytables
return pytables.to_hdf(path_or_buf, key, self, **kwargs)
+ def to_clipboard(self):
+ from pandas.io import parsers
+ parsers.to_clipboard(self)
+
# install the indexerse
for _name, _indexer in indexing.get_indexers_list():
PandasObject._create_indexer(_name,_indexer)
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index fa1305d27058e..0a099661c58f1 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -596,7 +596,7 @@ def to_excel(self, path, na_rep=''):
na_rep : string, default ''
Missing data representation
"""
- from pandas.io.parsers import ExcelWriter
+ from pandas.io.excel import ExcelWriter
writer = ExcelWriter(path)
for item, df in self.iteritems():
name = str(item)
diff --git a/pandas/io/api.py b/pandas/io/api.py
new file mode 100644
index 0000000000000..e4c0c8c0c77f0
--- /dev/null
+++ b/pandas/io/api.py
@@ -0,0 +1,11 @@
+"""
+Data IO api
+"""
+
+from pandas.io.parsers import (read_csv, read_table, read_clipboard,
+ read_fwf, to_clipboard)
+from pandas.io.excel import ExcelFile, ExcelWriter, read_excel
+from pandas.io.pytables import HDFStore, Term, get_store, read_hdf
+from pandas.io.html import read_html
+from pandas.io.sql import read_sql
+from pandas.io.stata import read_stata
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
new file mode 100644
index 0000000000000..ea102cb6803d7
--- /dev/null
+++ b/pandas/io/excel.py
@@ -0,0 +1,462 @@
+"""
+Module parse to/from Excel
+"""
+
+#----------------------------------------------------------------------
+# ExcelFile class
+
+import datetime
+from itertools import izip
+import numpy as np
+
+from pandas.core.index import Index, MultiIndex
+from pandas.core.frame import DataFrame
+import pandas.core.common as com
+from pandas.util import py3compat
+from pandas.io.parsers import TextParser
+from pandas.tseries.period import Period
+import json
+
+def read_excel(path_or_buf, sheetname, header=0, skiprows=None, skip_footer=0,
+ index_col=None, parse_cols=None, parse_dates=False,
+ date_parser=None, na_values=None, thousands=None, chunksize=None,
+ kind=None, **kwds):
+ """
+ Read Excel table into DataFrame
+
+ Parameters
+ ----------
+ sheetname : string
+ Name of Excel sheet
+ header : int, default 0
+ Row to use for the column labels of the parsed DataFrame
+ skiprows : list-like
+ Rows to skip at the beginning (0-indexed)
+ skip_footer : int, default 0
+ Rows at the end to skip (0-indexed)
+ index_col : int, default None
+ Column to use as the row labels of the DataFrame. Pass None if
+ there is no such column
+ parse_cols : int or list, default None
+ If None then parse all columns,
+ If int then indicates last column to be parsed
+ If list of ints then indicates list of column numbers to be parsed
+ If string then indicates comma separated list of column names and
+ column ranges (e.g. "A:E" or "A,C,E:F")
+ na_values : list-like, default None
+ List of additional strings to recognize as NA/NaN
+
+ Returns
+ -------
+ parsed : DataFrame
+ """
+ return ExcelFile(path_or_buf,kind=kind).parse(sheetname=sheetname,
+ header=0, skiprows=None, skip_footer=0,
+ index_col=None, parse_cols=None, parse_dates=False,
+ date_parser=None, na_values=None, thousands=None, chunksize=None,
+ kind=None, **kwds)
+
+class ExcelFile(object):
+ """
+ Class for parsing tabular excel sheets into DataFrame objects.
+ Uses xlrd. See ExcelFile.parse for more documentation
+
+ Parameters
+ ----------
+ path : string or file-like object
+ Path to xls or xlsx file
+ """
+ def __init__(self, path_or_buf, kind=None, **kwds):
+ self.kind = kind
+
+ import xlrd # throw an ImportError if we need to
+ ver = tuple(map(int,xlrd.__VERSION__.split(".")[:2]))
+ if ver < (0, 9):
+ raise ImportError("pandas requires xlrd >= 0.9.0 for excel support, current version "+xlrd.__VERSION__)
+
+ self.path_or_buf = path_or_buf
+ self.tmpfile = None
+
+ if isinstance(path_or_buf, basestring):
+ self.book = xlrd.open_workbook(path_or_buf)
+ else:
+ data = path_or_buf.read()
+ self.book = xlrd.open_workbook(file_contents=data)
+
+ def __repr__(self):
+ return object.__repr__(self)
+
+ def parse(self, sheetname, header=0, skiprows=None, skip_footer=0,
+ index_col=None, parse_cols=None, parse_dates=False,
+ date_parser=None, na_values=None, thousands=None, chunksize=None,
+ **kwds):
+ """
+ Read Excel table into DataFrame
+
+ Parameters
+ ----------
+ sheetname : string
+ Name of Excel sheet
+ header : int, default 0
+ Row to use for the column labels of the parsed DataFrame
+ skiprows : list-like
+ Rows to skip at the beginning (0-indexed)
+ skip_footer : int, default 0
+ Rows at the end to skip (0-indexed)
+ index_col : int, default None
+ Column to use as the row labels of the DataFrame. Pass None if
+ there is no such column
+ parse_cols : int or list, default None
+ If None then parse all columns,
+ If int then indicates last column to be parsed
+ If list of ints then indicates list of column numbers to be parsed
+ If string then indicates comma separated list of column names and
+ column ranges (e.g. "A:E" or "A,C,E:F")
+ na_values : list-like, default None
+ List of additional strings to recognize as NA/NaN
+
+ Returns
+ -------
+ parsed : DataFrame
+ """
+
+ # has_index_names: boolean, default False
+ # True if the cols defined in index_col have an index name and are
+ # not in the header
+ has_index_names = False # removed as new argument of API function
+
+ skipfooter = kwds.pop('skipfooter', None)
+ if skipfooter is not None:
+ skip_footer = skipfooter
+
+ return self._parse_excel(sheetname, header=header,
+ skiprows=skiprows, index_col=index_col,
+ has_index_names=has_index_names,
+ parse_cols=parse_cols,
+ parse_dates=parse_dates,
+ date_parser=date_parser,
+ na_values=na_values,
+ thousands=thousands,
+ chunksize=chunksize,
+ skip_footer=skip_footer)
+
+ def _should_parse(self, i, parse_cols):
+
+ def _range2cols(areas):
+ """
+ Convert comma separated list of column names and column ranges to a
+ list of 0-based column indexes.
+
+ >>> _range2cols('A:E')
+ [0, 1, 2, 3, 4]
+ >>> _range2cols('A,C,Z:AB')
+ [0, 2, 25, 26, 27]
+ """
+ def _excel2num(x):
+ "Convert Excel column name like 'AB' to 0-based column index"
+ return reduce(lambda s, a: s * 26 + ord(a) - ord('A') + 1, x.upper().strip(), 0) - 1
+
+ cols = []
+ for rng in areas.split(','):
+ if ':' in rng:
+ rng = rng.split(':')
+ cols += range(_excel2num(rng[0]), _excel2num(rng[1]) + 1)
+ else:
+ cols.append(_excel2num(rng))
+ return cols
+
+ if isinstance(parse_cols, int):
+ return i <= parse_cols
+ elif isinstance(parse_cols, basestring):
+ return i in _range2cols(parse_cols)
+ else:
+ return i in parse_cols
+
+ def _parse_excel(self, sheetname, header=0, skiprows=None,
+ skip_footer=0, index_col=None, has_index_names=None,
+ parse_cols=None, parse_dates=False, date_parser=None,
+ na_values=None, thousands=None, chunksize=None):
+ from xlrd import (xldate_as_tuple, XL_CELL_DATE,
+ XL_CELL_ERROR, XL_CELL_BOOLEAN)
+
+ datemode = self.book.datemode
+ sheet = self.book.sheet_by_name(sheetname)
+
+ data = []
+ should_parse = {}
+ for i in range(sheet.nrows):
+ row = []
+ for j, (value, typ) in enumerate(izip(sheet.row_values(i),
+ sheet.row_types(i))):
+ if parse_cols is not None and j not in should_parse:
+ should_parse[j] = self._should_parse(j, parse_cols)
+
+ if parse_cols is None or should_parse[j]:
+ if typ == XL_CELL_DATE:
+ dt = xldate_as_tuple(value, datemode)
+ # how to produce this first case?
+ if dt[0] < datetime.MINYEAR: # pragma: no cover
+ value = datetime.time(*dt[3:])
+ else:
+ value = datetime.datetime(*dt)
+ elif typ == XL_CELL_ERROR:
+ value = np.nan
+ elif typ == XL_CELL_BOOLEAN:
+ value = bool(value)
+ row.append(value)
+
+ data.append(row)
+
+ if header is not None:
+ data[header] = _trim_excel_header(data[header])
+
+ parser = TextParser(data, header=header, index_col=index_col,
+ has_index_names=has_index_names,
+ na_values=na_values,
+ thousands=thousands,
+ parse_dates=parse_dates,
+ date_parser=date_parser,
+ skiprows=skiprows,
+ skip_footer=skip_footer,
+ chunksize=chunksize)
+
+ return parser.read()
+
+ @property
+ def sheet_names(self):
+ return self.book.sheet_names()
+
+
+def _trim_excel_header(row):
+ # trim header row so auto-index inference works
+ # xlrd uses '' , openpyxl None
+ while len(row) > 0 and (row[0] == '' or row[0] is None):
+ row = row[1:]
+ return row
+
+
+class CellStyleConverter(object):
+ """
+ Utility Class which converts a style dict to xlrd or openpyxl style
+ """
+
+ @staticmethod
+ def to_xls(style_dict, num_format_str=None):
+ """
+ converts a style_dict to an xlwt style object
+ Parameters
+ ----------
+ style_dict: style dictionary to convert
+ """
+ import xlwt
+
+ def style_to_xlwt(item, firstlevel=True, field_sep=',', line_sep=';'):
+ """helper wich recursively generate an xlwt easy style string
+ for example:
+
+ hstyle = {"font": {"bold": True},
+ "border": {"top": "thin",
+ "right": "thin",
+ "bottom": "thin",
+ "left": "thin"},
+ "align": {"horiz": "center"}}
+ will be converted to
+ font: bold on; \
+ border: top thin, right thin, bottom thin, left thin; \
+ align: horiz center;
+ """
+ if hasattr(item, 'items'):
+ if firstlevel:
+ it = ["%s: %s" % (key, style_to_xlwt(value, False))
+ for key, value in item.items()]
+ out = "%s " % (line_sep).join(it)
+ return out
+ else:
+ it = ["%s %s" % (key, style_to_xlwt(value, False))
+ for key, value in item.items()]
+ out = "%s " % (field_sep).join(it)
+ return out
+ else:
+ item = "%s" % item
+ item = item.replace("True", "on")
+ item = item.replace("False", "off")
+ return item
+
+ if style_dict:
+ xlwt_stylestr = style_to_xlwt(style_dict)
+ style = xlwt.easyxf(xlwt_stylestr, field_sep=',', line_sep=';')
+ else:
+ style = xlwt.XFStyle()
+ if num_format_str is not None:
+ style.num_format_str = num_format_str
+
+ return style
+
+ @staticmethod
+ def to_xlsx(style_dict):
+ """
+ converts a style_dict to an openpyxl style object
+ Parameters
+ ----------
+ style_dict: style dictionary to convert
+ """
+
+ from openpyxl.style import Style
+ xls_style = Style()
+ for key, value in style_dict.items():
+ for nk, nv in value.items():
+ if key == "borders":
+ (xls_style.borders.__getattribute__(nk)
+ .__setattr__('border_style', nv))
+ else:
+ xls_style.__getattribute__(key).__setattr__(nk, nv)
+
+ return xls_style
+
+
+def _conv_value(val):
+ # convert value for excel dump
+ if isinstance(val, np.int64):
+ val = int(val)
+ elif isinstance(val, np.bool8):
+ val = bool(val)
+ elif isinstance(val, Period):
+ val = "%s" % val
+
+ return val
+
+
+class ExcelWriter(object):
+ """
+ Class for writing DataFrame objects into excel sheets, uses xlwt for xls,
+ openpyxl for xlsx. See DataFrame.to_excel for typical usage.
+
+ Parameters
+ ----------
+ path : string
+ Path to xls file
+ """
+ def __init__(self, path):
+ self.use_xlsx = True
+ if path.endswith('.xls'):
+ self.use_xlsx = False
+ import xlwt
+ self.book = xlwt.Workbook()
+ self.fm_datetime = xlwt.easyxf(
+ num_format_str='YYYY-MM-DD HH:MM:SS')
+ self.fm_date = xlwt.easyxf(num_format_str='YYYY-MM-DD')
+ else:
+ from openpyxl.workbook import Workbook
+ self.book = Workbook() # optimized_write=True)
+ # open pyxl 1.6.1 adds a dummy sheet remove it
+ if self.book.worksheets:
+ self.book.remove_sheet(self.book.worksheets[0])
+ self.path = path
+ self.sheets = {}
+ self.cur_sheet = None
+
+ def save(self):
+ """
+ Save workbook to disk
+ """
+ self.book.save(self.path)
+
+ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
+ """
+ Write given formated cells into Excel an excel sheet
+
+ Parameters
+ ----------
+ cells : generator
+ cell of formated data to save to Excel sheet
+ sheet_name : string, default None
+ Name of Excel sheet, if None, then use self.cur_sheet
+ startrow: upper left cell row to dump data frame
+ startcol: upper left cell column to dump data frame
+ """
+ if sheet_name is None:
+ sheet_name = self.cur_sheet
+ if sheet_name is None: # pragma: no cover
+ raise Exception('Must pass explicit sheet_name or set '
+ 'cur_sheet property')
+ if self.use_xlsx:
+ self._writecells_xlsx(cells, sheet_name, startrow, startcol)
+ else:
+ self._writecells_xls(cells, sheet_name, startrow, startcol)
+
+ def _writecells_xlsx(self, cells, sheet_name, startrow, startcol):
+
+ from openpyxl.cell import get_column_letter
+
+ if sheet_name in self.sheets:
+ wks = self.sheets[sheet_name]
+ else:
+ wks = self.book.create_sheet()
+ wks.title = sheet_name
+ self.sheets[sheet_name] = wks
+
+ for cell in cells:
+ colletter = get_column_letter(startcol + cell.col + 1)
+ xcell = wks.cell("%s%s" % (colletter, startrow + cell.row + 1))
+ xcell.value = _conv_value(cell.val)
+ if cell.style:
+ style = CellStyleConverter.to_xlsx(cell.style)
+ for field in style.__fields__:
+ xcell.style.__setattr__(field,
+ style.__getattribute__(field))
+
+ if isinstance(cell.val, datetime.datetime):
+ xcell.style.number_format.format_code = "YYYY-MM-DD HH:MM:SS"
+ elif isinstance(cell.val, datetime.date):
+ xcell.style.number_format.format_code = "YYYY-MM-DD"
+
+ # merging requires openpyxl latest (works on 1.6.1)
+ # todo add version check
+ if cell.mergestart is not None and cell.mergeend is not None:
+ cletterstart = get_column_letter(startcol + cell.col + 1)
+ cletterend = get_column_letter(startcol + cell.mergeend + 1)
+
+ wks.merge_cells('%s%s:%s%s' % (cletterstart,
+ startrow + cell.row + 1,
+ cletterend,
+ startrow + cell.mergestart + 1))
+
+ def _writecells_xls(self, cells, sheet_name, startrow, startcol):
+ if sheet_name in self.sheets:
+ wks = self.sheets[sheet_name]
+ else:
+ wks = self.book.add_sheet(sheet_name)
+ self.sheets[sheet_name] = wks
+
+ style_dict = {}
+
+ for cell in cells:
+ val = _conv_value(cell.val)
+
+ num_format_str = None
+ if isinstance(cell.val, datetime.datetime):
+ num_format_str = "YYYY-MM-DD HH:MM:SS"
+ if isinstance(cell.val, datetime.date):
+ num_format_str = "YYYY-MM-DD"
+
+ stylekey = json.dumps(cell.style)
+ if num_format_str:
+ stylekey += num_format_str
+
+ if stylekey in style_dict:
+ style = style_dict[stylekey]
+ else:
+ style = CellStyleConverter.to_xls(cell.style, num_format_str)
+ style_dict[stylekey] = style
+
+ if cell.mergestart is not None and cell.mergeend is not None:
+ wks.write_merge(startrow + cell.row,
+ startrow + cell.mergestart,
+ startcol + cell.col,
+ startcol + cell.mergeend,
+ val, style)
+ else:
+ wks.write(startrow + cell.row,
+ startcol + cell.col,
+ val, style)
+
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 0dde47e6065e4..249afe0755445 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1981,409 +1981,17 @@ def _make_reader(self, f):
self.data = FixedWidthReader(f, self.colspecs, self.delimiter)
-#----------------------------------------------------------------------
-# ExcelFile class
-
-class ExcelFile(object):
- """
- Class for parsing tabular excel sheets into DataFrame objects.
- Uses xlrd. See ExcelFile.parse for more documentation
-
- Parameters
- ----------
- path : string or file-like object
- Path to xls or xlsx file
- """
- def __init__(self, path_or_buf, kind=None, **kwds):
- self.kind = kind
-
- import xlrd # throw an ImportError if we need to
- ver = tuple(map(int,xlrd.__VERSION__.split(".")[:2]))
- if ver < (0, 9):
- raise ImportError("pandas requires xlrd >= 0.9.0 for excel support, current version "+xlrd.__VERSION__)
-
- self.path_or_buf = path_or_buf
- self.tmpfile = None
-
- if isinstance(path_or_buf, basestring):
- self.book = xlrd.open_workbook(path_or_buf)
- else:
- data = path_or_buf.read()
- self.book = xlrd.open_workbook(file_contents=data)
-
- def __repr__(self):
- return object.__repr__(self)
-
- def parse(self, sheetname, header=0, skiprows=None, skip_footer=0,
- index_col=None, parse_cols=None, parse_dates=False,
- date_parser=None, na_values=None, thousands=None, chunksize=None,
- **kwds):
- """
- Read Excel table into DataFrame
-
- Parameters
- ----------
- sheetname : string
- Name of Excel sheet
- header : int, default 0
- Row to use for the column labels of the parsed DataFrame
- skiprows : list-like
- Rows to skip at the beginning (0-indexed)
- skip_footer : int, default 0
- Rows at the end to skip (0-indexed)
- index_col : int, default None
- Column to use as the row labels of the DataFrame. Pass None if
- there is no such column
- parse_cols : int or list, default None
- If None then parse all columns,
- If int then indicates last column to be parsed
- If list of ints then indicates list of column numbers to be parsed
- If string then indicates comma separated list of column names and
- column ranges (e.g. "A:E" or "A,C,E:F")
- na_values : list-like, default None
- List of additional strings to recognize as NA/NaN
-
- Returns
- -------
- parsed : DataFrame
- """
-
- # has_index_names: boolean, default False
- # True if the cols defined in index_col have an index name and are
- # not in the header
- has_index_names = False # removed as new argument of API function
-
- skipfooter = kwds.pop('skipfooter', None)
- if skipfooter is not None:
- skip_footer = skipfooter
-
- return self._parse_excel(sheetname, header=header,
- skiprows=skiprows, index_col=index_col,
- has_index_names=has_index_names,
- parse_cols=parse_cols,
- parse_dates=parse_dates,
- date_parser=date_parser,
- na_values=na_values,
- thousands=thousands,
- chunksize=chunksize,
- skip_footer=skip_footer)
-
- def _should_parse(self, i, parse_cols):
-
- def _range2cols(areas):
- """
- Convert comma separated list of column names and column ranges to a
- list of 0-based column indexes.
-
- >>> _range2cols('A:E')
- [0, 1, 2, 3, 4]
- >>> _range2cols('A,C,Z:AB')
- [0, 2, 25, 26, 27]
- """
- def _excel2num(x):
- "Convert Excel column name like 'AB' to 0-based column index"
- return reduce(lambda s, a: s * 26 + ord(a) - ord('A') + 1, x.upper().strip(), 0) - 1
-
- cols = []
- for rng in areas.split(','):
- if ':' in rng:
- rng = rng.split(':')
- cols += range(_excel2num(rng[0]), _excel2num(rng[1]) + 1)
- else:
- cols.append(_excel2num(rng))
- return cols
-
- if isinstance(parse_cols, int):
- return i <= parse_cols
- elif isinstance(parse_cols, basestring):
- return i in _range2cols(parse_cols)
- else:
- return i in parse_cols
-
- def _parse_excel(self, sheetname, header=0, skiprows=None,
- skip_footer=0, index_col=None, has_index_names=None,
- parse_cols=None, parse_dates=False, date_parser=None,
- na_values=None, thousands=None, chunksize=None):
- from xlrd import (xldate_as_tuple, XL_CELL_DATE,
- XL_CELL_ERROR, XL_CELL_BOOLEAN)
-
- datemode = self.book.datemode
- sheet = self.book.sheet_by_name(sheetname)
-
- data = []
- should_parse = {}
- for i in range(sheet.nrows):
- row = []
- for j, (value, typ) in enumerate(izip(sheet.row_values(i),
- sheet.row_types(i))):
- if parse_cols is not None and j not in should_parse:
- should_parse[j] = self._should_parse(j, parse_cols)
-
- if parse_cols is None or should_parse[j]:
- if typ == XL_CELL_DATE:
- dt = xldate_as_tuple(value, datemode)
- # how to produce this first case?
- if dt[0] < datetime.MINYEAR: # pragma: no cover
- value = datetime.time(*dt[3:])
- else:
- value = datetime.datetime(*dt)
- elif typ == XL_CELL_ERROR:
- value = np.nan
- elif typ == XL_CELL_BOOLEAN:
- value = bool(value)
- row.append(value)
-
- data.append(row)
-
- if header is not None:
- data[header] = _trim_excel_header(data[header])
-
- parser = TextParser(data, header=header, index_col=index_col,
- has_index_names=has_index_names,
- na_values=na_values,
- thousands=thousands,
- parse_dates=parse_dates,
- date_parser=date_parser,
- skiprows=skiprows,
- skip_footer=skip_footer,
- chunksize=chunksize)
-
- return parser.read()
-
- @property
- def sheet_names(self):
- return self.book.sheet_names()
-
-
-def _trim_excel_header(row):
- # trim header row so auto-index inference works
- # xlrd uses '' , openpyxl None
- while len(row) > 0 and (row[0] == '' or row[0] is None):
- row = row[1:]
- return row
-
-
-class CellStyleConverter(object):
- """
- Utility Class which converts a style dict to xlrd or openpyxl style
- """
-
- @staticmethod
- def to_xls(style_dict, num_format_str=None):
- """
- converts a style_dict to an xlwt style object
- Parameters
- ----------
- style_dict: style dictionary to convert
- """
- import xlwt
-
- def style_to_xlwt(item, firstlevel=True, field_sep=',', line_sep=';'):
- """helper wich recursively generate an xlwt easy style string
- for example:
-
- hstyle = {"font": {"bold": True},
- "border": {"top": "thin",
- "right": "thin",
- "bottom": "thin",
- "left": "thin"},
- "align": {"horiz": "center"}}
- will be converted to
- font: bold on; \
- border: top thin, right thin, bottom thin, left thin; \
- align: horiz center;
- """
- if hasattr(item, 'items'):
- if firstlevel:
- it = ["%s: %s" % (key, style_to_xlwt(value, False))
- for key, value in item.items()]
- out = "%s " % (line_sep).join(it)
- return out
- else:
- it = ["%s %s" % (key, style_to_xlwt(value, False))
- for key, value in item.items()]
- out = "%s " % (field_sep).join(it)
- return out
- else:
- item = "%s" % item
- item = item.replace("True", "on")
- item = item.replace("False", "off")
- return item
-
- if style_dict:
- xlwt_stylestr = style_to_xlwt(style_dict)
- style = xlwt.easyxf(xlwt_stylestr, field_sep=',', line_sep=';')
- else:
- style = xlwt.XFStyle()
- if num_format_str is not None:
- style.num_format_str = num_format_str
-
- return style
-
- @staticmethod
- def to_xlsx(style_dict):
- """
- converts a style_dict to an openpyxl style object
- Parameters
- ----------
- style_dict: style dictionary to convert
- """
-
- from openpyxl.style import Style
- xls_style = Style()
- for key, value in style_dict.items():
- for nk, nv in value.items():
- if key == "borders":
- (xls_style.borders.__getattribute__(nk)
- .__setattr__('border_style', nv))
- else:
- xls_style.__getattribute__(key).__setattr__(nk, nv)
-
- return xls_style
-
-
-def _conv_value(val):
- # convert value for excel dump
- if isinstance(val, np.int64):
- val = int(val)
- elif isinstance(val, np.bool8):
- val = bool(val)
- elif isinstance(val, Period):
- val = "%s" % val
-
- return val
-
-
-class ExcelWriter(object):
- """
- Class for writing DataFrame objects into excel sheets, uses xlwt for xls,
- openpyxl for xlsx. See DataFrame.to_excel for typical usage.
-
- Parameters
- ----------
- path : string
- Path to xls file
- """
+from pandas.io import excel
+class ExcelWriter(excel.ExcelWriter):
def __init__(self, path):
- self.use_xlsx = True
- if path.endswith('.xls'):
- self.use_xlsx = False
- import xlwt
- self.book = xlwt.Workbook()
- self.fm_datetime = xlwt.easyxf(
- num_format_str='YYYY-MM-DD HH:MM:SS')
- self.fm_date = xlwt.easyxf(num_format_str='YYYY-MM-DD')
- else:
- from openpyxl.workbook import Workbook
- self.book = Workbook() # optimized_write=True)
- # open pyxl 1.6.1 adds a dummy sheet remove it
- if self.book.worksheets:
- self.book.remove_sheet(self.book.worksheets[0])
- self.path = path
- self.sheets = {}
- self.cur_sheet = None
-
- def save(self):
- """
- Save workbook to disk
- """
- self.book.save(self.path)
+ from warnings import warn
+ warn("ExcelWriter can now be imported from: pandas.io.excel", FutureWarning)
+ super(ExcelWriter, self).__init__(path)
- def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
- """
- Write given formated cells into Excel an excel sheet
-
- Parameters
- ----------
- cells : generator
- cell of formated data to save to Excel sheet
- sheet_name : string, default None
- Name of Excel sheet, if None, then use self.cur_sheet
- startrow: upper left cell row to dump data frame
- startcol: upper left cell column to dump data frame
- """
- if sheet_name is None:
- sheet_name = self.cur_sheet
- if sheet_name is None: # pragma: no cover
- raise Exception('Must pass explicit sheet_name or set '
- 'cur_sheet property')
- if self.use_xlsx:
- self._writecells_xlsx(cells, sheet_name, startrow, startcol)
- else:
- self._writecells_xls(cells, sheet_name, startrow, startcol)
-
- def _writecells_xlsx(self, cells, sheet_name, startrow, startcol):
-
- from openpyxl.cell import get_column_letter
-
- if sheet_name in self.sheets:
- wks = self.sheets[sheet_name]
- else:
- wks = self.book.create_sheet()
- wks.title = sheet_name
- self.sheets[sheet_name] = wks
-
- for cell in cells:
- colletter = get_column_letter(startcol + cell.col + 1)
- xcell = wks.cell("%s%s" % (colletter, startrow + cell.row + 1))
- xcell.value = _conv_value(cell.val)
- if cell.style:
- style = CellStyleConverter.to_xlsx(cell.style)
- for field in style.__fields__:
- xcell.style.__setattr__(field,
- style.__getattribute__(field))
-
- if isinstance(cell.val, datetime.datetime):
- xcell.style.number_format.format_code = "YYYY-MM-DD HH:MM:SS"
- elif isinstance(cell.val, datetime.date):
- xcell.style.number_format.format_code = "YYYY-MM-DD"
-
- # merging requires openpyxl latest (works on 1.6.1)
- # todo add version check
- if cell.mergestart is not None and cell.mergeend is not None:
- cletterstart = get_column_letter(startcol + cell.col + 1)
- cletterend = get_column_letter(startcol + cell.mergeend + 1)
-
- wks.merge_cells('%s%s:%s%s' % (cletterstart,
- startrow + cell.row + 1,
- cletterend,
- startrow + cell.mergestart + 1))
-
- def _writecells_xls(self, cells, sheet_name, startrow, startcol):
- if sheet_name in self.sheets:
- wks = self.sheets[sheet_name]
- else:
- wks = self.book.add_sheet(sheet_name)
- self.sheets[sheet_name] = wks
-
- style_dict = {}
-
- for cell in cells:
- val = _conv_value(cell.val)
-
- num_format_str = None
- if isinstance(cell.val, datetime.datetime):
- num_format_str = "YYYY-MM-DD HH:MM:SS"
- if isinstance(cell.val, datetime.date):
- num_format_str = "YYYY-MM-DD"
-
- stylekey = json.dumps(cell.style)
- if num_format_str:
- stylekey += num_format_str
+class ExcelFile(excel.ExcelFile):
+ def __init__(self, path_or_buf, kind=None, **kwds):
+ from warnings import warn
+ warn("ExcelFile can now be imported from: pandas.io.excel", FutureWarning)
+ super(ExcelFile, self).__init__(path_or_buf, kind=kind, **kwds)
- if stylekey in style_dict:
- style = style_dict[stylekey]
- else:
- style = CellStyleConverter.to_xls(cell.style, num_format_str)
- style_dict[stylekey] = style
-
- if cell.mergestart is not None and cell.mergeend is not None:
- wks.write_merge(startrow + cell.row,
- startrow + cell.mergestart,
- startcol + cell.col,
- startcol + cell.mergeend,
- val, style)
- else:
- wks.write(startrow + cell.row,
- startcol + cell.col,
- val, style)
+
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index b54a30d95bb54..4a1cac8a60e30 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -167,7 +167,7 @@ def read_frame(sql, con, index_col=None, coerce_float=True, params=None):
return result
frame_query = read_frame
-
+read_sql = read_frame
def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs):
"""
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 3fc246c2ffbc7..f1257f505ca9b 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -37,10 +37,8 @@ def read_stata(filepath_or_buffer, convert_dates=True, convert_categoricals=True
return reader.data(convert_dates, convert_categoricals, index)
-
_date_formats = ["%tc", "%tC", "%td", "%tw", "%tm", "%tq", "%th", "%ty"]
-
def _stata_elapsed_date_to_datetime(date, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
diff --git a/pandas/io/tests/test_cparser.py b/pandas/io/tests/test_cparser.py
index 0c5b168ee8de5..23503f74f25f2 100644
--- a/pandas/io/tests/test_cparser.py
+++ b/pandas/io/tests/test_cparser.py
@@ -18,7 +18,7 @@
from pandas import DataFrame, Series, Index, isnull, MultiIndex
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
- ExcelFile, TextParser)
+ TextParser)
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
assert_series_equal, network)
import pandas.lib as lib
diff --git a/pandas/io/tests/test_date_converters.py b/pandas/io/tests/test_date_converters.py
index 9396581f74326..396912c0f5f54 100644
--- a/pandas/io/tests/test_date_converters.py
+++ b/pandas/io/tests/test_date_converters.py
@@ -15,7 +15,7 @@
from pandas import DataFrame, Series, Index, isnull
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
- ExcelFile, TextParser)
+ TextParser)
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
assert_series_equal, network)
import pandas.lib as lib
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index 8a145517d3b5a..00a695f3013cd 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -17,7 +17,8 @@
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
- ExcelFile, TextFileReader, TextParser)
+ TextParser, TextFileReader)
+from pandas.io.excel import ExcelFile, ExcelWriter, read_excel
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
network,
@@ -35,9 +36,6 @@
from pandas._parser import OverflowError
-from pandas.io.parsers import (ExcelFile, ExcelWriter, read_csv)
-
-
def _skip_if_no_xlrd():
try:
import xlrd
@@ -275,19 +273,16 @@ def _check_extension(self, ext):
# test roundtrip
self.frame.to_excel(path, 'test1')
- reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=0)
+ recons = read_excel(path, 'test1', index_col=0)
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(path, 'test1', index=False)
- reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=None)
+ recons = read_excel(path, 'test1', index_col=None)
recons.index = self.frame.index
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(path, 'test1', na_rep='NA')
- reader = ExcelFile(path)
- recons = reader.parse('test1', index_col=0, na_values=['NA'])
+ recons = read_excel(path, 'test1', index_col=0, na_values=['NA'])
tm.assert_frame_equal(self.frame, recons)
def test_excel_roundtrip_xls_mixed(self):
@@ -668,7 +663,7 @@ def test_to_excel_unicode_filename(self):
tm.assert_frame_equal(rs, xp)
def test_to_excel_styleconverter(self):
- from pandas.io.parsers import CellStyleConverter
+ from pandas.io.excel import CellStyleConverter
try:
import xlwt
@@ -859,6 +854,23 @@ def roundtrip(df, header=True, parser_hdr=0):
self.assertEqual(res.shape, (1, 2))
self.assertTrue(res.ix[0, 0] is not np.nan)
+ def test_deprecated_from_parsers(self):
+
+ # since 0.11.1 changed the import path
+ import warnings
+
+ with warnings.catch_warnings() as w:
+ warnings.filterwarnings(action='ignore', category=FutureWarning)
+
+ _skip_if_no_xlrd()
+ from pandas.io.parsers import ExcelFile as xf
+ xf(self.xls1)
+
+ _skip_if_no_xlwt()
+ with ensure_clean('test.xls') as path:
+ from pandas.io.parsers import ExcelWriter as xw
+ xw(path)
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index 51062b2ab706f..9f5d796763fb0 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -130,23 +130,21 @@ def test_read_dta4(self):
def test_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss', 'int_miss', 'long_miss'])
+ original.index.name = 'index'
with ensure_clean(self.dta5) as path:
- writer = StataWriter(path, original, None, False)
- writer.write_file()
-
+ original.to_stata(path, None, False)
written_and_read_again = self.read_dta(path)
- tm.assert_frame_equal(written_and_read_again, original)
+ tm.assert_frame_equal(written_and_read_again.set_index('index'), original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
+ original.index.name = 'index'
with ensure_clean(self.dta6) as path:
- writer = StataWriter(path, original, None, False)
- writer.write_file()
-
+ original.to_stata(path, None, False)
written_and_read_again = self.read_dta(path)
- tm.assert_frame_equal(written_and_read_again, original)
+ tm.assert_frame_equal(written_and_read_again.set_index('index'), original)
@nose.tools.nottest
def test_read_dta7(self):
@@ -184,6 +182,10 @@ def test_read_dta9(self):
decimal=3
)
+ def test_stata_doc_examples(self):
+ with ensure_clean(self.dta5) as path:
+ df = DataFrame(np.random.randn(10,2),columns=list('AB'))
+ df.to_stata('path')
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 3640025bbf95c..58b7ac272401f 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1367,7 +1367,7 @@ def test_to_excel(self):
import xlwt
import xlrd
import openpyxl
- from pandas.io.parsers import ExcelFile
+ from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest
diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py
index 5981640b4159c..a2e08bc744ab0 100644
--- a/pandas/tests/test_panel4d.py
+++ b/pandas/tests/test_panel4d.py
@@ -15,7 +15,6 @@
import pandas.core.common as com
import pandas.core.panel as panelmod
from pandas.util import py3compat
-from pandas.io.parsers import (ExcelFile, ExcelWriter)
from pandas.util.testing import (assert_panel_equal,
assert_panel4d_equal,
| closes most of #3411 (all except the deprecation of `from_csv`)
- moved excel functionaility out of io.parsers to io.excel
added read_excel top-level function
aliases from pandas.io.excel
- added read_stata top-level function, to_stata DataFrame method
aliases from pandas.io.stata
removed read_dta (replace by read_stata)
- added read_sql top-level function, to_sql DataFrame method
aliases from pandas.io.sql
DOC: doc updates for all the above and intro section to io.rst
| https://api.github.com/repos/pandas-dev/pandas/pulls/3693 | 2013-05-25T00:40:50Z | 2013-05-30T16:32:27Z | 2013-05-30T16:32:27Z | 2014-07-16T08:10:24Z |
BUG: allow insertion/deletion of columns in non-unique column DataFrames | diff --git a/RELEASE.rst b/RELEASE.rst
index 38a8b42fcde6f..710e3cbbb2b81 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -102,6 +102,8 @@ pandas 0.11.1
GH3675_, GH3676_).
- Deprecated display.height, display.width is now only a formatting option
does not control triggering of summary, similar to < 0.11.0.
+ - Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column
+ to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_)
**Bug Fixes**
@@ -133,6 +135,8 @@ pandas 0.11.1
- Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_)
- Concat to produce a non-unique columns when duplicates are across dtypes is fixed (GH3602_)
- Non-unique indexing with a slice via ``loc`` and friends fixed (GH3659_)
+ - Allow insert/delete to non-unique columns (GH3679_)
+ - Extend ``reindex`` to correctly deal with non-unique indices (GH3679_)
- Fixed bug in groupby with empty series referencing a variable before assignment. (GH3510_)
- Fixed bug in mixed-frame assignment with aligned series (GH3492_)
- Fixed bug in selecting month/quarter/year from a series would not select the time element
@@ -242,6 +246,8 @@ pandas 0.11.1
.. _GH3606: https://github.com/pydata/pandas/issues/3606
.. _GH3659: https://github.com/pydata/pandas/issues/3659
.. _GH3649: https://github.com/pydata/pandas/issues/3649
+.. _GH3679: https://github.com/pydata/pandas/issues/3679
+.. _Gh3616: https://github.com/pydata/pandas/issues/3616
.. _GH1818: https://github.com/pydata/pandas/issues/1818
.. _GH3572: https://github.com/pydata/pandas/issues/3572
.. _GH3582: https://github.com/pydata/pandas/issues/3582
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index ac769ed2f1cea..bd4a7c49fbb4d 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -71,6 +71,8 @@ API changes
``DataFrame.fillna()`` and ``DataFrame.replace()`` instead. (GH3582_,
GH3675_, GH3676_)
+ - Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column
+ to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_)
Enhancements
~~~~~~~~~~~~
@@ -209,6 +211,7 @@ Bug Fixes
and handle missing elements like unique indices (GH3561_)
- Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_)
- Concat to produce a non-unique columns when duplicates are across dtypes is fixed (GH3602_)
+ - Allow insert/delete to non-unique columns (GH3679_)
For example you can do
@@ -270,3 +273,4 @@ on GitHub for a complete list.
.. _GH3676: https://github.com/pydata/pandas/issues/3676
.. _GH3675: https://github.com/pydata/pandas/issues/3675
.. _GH3682: https://github.com/pydata/pandas/issues/3682
+.. _GH3679: https://github.com/pydata/pandas/issues/3679
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 68edceb29e6b2..5c2bc3e632a57 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2003,7 +2003,11 @@ def __getitem__(self, key):
return self._getitem_multilevel(key)
else:
# get column
- return self._get_item_cache(key)
+ if self.columns.is_unique:
+ return self._get_item_cache(key)
+
+ # duplicate columns
+ return self._constructor(self._data.get(key))
def _getitem_slice(self, key):
return self._slice(key, axis=0)
@@ -2162,10 +2166,10 @@ def _set_item(self, key, value):
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
- def insert(self, loc, column, value):
+ def insert(self, loc, column, value, allow_duplicates=False):
"""
- Insert column into DataFrame at specified location. Raises Exception if
- column is already contained in the DataFrame
+ Insert column into DataFrame at specified location.
+ if allow_duplicates is False, Raises Exception if column is already contained in the DataFrame
Parameters
----------
@@ -2175,7 +2179,7 @@ def insert(self, loc, column, value):
value : int, Series, or array-like
"""
value = self._sanitize_column(column, value)
- self._data.insert(loc, column, value)
+ self._data.insert(loc, column, value, allow_duplicates=allow_duplicates)
def _sanitize_column(self, key, value):
# Need to make sure new columns (which go into the BlockManager as new
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 3a6913a924c1d..51ebd58c33343 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -940,8 +940,15 @@ def reindex(self, target, method=None, level=None, limit=None):
if self.equals(target):
indexer = None
else:
- indexer = self.get_indexer(target, method=method,
- limit=limit)
+ if self.is_unique:
+ indexer = self.get_indexer(target, method=method,
+ limit=limit)
+ else:
+ if method is not None or limit is not None:
+ raise ValueError("cannot reindex a non-unique index "
+ "with a method or limit")
+ indexer, missing = self.get_indexer_non_unique(target)
+
return target, indexer
def join(self, other, how='left', level=None, return_indexers=False):
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index f7187b7ae5d61..7a7210c479c67 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -457,7 +457,7 @@ def _reindex(keys, level=None):
else:
level = None
- if labels.is_unique:
+ if labels.is_unique and Index(keyarr).is_unique:
return _reindex(keyarr, level=level)
else:
indexer, missing = labels.get_indexer_non_unique(keyarr)
@@ -991,7 +991,6 @@ def _slice(self, indexer, axis=0):
def _setitem_with_indexer(self, indexer, value):
self.obj._set_values(indexer, value)
-
def _check_bool_indexer(ax, key):
# boolean indexing, need to check that the data are aligned, otherwise
# disallowed
@@ -1010,7 +1009,6 @@ def _check_bool_indexer(ax, key):
result = np.asarray(result, dtype=bool)
return result
-
def _is_series(obj):
from pandas.core.series import Series
return isinstance(obj, Series)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index ca04bd3fe26e0..8b711f5e077ce 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -36,7 +36,7 @@ class Block(object):
_can_hold_na = False
_downcast_dtype = None
- def __init__(self, values, items, ref_items, ndim=2, fastpath=False):
+ def __init__(self, values, items, ref_items, ndim=2, fastpath=False, placement=None):
if values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
@@ -45,7 +45,7 @@ def __init__(self, values, items, ref_items, ndim=2, fastpath=False):
raise ValueError('Wrong number of items passed %d, indices imply %d'
% (len(items), len(values)))
- self._ref_locs = None
+ self.set_ref_locs(placement)
self.values = values
self.ndim = ndim
@@ -71,10 +71,16 @@ def ref_locs(self):
self._ref_locs = indexer
return self._ref_locs
+ def reset_ref_locs(self):
+ """ reset the block ref_locs """
+ self._ref_locs = np.empty(len(self.items),dtype='int64')
+
def set_ref_locs(self, placement):
""" explicity set the ref_locs indexer, only necessary for duplicate indicies """
- if placement is not None:
- self._ref_locs = np.array(placement,dtype='int64')
+ if placement is None:
+ self._ref_locs = None
+ else:
+ self._ref_locs = np.array(placement,dtype='int64', copy=True)
def set_ref_items(self, ref_items, maybe_rename=True):
"""
@@ -129,7 +135,7 @@ def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
- return make_block(values, self.items, self.ref_items, klass=self.__class__, fastpath=True)
+ return make_block(values, self.items, self.ref_items, klass=self.__class__, fastpath=True, placement=self._ref_locs)
def merge(self, other):
if not self.ref_items.equals(other.ref_items):
@@ -148,7 +154,8 @@ def reindex_axis(self, indexer, axis=1, fill_value=np.nan, mask_info=None):
raise AssertionError('axis must be at least 1, got %d' % axis)
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
- return make_block(new_values, self.items, self.ref_items, fastpath=True)
+ return make_block(new_values, self.items, self.ref_items, fastpath=True,
+ placement=self._ref_locs)
def reindex_items_from(self, new_ref_items, copy=True):
"""
@@ -162,6 +169,7 @@ def reindex_items_from(self, new_ref_items, copy=True):
reindexed : Block
"""
new_ref_items, indexer = self.items.reindex(new_ref_items)
+
if indexer is None:
new_items = new_ref_items
new_values = self.values.copy() if copy else self.values
@@ -201,31 +209,6 @@ def delete(self, item):
new_values = np.delete(self.values, loc, 0)
return make_block(new_values, new_items, self.ref_items, klass=self.__class__, fastpath=True)
- def split_block_at(self, item):
- """
- Split block into zero or more blocks around columns with given label,
- for "deleting" a column without having to copy data by returning views
- on the original array.
-
- Returns
- -------
- generator of Block
- """
- loc = self.items.get_loc(item)
-
- if type(loc) == slice or type(loc) == int:
- mask = [True] * len(self)
- mask[loc] = False
- else: # already a mask, inverted
- mask = -loc
-
- for s, e in com.split_ranges(mask):
- yield make_block(self.values[s:e],
- self.items[s:e].copy(),
- self.ref_items,
- klass=self.__class__,
- fastpath=True)
-
def fillna(self, value, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
@@ -707,11 +690,12 @@ class ObjectBlock(Block):
is_object = True
_can_hold_na = True
- def __init__(self, values, items, ref_items, ndim=2, fastpath=False):
+ def __init__(self, values, items, ref_items, ndim=2, fastpath=False, placement=None):
if issubclass(values.dtype.type, basestring):
values = np.array(values, dtype=object)
- super(ObjectBlock, self).__init__(values, items, ref_items, ndim=ndim, fastpath=fastpath)
+ super(ObjectBlock, self).__init__(values, items, ref_items,
+ ndim=ndim, fastpath=fastpath, placement=placement)
@property
def is_bool(self):
@@ -736,6 +720,7 @@ def convert(self, convert_dates = True, convert_numeric = True, copy = True):
"""
# attempt to create new type blocks
+ is_unique = self.items.is_unique
blocks = []
for i, c in enumerate(self.items):
values = self.iget(i)
@@ -743,7 +728,8 @@ def convert(self, convert_dates = True, convert_numeric = True, copy = True):
values = com._possibly_convert_objects(values, convert_dates=convert_dates, convert_numeric=convert_numeric)
values = _block_shape(values)
items = self.items.take([i])
- newb = make_block(values, items, self.ref_items, fastpath=True)
+ placement = None if is_unique else [i]
+ newb = make_block(values, items, self.ref_items, fastpath=True, placement=placement)
blocks.append(newb)
return blocks
@@ -857,11 +843,12 @@ def re_replacer(s):
class DatetimeBlock(Block):
_can_hold_na = True
- def __init__(self, values, items, ref_items, ndim=2, fastpath=True):
+ def __init__(self, values, items, ref_items, ndim=2, fastpath=True, placement=None):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
- super(DatetimeBlock, self).__init__(values, items, ref_items, ndim=ndim, fastpath=fastpath)
+ super(DatetimeBlock, self).__init__(values, items, ref_items,
+ ndim=ndim, fastpath=fastpath, placement=placement)
def _gi(self, arg):
return lib.Timestamp(self.values[arg])
@@ -942,8 +929,7 @@ def get_values(self, dtype):
return res.reshape(self.values.shape)
return self.values
-
-def make_block(values, items, ref_items, klass = None, fastpath=False):
+def make_block(values, items, ref_items, klass=None, fastpath=False, placement=None):
if klass is None:
dtype = values.dtype
@@ -977,7 +963,7 @@ def make_block(values, items, ref_items, klass = None, fastpath=False):
if klass is None:
klass = ObjectBlock
- return klass(values, items, ref_items, ndim=values.ndim, fastpath=fastpath)
+ return klass(values, items, ref_items, ndim=values.ndim, fastpath=fastpath, placement=placement)
# TODO: flexible with index=None and/or items=None
@@ -1031,11 +1017,11 @@ def __nonzero__(self):
def ndim(self):
return len(self.axes)
- def set_axis(self, axis, value):
+ def set_axis(self, axis, value, maybe_rename=True, check_axis=True):
cur_axis = self.axes[axis]
value = _ensure_index(value)
- if len(value) != len(cur_axis):
+ if check_axis and len(value) != len(cur_axis):
raise Exception('Length mismatch (%d vs %d)'
% (len(value), len(cur_axis)))
@@ -1049,11 +1035,40 @@ def set_axis(self, axis, value):
# take via ref_locs
for block in self.blocks:
- block.set_ref_items(self.items, maybe_rename=True)
+ block.set_ref_items(self.items, maybe_rename=maybe_rename)
# set/reset ref_locs based on the new index
self._set_ref_locs(labels=value, do_refs=True)
+
+ def _reset_ref_locs(self):
+ """ take the current _ref_locs and reset ref_locs on the blocks
+ to correctly map, ignoring Nones;
+ reset both _items_map and _ref_locs """
+
+ # let's reset the ref_locs in individual blocks
+ if self.items.is_unique:
+ for b in self.blocks:
+ b._ref_locs = None
+ else:
+ for b in self.blocks:
+ b.reset_ref_locs()
+ self._rebuild_ref_locs()
+
+ self._ref_locs = None
+ self._items_map = None
+
+ def _rebuild_ref_locs(self):
+ """ take _ref_locs and set the individual block ref_locs, skipping Nones
+ no effect on a unique index """
+ if self._ref_locs is not None:
+ item_count = 0
+ for v in self._ref_locs:
+ if v is not None:
+ block, item_loc = v
+ block._ref_locs[item_loc] = item_count
+ item_count += 1
+
def _set_ref_locs(self, labels=None, do_refs=False):
"""
if we have a non-unique index on this axis, set the indexers
@@ -1065,61 +1080,50 @@ def _set_ref_locs(self, labels=None, do_refs=False):
"""
- im = None
if labels is None:
labels = self.items
- else:
- _ensure_index(labels)
# we are unique, and coming from a unique
- if labels.is_unique and not do_refs:
+ is_unique = labels.is_unique
+ if is_unique and not do_refs:
- # reset our ref locs
- self._ref_locs = None
- for b in self.blocks:
- b._ref_locs = None
+ if not self.items.is_unique:
+
+ # reset our ref locs
+ self._ref_locs = None
+ for b in self.blocks:
+ b._ref_locs = None
return None
# we are going to a non-unique index
# we have ref_locs on the block at this point
- # or if ref_locs are not set, then we must assume a block
- # ordering
- if not labels.is_unique and do_refs:
+ if (not is_unique and do_refs) or do_refs=='force':
# create the items map
im = getattr(self,'_items_map',None)
if im is None:
im = dict()
- def maybe_create_block(block):
- try:
- return d[block]
- except:
- im[block] = l = [ None ] * len(block.items)
- return l
-
- count_items = 0
for block in self.blocks:
# if we have a duplicate index but
- # _ref_locs have not been set....then
- # have to assume ordered blocks are passed
- num_items = len(block.items)
+ # _ref_locs have not been set
try:
rl = block.ref_locs
except:
- rl = np.arange(num_items) + count_items
+ raise AssertionError("cannot create BlockManager._ref_locs because "
+ "block [%s] with duplicate items [%s] "
+ "does not have _ref_locs set" % (block,labels))
- m = maybe_create_block(block)
+ m = maybe_create_block_in_items_map(im,block)
for i, item in enumerate(block.items):
m[i] = rl[i]
- count_items += num_items
self._items_map = im
# create the _ref_loc map here
- rl = np.empty(len(labels),dtype=object)
+ rl = [ None] * len(labels)
for block, items in im.items():
for i, loc in enumerate(items):
rl[loc] = (block,i)
@@ -1147,20 +1151,13 @@ def get_items_map(self, use_cached=True):
im = dict()
rl = self._set_ref_locs()
- def maybe_create_block(block):
- try:
- return im[block]
- except:
- im[block] = l = [ None ] * len(block.items)
- return l
-
# we have a non-duplicative index
if rl is None:
axis = self.axes[0]
for block in self.blocks:
- m = maybe_create_block(block)
+ m = maybe_create_block_in_items_map(im,block)
for i, item in enumerate(block.items):
m[i] = axis.get_loc(item)
@@ -1170,7 +1167,7 @@ def maybe_create_block(block):
for i, (block, idx) in enumerate(rl):
- m = maybe_create_block(block)
+ m = maybe_create_block_in_items_map(im,block)
m[idx] = i
self._items_map = im
@@ -1445,8 +1442,8 @@ def get_slice(self, slobj, axis=0, raise_on_error=False):
new_items,
new_items,
klass=blk.__class__,
- fastpath=True)
- newb.set_ref_locs(blk._ref_locs)
+ fastpath=True,
+ placement=blk._ref_locs)
new_blocks = [newb]
else:
return self.reindex_items(new_items)
@@ -1469,8 +1466,8 @@ def _slice_blocks(self, slobj, axis):
block.items,
block.ref_items,
klass=block.__class__,
- fastpath=True)
- newb.set_ref_locs(block._ref_locs)
+ fastpath=True,
+ placement=block._ref_locs)
new_blocks.append(newb)
return new_blocks
@@ -1640,13 +1637,41 @@ def consolidate(self):
def _consolidate_inplace(self):
if not self.is_consolidated():
+
self.blocks = _consolidate(self.blocks, self.items)
+
+ # reset our mappings
+ if not self.items.is_unique:
+ self._ref_locs = None
+ self._items_map = None
+ self._set_ref_locs(do_refs=True)
+
self._is_consolidated = True
self._known_consolidated = True
def get(self, item):
- _, block = self._find_block(item)
- return block.get(item)
+ if self.items.is_unique:
+ _, block = self._find_block(item)
+ return block.get(item)
+ else:
+ indexer = self.items.get_loc(item)
+ ref_locs = np.array(self._set_ref_locs())
+
+ # duplicate index but only a single result
+ if com.is_integer(indexer):
+ b, loc = ref_locs[indexer]
+ return b.iget(loc)
+ else:
+
+ # we have a multiple result, potentially across blocks
+ values = [ block.iget(i) for block, i in ref_locs[indexer] ]
+ index = self.items[indexer]
+ axes = [ index ] + self.axes[1:]
+ blocks = form_blocks(values, index, axes)
+ mgr = BlockManager(blocks, axes)
+ mgr._consolidate_inplace()
+ return mgr
+
def iget(self, i):
item = self.items[i]
@@ -1672,18 +1697,23 @@ def get_scalar(self, tup):
return blk.values[full_loc]
def delete(self, item):
- i, _ = self._find_block(item)
- loc = self.items.get_loc(item)
- self._delete_from_block(i, item)
- if com._is_bool_indexer(loc): # dupe keys may return mask
- loc = [i for i, v in enumerate(loc) if v]
+ is_unique = self.items.is_unique
+ loc = self.items.get_loc(item)
+ # dupe keys may return mask
+ loc = _possibly_convert_to_indexer(loc)
+ self._delete_from_all_blocks(loc, item)
+
+ # _ref_locs, and _items_map are good here
new_items = self.items.delete(loc)
-
self.set_items_norename(new_items)
+
self._known_consolidated = False
+ if not is_unique:
+ self._consolidate_inplace()
+
def set(self, item, value):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
@@ -1704,6 +1734,7 @@ def _set_item(item, arr):
block.set(item, arr)
try:
+
loc = self.items.get_loc(item)
if isinstance(loc, int):
_set_item(self.items[loc], value)
@@ -1712,16 +1743,43 @@ def _set_item(item, arr):
if len(value) != len(subset):
raise AssertionError(
'Number of items to set did not match')
- for i, (item, arr) in enumerate(zip(subset, value)):
- _set_item(item, arr[None, :])
+
+ # we are inserting multiple non-unique items as replacements
+ # we are inserting one by one, so the index can go from unique
+ # to non-unique during the loop, need to have _ref_locs defined
+ # at all times
+ if np.isscalar(item) and com.is_list_like(loc):
+
+ # first delete from all blocks
+ self.delete(item)
+
+ loc = _possibly_convert_to_indexer(loc)
+ for i, (l, arr) in enumerate(zip(loc, value)):
+
+ # insert the item
+ self.insert(l, item, arr[None, :], allow_duplicates=True)
+
+ # reset the _ref_locs on indiviual blocks
+ # rebuild ref_locs
+ if self.items.is_unique:
+ self._reset_ref_locs()
+ self._set_ref_locs(do_refs='force')
+
+ self._rebuild_ref_locs()
+
+
+ else:
+ for i, (item, arr) in enumerate(zip(subset, value)):
+ _set_item(item, arr[None, :])
except KeyError:
# insert at end
self.insert(len(self.items), item, value)
self._known_consolidated = False
- def insert(self, loc, item, value):
- if item in self.items:
+ def insert(self, loc, item, value, allow_duplicates=False):
+
+ if not allow_duplicates and item in self.items:
raise Exception('cannot insert %s, already exists' % item)
try:
@@ -1747,20 +1805,89 @@ def insert(self, loc, item, value):
self._known_consolidated = False
def set_items_norename(self, value):
- value = _ensure_index(value)
- self.axes[0] = value
+ self.set_axis(0, value, maybe_rename=False, check_axis=False)
- for block in self.blocks:
- block.set_ref_items(value, maybe_rename=False)
+ def _delete_from_all_blocks(self, loc, item):
+ """ delete from the items loc the item
+ the item could be in multiple blocks which could
+ change each iteration (as we split blocks) """
+
+ # possibily convert to an indexer
+ loc = _possibly_convert_to_indexer(loc)
+
+ if isinstance(loc, (list,tuple,np.ndarray)):
+ for l in loc:
+ for i, b in enumerate(self.blocks):
+ if item in b.items:
+ self._delete_from_block(i, item)
+
+ else:
+ i, _ = self._find_block(item)
+ self._delete_from_block(i, item)
def _delete_from_block(self, i, item):
"""
Delete and maybe remove the whole block
+
+ Remap the split blocks to there old ranges,
+ so after this function, _ref_locs and _items_map (if used)
+ are correct for the items, None fills holes in _ref_locs
"""
- block = self.blocks.pop(i)
- for b in block.split_block_at(item):
- self.blocks.append(b)
+ block = self.blocks.pop(i)
+ ref_locs = self._set_ref_locs()
+ prev_items_map = self._items_map.pop(block) if ref_locs is not None else None
+
+ # compute the split mask
+ loc = block.items.get_loc(item)
+ if type(loc) == slice or com.is_integer(loc):
+ mask = np.array([True] * len(block))
+ mask[loc] = False
+ else: # already a mask, inverted
+ mask = -loc
+
+ # split the block
+ counter = 0
+ for s, e in com.split_ranges(mask):
+
+ sblock = make_block(block.values[s:e],
+ block.items[s:e].copy(),
+ block.ref_items,
+ klass=block.__class__,
+ fastpath=True)
+ self.blocks.append(sblock)
+
+ # update the _ref_locs/_items_map
+ if ref_locs is not None:
+
+ # fill the item_map out for this sub-block
+ m = maybe_create_block_in_items_map(self._items_map,sblock)
+ for j, itm in enumerate(sblock.items):
+
+ # is this item masked (e.g. was deleted)?
+ while (True):
+
+ if counter > len(mask) or mask[counter]:
+ break
+ else:
+ counter += 1
+
+ # find my mapping location
+ m[j] = prev_items_map[counter]
+ counter += 1
+
+ # set the ref_locs in this block
+ sblock.set_ref_locs(m)
+
+ # reset the ref_locs to the new structure
+ if ref_locs is not None:
+
+ # items_map is now good, with the original locations
+ self._set_ref_locs(do_refs=True)
+
+ # reset the ref_locs based on the now good block._ref_locs
+ self._reset_ref_locs()
+
def _add_new_block(self, item, value, loc=None):
# Do we care about dtype at the moment?
@@ -1771,6 +1898,26 @@ def _add_new_block(self, item, value, loc=None):
self.items, fastpath=True)
self.blocks.append(new_block)
+ # set ref_locs based on the this new block
+ # and add to the ref/items maps
+ if not self.items.is_unique:
+
+ # insert into the ref_locs at the appropriate location
+ # _ref_locs is already long enough,
+ # but may need to shift elements
+ new_block.set_ref_locs([0])
+
+ # need to shift elements to the right
+ if self._ref_locs[loc] is not None:
+ for i in reversed(range(loc+1,len(self._ref_locs))):
+ self._ref_locs[i] = self._ref_locs[i-1]
+
+ self._ref_locs[loc] = (new_block, 0)
+
+ # and reset
+ self._reset_ref_locs()
+ self._set_ref_locs(do_refs=True)
+
def _find_block(self, item):
self._check_have(item)
for i, block in enumerate(self.blocks):
@@ -1827,17 +1974,18 @@ def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=np.nan):
def _reindex_indexer_items(self, new_items, indexer, fill_value):
# TODO: less efficient than I'd like
+ is_unique = self.items.is_unique
item_order = com.take_1d(self.items.values, indexer)
# keep track of what items aren't found anywhere
mask = np.zeros(len(item_order), dtype=bool)
-
new_axes = [new_items] + self.axes[1:]
new_blocks = []
for blk in self.blocks:
blk_indexer = blk.items.get_indexer(item_order)
selector = blk_indexer != -1
+
# update with observed items
mask |= selector
@@ -1997,7 +2145,7 @@ def rename_axis(self, mapper, axis=1):
def rename_items(self, mapper, copydata=True):
new_items = Index([mapper(x) for x in self.items])
- new_items.is_unique
+ is_unique = new_items.is_unique
new_blocks = []
for block in self.blocks:
@@ -2057,7 +2205,8 @@ def create_block_manager_from_blocks(blocks, axes):
# if we are passed values, make the blocks
if len(blocks) == 1 and not isinstance(blocks[0], Block):
- blocks = [ make_block(blocks[0], axes[0], axes[0]) ]
+ placement = None if axes[0].is_unique else np.arange(len(axes[0]))
+ blocks = [ make_block(blocks[0], axes[0], axes[0], placement=placement) ]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
@@ -2077,6 +2226,15 @@ def create_block_manager_from_arrays(arrays, names, axes):
except (ValueError):
construction_error(len(arrays),arrays[0].shape[1:],axes)
+def maybe_create_block_in_items_map(im,block):
+ """ create/return the block in an items_map """
+ try:
+ return im[block]
+ except:
+ im[block] = l = [ None ] * len(block.items)
+ return l
+
+
def form_blocks(arrays, names, axes):
# pre-filter out items if we passed it
@@ -2154,7 +2312,8 @@ def form_blocks(arrays, names, axes):
block_values = np.empty(shape, dtype=object)
block_values.fill(nan)
- na_block = make_block(block_values, extra_items, items)
+ placement = None if is_unique else np.arange(len(extra_items))
+ na_block = make_block(block_values, extra_items, items, placement=placement)
blocks.append(na_block)
return blocks
@@ -2168,9 +2327,9 @@ def _simple_blockify(tuples, ref_items, dtype, is_unique=True):
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
- block = make_block(values, block_items, ref_items)
- if not is_unique:
- block.set_ref_locs(placement)
+ if is_unique:
+ placement=None
+ block = make_block(values, block_items, ref_items, placement=placement)
return [ block ]
def _multi_blockify(tuples, ref_items, dtype = None, is_unique=True):
@@ -2183,9 +2342,9 @@ def _multi_blockify(tuples, ref_items, dtype = None, is_unique=True):
for dtype, tup_block in grouper:
block_items, values, placement = _stack_arrays(list(tup_block), ref_items, dtype)
- block = make_block(values, block_items, ref_items)
- if not is_unique:
- block.set_ref_locs(placement)
+ if is_unique:
+ placement=None
+ block = make_block(values, block_items, ref_items, placement=placement)
new_blocks.append(block)
return new_blocks
@@ -2308,7 +2467,16 @@ def _merge_blocks(blocks, items, dtype=None):
new_values = _vstack([ b.values for b in blocks ], dtype)
new_items = blocks[0].items.append([b.items for b in blocks[1:]])
new_block = make_block(new_values, new_items, items)
- return new_block.reindex_items_from(items)
+
+ # unique, can reindex
+ if items.is_unique:
+ return new_block.reindex_items_from(items)
+
+ # merge the ref_locs
+ new_ref_locs = [ b._ref_locs for b in blocks ]
+ if all([ x is not None for x in new_ref_locs ]):
+ new_block.set_ref_locs(np.concatenate(new_ref_locs))
+ return new_block
def _block_shape(values, ndim=1, shape=None):
@@ -2328,3 +2496,10 @@ def _vstack(to_stack, dtype):
else:
return np.vstack(to_stack)
+
+def _possibly_convert_to_indexer(loc):
+ if com._is_bool_indexer(loc):
+ loc = [i for i, v in enumerate(loc) if v]
+ elif isinstance(loc,slice):
+ loc = range(loc.start,loc.stop)
+ return loc
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 3711a814cc273..e48cdb52ebae5 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -2825,6 +2825,126 @@ def test_constructor_column_duplicates(self):
[('a', [8]), ('a', [5]), ('b', [6])],
columns=['b', 'a', 'a'])
+
+ def test_column_duplicates_operations(self):
+
+ def check(result, expected=None):
+ if expected is not None:
+ assert_frame_equal(result,expected)
+ result.dtypes
+ str(result)
+
+ # assignment
+ # GH 3687
+ arr = np.random.randn(3, 2)
+ idx = range(2)
+ df = DataFrame(arr, columns=['A', 'A'])
+ df.columns = idx
+ expected = DataFrame(arr,columns=idx)
+ check(df,expected)
+
+ idx = date_range('20130101',periods=4,freq='Q-NOV')
+ df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['a','a','a','a'])
+ df.columns = idx
+ expected = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=idx)
+ check(df,expected)
+
+ # insert
+ df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['foo','bar','foo','hello'])
+ df['string'] = 'bah'
+ expected = DataFrame([[1,1,1,5,'bah'],[1,1,2,5,'bah'],[2,1,3,5,'bah']],columns=['foo','bar','foo','hello','string'])
+ check(df,expected)
+
+ # insert same dtype
+ df['foo2'] = 3
+ expected = DataFrame([[1,1,1,5,'bah',3],[1,1,2,5,'bah',3],[2,1,3,5,'bah',3]],columns=['foo','bar','foo','hello','string','foo2'])
+ check(df,expected)
+
+ # set (non-dup)
+ df['foo2'] = 4
+ expected = DataFrame([[1,1,1,5,'bah',4],[1,1,2,5,'bah',4],[2,1,3,5,'bah',4]],columns=['foo','bar','foo','hello','string','foo2'])
+ check(df,expected)
+ df['foo2'] = 3
+
+ # delete (non dup)
+ del df['bar']
+ expected = DataFrame([[1,1,5,'bah',3],[1,2,5,'bah',3],[2,3,5,'bah',3]],columns=['foo','foo','hello','string','foo2'])
+ check(df,expected)
+
+ # try to delete again (its not consolidated)
+ del df['hello']
+ expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])
+ check(df,expected)
+
+ # consolidate
+ df = df.consolidate()
+ expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])
+ check(df,expected)
+
+ # insert
+ df.insert(2,'new_col',5.)
+ expected = DataFrame([[1,1,5.,'bah',3],[1,2,5.,'bah',3],[2,3,5.,'bah',3]],columns=['foo','foo','new_col','string','foo2'])
+ check(df,expected)
+
+ # insert a dup
+ self.assertRaises(Exception, df.insert, 2, 'new_col', 4.)
+ df.insert(2,'new_col',4.,allow_duplicates=True)
+ expected = DataFrame([[1,1,4.,5.,'bah',3],[1,2,4.,5.,'bah',3],[2,3,4.,5.,'bah',3]],columns=['foo','foo','new_col','new_col','string','foo2'])
+ check(df,expected)
+
+ # delete (dup)
+ del df['foo']
+ expected = DataFrame([[4.,5.,'bah',3],[4.,5.,'bah',3],[4.,5.,'bah',3]],columns=['new_col','new_col','string','foo2'])
+ assert_frame_equal(df,expected)
+
+ # dup across dtypes
+ df = DataFrame([[1,1,1.,5],[1,1,2.,5],[2,1,3.,5]],columns=['foo','bar','foo','hello'])
+ check(df)
+
+ df['foo2'] = 7.
+ expected = DataFrame([[1,1,1.,5,7.],[1,1,2.,5,7.],[2,1,3.,5,7.]],columns=['foo','bar','foo','hello','foo2'])
+ check(df,expected)
+
+ result = df['foo']
+ expected = DataFrame([[1,1.],[1,2.],[2,3.]],columns=['foo','foo'])
+ check(result,expected)
+
+ # multiple replacements
+ df['foo'] = 'string'
+ expected = DataFrame([['string',1,'string',5,7.],['string',1,'string',5,7.],['string',1,'string',5,7.]],columns=['foo','bar','foo','hello','foo2'])
+ check(df,expected)
+
+ del df['foo']
+ expected = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','hello','foo2'])
+ check(df,expected)
+
+ # reindex
+ df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])
+ expected = DataFrame([[1],[1],[1]],columns=['bar'])
+ result = df.reindex(columns=['bar'])
+ check(result,expected)
+
+ result1 = DataFrame([[1],[1],[1]],columns=['bar']).reindex(columns=['bar','foo'])
+ result2 = df.reindex(columns=['bar','foo'])
+ check(result2,result1)
+
+ # drop
+ df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])
+ df = df.drop(['a'],axis=1)
+ expected = DataFrame([[1],[1],[1]],columns=['bar'])
+ check(df,expected)
+
+ def test_insert_benchmark(self):
+ # from the vb_suite/frame_methods/frame_insert_columns
+ N = 10
+ K = 5
+ df = DataFrame(index=range(N))
+ new_col = np.random.randn(N)
+ for i in range(K):
+ df[i] = new_col
+ expected = DataFrame(np.repeat(new_col,K).reshape(N,K),index=range(N))
+ assert_frame_equal(df,expected)
+
def test_constructor_single_value(self):
# expecting single value upcasting here
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index e25bd0de769a7..0f3b8c1634416 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -1,7 +1,7 @@
# pylint: disable=W0102
import unittest
-
+import nose
import numpy as np
from pandas import Index, MultiIndex, DataFrame, Series
@@ -173,6 +173,11 @@ def test_delete(self):
self.assertRaises(Exception, self.fblock.delete, 'b')
def test_split_block_at(self):
+
+ # with dup column support this method was taken out
+ # GH3679
+ raise nose.SkipTest
+
bs = list(self.fblock.split_block_at('a'))
self.assertEqual(len(bs), 1)
self.assertTrue(np.array_equal(bs[0].items, ['c', 'e']))
@@ -267,9 +272,21 @@ def test_duplicate_item_failure(self):
for b in blocks:
b.ref_items = items
+ # test trying to create _ref_locs with/o ref_locs set on the blocks
+ self.assertRaises(AssertionError, BlockManager, blocks, [items, np.arange(N)])
+
+ blocks[0].set_ref_locs([0])
+ blocks[1].set_ref_locs([1])
mgr = BlockManager(blocks, [items, np.arange(N)])
mgr.iget(1)
+ # invalidate the _ref_locs
+ for b in blocks:
+ b._ref_locs = None
+ mgr._ref_locs = None
+ mgr._items_map = None
+ self.assertRaises(AssertionError, mgr._set_ref_locs, do_refs=True)
+
def test_contains(self):
self.assert_('a' in self.mgr)
self.assert_('baz' not in self.mgr)
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index f0d8b922be5bf..b19d099790566 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -781,10 +781,10 @@ def _upcast_blocks(blocks):
for block in blocks:
if isinstance(block, IntBlock):
newb = make_block(block.values.astype(float), block.items,
- block.ref_items)
+ block.ref_items, placement=block._ref_locs)
elif isinstance(block, BoolBlock):
newb = make_block(block.values.astype(object), block.items,
- block.ref_items)
+ block.ref_items, placement=block._ref_locs)
else:
newb = block
new_blocks.append(newb)
| closes #3679, #3687
Here's example of various operations
```
In [3]: df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],
columns=['foo','bar','foo','hello'])
In [27]: df.columns.is_unique
Out[27]: False
In [4]: # insert
In [5]: df['string'] = 'bah'
In [6]: df
Out[6]:
foo bar foo hello string
0 1 1 1 5 bah
1 1 1 2 5 bah
2 2 1 3 5 bah
In [7]: # insert same dtype
In [8]: df['foo2'] = 3
In [9]: df
Out[9]:
foo bar foo hello string foo2
0 1 1 1 5 bah 3
1 1 1 2 5 bah 3
2 2 1 3 5 bah 3
In [10]: # delete (non dup)
In [11]: del df['bar']
In [12]: df
Out[12]:
foo foo hello string foo2
0 1 1 5 bah 3
1 1 2 5 bah 3
2 2 3 5 bah 3
In [13]: # try to delete again (its not consolidated)
In [14]: del df['hello']
In [15]: df
Out[15]:
foo foo string foo2
0 1 1 bah 3
1 1 2 bah 3
2 2 3 bah 3
In [16]: # insert
In [17]: df.insert(2,'new_col',5.)
In [18]: df
Out[18]:
foo foo new_col string foo2
0 1 1 5 bah 3
1 1 2 5 bah 3
2 2 3 5 bah 3
```
This is the current default behavior now
```
In [19]: # insert a dup
In [20]: df.insert(2,'new_col',4.)
Exception: cannot insert new_col, already exists
In [21]: # insert a dup
In [22]: df.insert(2,'new_col',4.,allow_duplicates=True)
In [23]: df
Out[23]:
foo foo new_col new_col string foo2
0 1 1 4 5 bah 3
1 1 2 4 5 bah 3
2 2 3 4 5 bah 3
```
```
In [24]: # delete (dup)
In [25]: del df['foo']
In [26]: df
Out[26]:
new_col new_col string foo2
0 4 5 bah 3
1 4 5 bah 3
2 4 5 bah 3
```
Don't try this at home
1) duplicates across dtypes
2) assigning those duplicates
```
In [5]: df = DataFrame([[1,1,1.,5],[1,1,2.,5],[2,1,3.,5]],columns=['foo','bar','foo','hello'])
In [6]: df.dtypes
Out[6]:
foo int64
bar int64
foo float64
hello int64
dtype: object
In [7]: df['foo'] = 'string'
In [8]: df
Out[8]:
foo bar foo hello
0 string 1 string 5
1 string 1 string 5
2 string 1 string 5
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3683 | 2013-05-22T14:44:07Z | 2013-05-30T00:48:23Z | 2013-05-30T00:48:23Z | 2014-06-12T20:29:27Z |
EHN: Add filter methods to SeriesGroupBy, DataFrameGroupBy GH919 | diff --git a/RELEASE.rst b/RELEASE.rst
index b5dd3eef68dea..1e7880016cdee 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -48,6 +48,8 @@ pandas 0.11.1
- Add iterator to ``Series.str`` (GH3638_)
- ``pd.set_option()`` now allows N option, value pairs (GH3667_).
- Added keyword parameters for different types of scatter_matrix subplots
+ - A ``filter`` method on grouped Series or DataFrames returns a subset of
+ the original (GH3680_, GH919_)
**Improvements to existing features**
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index bc2ff9bbe1013..c5e38a72ec3e9 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -41,6 +41,12 @@ following:
- Standardizing data (zscore) within group
- Filling NAs within groups with a value derived from each group
+ - **Filtration**: discard some groups, according to a group-wise computation
+ that evaluates True or False. Some examples:
+
+ - Discarding data that belongs to groups with only a few members
+ - Filtering out data based on the group sum or mean
+
- Some combination of the above: GroupBy will examine the results of the apply
step and try to return a sensibly combined result if it doesn't fit into
either of the above two categories
@@ -489,6 +495,39 @@ and that the transformed data contains no NAs.
grouped_trans.count() # counts after transformation
grouped_trans.size() # Verify non-NA count equals group size
+.. _groupby.filter:
+
+Filtration
+----------
+
+The ``filter`` method returns a subset of the original object. Suppose we
+want to take only elements that belong to groups with a group sum greater
+than 2.
+
+.. ipython:: python
+
+ s = Series([1, 1, 2, 3, 3, 3])
+ s.groupby(s).filter(lambda x: x.sum() > 2)
+
+The argument of ``filter`` must a function that, applied to the group as a
+whole, returns ``True`` or ``False``.
+
+Another useful operation is filtering out elements that belong to groups
+with only a couple members.
+
+.. ipython:: python
+
+ df = DataFrame({'A': arange(8), 'B': list('aabbbbcc')})
+ df.groupby('B').filter(lambda x: len(x) > 2)
+
+Alternatively, instead of dropping the offending groups, we can return a
+like-indexed objects where the groups that do not pass the filter are filled
+with NaNs.
+
+.. ipython:: python
+
+ df.groupby('B').filter(lambda x: len(x) > 2, dropna=False)
+
.. _groupby.dispatch:
Dispatching to instance methods
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index b2fee1acbc4d6..0641ffae542c0 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -237,6 +237,35 @@ Enhancements
pd.get_option('a.b')
pd.get_option('b.c')
+ - The ``filter`` method for group objects returns a subset of the original
+ object. Suppose we want to take only elements that belong to groups with a
+ group sum greater than 2.
+
+ .. ipython:: python
+
+ s = Series([1, 1, 2, 3, 3, 3])
+ s.groupby(s).filter(lambda x: x.sum() > 2)
+
+ The argument of ``filter`` must a function that, applied to the group as a
+ whole, returns ``True`` or ``False``.
+
+ Another useful operation is filtering out elements that belong to groups
+ with only a couple members.
+
+ .. ipython:: python
+
+ df = DataFrame({'A': arange(8), 'B': list('aabbbbcc')})
+ df.groupby('B').filter(lambda x: len(x) > 2)
+
+ Alternatively, instead of dropping the offending groups, we can return a
+ like-indexed objects where the groups that do not pass the filter are
+ filled with NaNs.
+
+ .. ipython:: python
+
+ df.groupby('B').filter(lambda x: len(x) > 2, dropna=False)
+
+
Bug Fixes
~~~~~~~~~
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 64606a6e644f9..0be5d438e5e7c 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1558,6 +1558,42 @@ def transform(self, func, *args, **kwargs):
result = _possibly_downcast_to_dtype(result, dtype)
return self.obj.__class__(result,index=self.obj.index,name=self.obj.name)
+ def filter(self, func, dropna=True, *args, **kwargs):
+ """
+ Return a copy of a Series excluding elements from groups that
+ do not satisfy the boolean criterion specified by func.
+
+ Parameters
+ ----------
+ func : function
+ To apply to each group. Should return True or False.
+ dropna : Drop groups that do not pass the filter. True by default;
+ if False, groups that evaluate False are filled with NaNs.
+
+ Example
+ -------
+ >>> grouped.filter(lambda x: x.mean() > 0)
+
+ Returns
+ -------
+ filtered : Series
+ """
+ if isinstance(func, basestring):
+ wrapper = lambda x: getattr(x, func)(*args, **kwargs)
+ else:
+ wrapper = lambda x: func(x, *args, **kwargs)
+
+ indexers = [self.obj.index.get_indexer(group.index) \
+ if wrapper(group) else [] for _ , group in self]
+
+ if len(indexers) == 0:
+ filtered = self.obj.take([]) # because np.concatenate would fail
+ else:
+ filtered = self.obj.take(np.concatenate(indexers))
+ if dropna:
+ return filtered
+ else:
+ return filtered.reindex(self.obj.index) # Fill with NaNs.
class NDFrameGroupBy(GroupBy):
@@ -1928,47 +1964,22 @@ def transform(self, func, *args, **kwargs):
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
-
- if isinstance(func, basestring):
- fast_path = lambda group: getattr(group, func)(*args, **kwargs)
- slow_path = lambda group: group.apply(lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
- else:
- fast_path = lambda group: func(group, *args, **kwargs)
- slow_path = lambda group: group.apply(lambda x: func(x, *args, **kwargs), axis=self.axis)
+ fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
- # decide on a fast path
if path is None:
-
- path = slow_path
+ # Try slow path and fast path.
try:
- res = slow_path(group)
-
- # if we make it here, test if we can use the fast path
- try:
- res_fast = fast_path(group)
-
- # compare that we get the same results
- if res.shape == res_fast.shape:
- res_r = res.values.ravel()
- res_fast_r = res_fast.values.ravel()
- mask = notnull(res_r)
- if (res_r[mask] == res_fast_r[mask]).all():
- path = fast_path
-
- except:
- pass
+ path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except Exception: # pragma: no cover
res = fast_path(group)
path = fast_path
-
else:
-
res = path(group)
# broadcasting
@@ -1988,6 +1999,35 @@ def transform(self, func, *args, **kwargs):
concatenated.sort_index(inplace=True)
return concatenated
+ def _define_paths(self, func, *args, **kwargs):
+ if isinstance(func, basestring):
+ fast_path = lambda group: getattr(group, func)(*args, **kwargs)
+ slow_path = lambda group: group.apply(lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
+ else:
+ fast_path = lambda group: func(group, *args, **kwargs)
+ slow_path = lambda group: group.apply(lambda x: func(x, *args, **kwargs), axis=self.axis)
+ return fast_path, slow_path
+
+ def _choose_path(self, fast_path, slow_path, group):
+ path = slow_path
+ res = slow_path(group)
+
+ # if we make it here, test if we can use the fast path
+ try:
+ res_fast = fast_path(group)
+
+ # compare that we get the same results
+ if res.shape == res_fast.shape:
+ res_r = res.values.ravel()
+ res_fast_r = res_fast.values.ravel()
+ mask = notnull(res_r)
+ if (res_r[mask] == res_fast_r[mask]).all():
+ path = fast_path
+
+ except:
+ pass
+ return path, res
+
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
@@ -2008,6 +2048,63 @@ def _transform_item_by_item(self, obj, wrapper):
return DataFrame(output, index=obj.index, columns=columns)
+ def filter(self, func, dropna=True, *args, **kwargs):
+ """
+ Return a copy of a DataFrame excluding elements from groups that
+ do not satisfy the boolean criterion specified by func.
+
+ Parameters
+ ----------
+ f : function
+ Function to apply to each subframe. Should return True or False.
+ dropna : Drop groups that do not pass the filter. True by default;
+ if False, groups that evaluate False are filled with NaNs.
+
+ Note
+ ----
+ Each subframe is endowed the attribute 'name' in case you need to know
+ which group you are working on.
+
+ Example
+ --------
+ >>> grouped = df.groupby(lambda x: mapping[x])
+ >>> grouped.filter(lambda x: x['A'].sum() + x['B'].sum() > 0)
+ """
+ from pandas.tools.merge import concat
+
+ indexers = []
+
+ obj = self._obj_with_exclusions
+ gen = self.grouper.get_iterator(obj, axis=self.axis)
+
+ fast_path, slow_path = self._define_paths(func, *args, **kwargs)
+
+ path = None
+ for name, group in gen:
+ object.__setattr__(group, 'name', name)
+
+ if path is None:
+ # Try slow path and fast path.
+ try:
+ path, res = self._choose_path(fast_path, slow_path, group)
+ except Exception: # pragma: no cover
+ res = fast_path(group)
+ path = fast_path
+ else:
+ res = path(group)
+
+ if res:
+ indexers.append(self.obj.index.get_indexer(group.index))
+
+ if len(indexers) == 0:
+ filtered = self.obj.take([]) # because np.concatenate would fail
+ else:
+ filtered = self.obj.take(np.concatenate(indexers))
+ if dropna:
+ return filtered
+ else:
+ return filtered.reindex(self.obj.index) # Fill with NaNs.
+
class DataFrameGroupBy(NDFrameGroupBy):
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index cf62b16a9dd2a..f3a608b82e756 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -2498,6 +2498,155 @@ def test_groupby_with_empty(self):
grouped = series.groupby(grouper)
assert next(iter(grouped), None) is None
+ def test_filter_series(self):
+ import pandas as pd
+ s = pd.Series([1, 3, 20, 5, 22, 24, 7])
+ expected_odd = pd.Series([1, 3, 5, 7], index=[0, 1, 3, 6])
+ expected_even = pd.Series([20, 22, 24], index=[2, 4, 5])
+ grouper = s.apply(lambda x: x % 2)
+ grouped = s.groupby(grouper)
+ assert_series_equal(
+ grouped.filter(lambda x: x.mean() < 10), expected_odd)
+ assert_series_equal(
+ grouped.filter(lambda x: x.mean() > 10), expected_even)
+ # Test dropna=False.
+ assert_series_equal(
+ grouped.filter(lambda x: x.mean() < 10, dropna=False),
+ expected_odd.reindex(s.index))
+ assert_series_equal(
+ grouped.filter(lambda x: x.mean() > 10, dropna=False),
+ expected_even.reindex(s.index))
+
+ def test_filter_single_column_df(self):
+ import pandas as pd
+ df = pd.DataFrame([1, 3, 20, 5, 22, 24, 7])
+ expected_odd = pd.DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
+ expected_even = pd.DataFrame([20, 22, 24], index=[2, 4, 5])
+ grouper = df[0].apply(lambda x: x % 2)
+ grouped = df.groupby(grouper)
+ assert_frame_equal(
+ grouped.filter(lambda x: x.mean() < 10), expected_odd)
+ assert_frame_equal(
+ grouped.filter(lambda x: x.mean() > 10), expected_even)
+ # Test dropna=False.
+ assert_frame_equal(
+ grouped.filter(lambda x: x.mean() < 10, dropna=False),
+ expected_odd.reindex(df.index))
+ assert_frame_equal(
+ grouped.filter(lambda x: x.mean() > 10, dropna=False),
+ expected_even.reindex(df.index))
+
+ def test_filter_multi_column_df(self):
+ import pandas as pd
+ df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
+ grouper = df['A'].apply(lambda x: x % 2)
+ grouped = df.groupby(grouper)
+ expected = pd.DataFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
+ assert_frame_equal(
+ grouped.filter(lambda x: x['A'].sum() - x['B'].sum() > 10), expected)
+
+ def test_filter_mixed_df(self):
+ import pandas as pd
+ df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
+ grouper = df['A'].apply(lambda x: x % 2)
+ grouped = df.groupby(grouper)
+ expected = pd.DataFrame({'A': [12, 12], 'B': ['b', 'c']},
+ index=[1, 2])
+ assert_frame_equal(
+ grouped.filter(lambda x: x['A'].sum() > 10), expected)
+
+ def test_filter_out_all_groups(self):
+ import pandas as pd
+ s = pd.Series([1, 3, 20, 5, 22, 24, 7])
+ grouper = s.apply(lambda x: x % 2)
+ grouped = s.groupby(grouper)
+ assert_series_equal(
+ grouped.filter(lambda x: x.mean() > 1000), s[[]])
+ df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
+ grouper = df['A'].apply(lambda x: x % 2)
+ grouped = df.groupby(grouper)
+ assert_frame_equal(
+ grouped.filter(lambda x: x['A'].sum() > 1000), df.ix[[]])
+
+ def test_filter_out_no_groups(self):
+ import pandas as pd
+ s = pd.Series([1, 3, 20, 5, 22, 24, 7])
+ grouper = s.apply(lambda x: x % 2)
+ grouped = s.groupby(grouper)
+ filtered = grouped.filter(lambda x: x.mean() > 0)
+ filtered.sort() # was sorted by group
+ s.sort() # was sorted arbitrarily
+ assert_series_equal(filtered, s)
+ df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
+ grouper = df['A'].apply(lambda x: x % 2)
+ grouped = df.groupby(grouper)
+ filtered = grouped.filter(lambda x: x['A'].mean() > 0)
+ assert_frame_equal(filtered.sort(), df)
+
+ def test_filter_condition_raises(self):
+ import pandas as pd
+ def raise_if_sum_is_zero(x):
+ if x.sum() == 0:
+ raise ValueError
+ else:
+ return x.sum() > 0
+ s = pd.Series([-1,0,1,2])
+ grouper = s.apply(lambda x: x % 2)
+ grouped = s.groupby(grouper)
+ self.assertRaises(ValueError,
+ lambda: grouped.filter(raise_if_sum_is_zero))
+
+ def test_filter_against_workaround(self):
+ np.random.seed(0)
+ # Series of ints
+ s = Series(np.random.randint(0,100,1000))
+ grouper = s.apply(lambda x: np.round(x, -1))
+ grouped = s.groupby(grouper)
+ f = lambda x: x.mean() > 10
+ old_way = s[grouped.transform(f).astype('bool')]
+ new_way = grouped.filter(f)
+ assert_series_equal(new_way.order(), old_way.order())
+
+ # Series of floats
+ s = 100*Series(np.random.random(1000))
+ grouper = s.apply(lambda x: np.round(x, -1))
+ grouped = s.groupby(grouper)
+ f = lambda x: x.mean() > 10
+ old_way = s[grouped.transform(f).astype('bool')]
+ new_way = grouped.filter(f)
+ assert_series_equal(new_way.order(), old_way.order())
+
+ # Set up DataFrame of ints, floats, strings.
+ from string import ascii_lowercase
+ letters = np.array(list(ascii_lowercase))
+ N = 1000
+ random_letters = letters.take(np.random.randint(0, 26, N))
+ df = DataFrame({'ints': Series(np.random.randint(0, 100, N)),
+ 'floats': N/10*Series(np.random.random(N)),
+ 'letters': Series(random_letters)})
+
+ # Group by ints; filter on floats.
+ grouped = df.groupby('ints')
+ old_way = df[grouped.floats.\
+ transform(lambda x: x.mean() > N/20).astype('bool')]
+ new_way = grouped.filter(lambda x: x['floats'].mean() > N/20)
+ assert_frame_equal(new_way.sort(), old_way.sort())
+
+ # Group by floats (rounded); filter on strings.
+ grouper = df.floats.apply(lambda x: np.round(x, -1))
+ grouped = df.groupby(grouper)
+ old_way = df[grouped.letters.\
+ transform(lambda x: len(x) < N/10).astype('bool')]
+ new_way = grouped.filter(
+ lambda x: len(x.letters) < N/10)
+ assert_frame_equal(new_way.sort(), old_way.sort())
+
+ # Group by strings; filter on ints.
+ grouped = df.groupby('letters')
+ old_way = df[grouped.ints.\
+ transform(lambda x: x.mean() > N/20).astype('bool')]
+ new_way = grouped.filter(lambda x: x['ints'].mean() > N/20)
+ assert_frame_equal(new_way.sort_index(), old_way.sort_index())
def assert_fp_equal(a, b):
assert((np.abs(a - b) < 1e-12).all())
| closes #919
I have been using Wes' workaround (see #919) for filtering groups. Finally, for brevity's sake, I wrote a real filter method. In the one simple case I checked, it performs faster than the workaround.
On a small (~10) Series:
```
In [7]: %timeit grouped.filter(lambda x: x.mean() > 10) # my method
1000 loops, best of 3: 346 us per loop
In [8]: %timeit grouped.obj[grouped.transform(lambda x: x.mean() > 10)] # workaround
1000 loops, best of 3: 462 us per loop
```
On a large (1000000) Series:
```
In [18]: %timeit grouped.filter(lambda x: x.mean() > 0) # my method
1 loops, best of 3: 213 ms per loop
In [19]: %timeit grouped.obj[grouped.transform(lambda x: x.mean() > 0)]
1 loops, best of 3: 696 ms per loop
```
This PR only handles Series, and I included one simple test. If I am on the right track, I'll write one for DataFrames also and write additional tests. If this is a job for Cython, I'm out of my depth, but I think numpy is sufficient. Does this look OK?
| https://api.github.com/repos/pandas-dev/pandas/pulls/3680 | 2013-05-22T02:43:16Z | 2013-06-06T21:06:26Z | 2013-06-06T21:06:26Z | 2014-06-15T15:23:56Z |
ENH: enhance set_option syntax | diff --git a/RELEASE.rst b/RELEASE.rst
index 9283bada2d720..35741f7eb008f 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -46,6 +46,9 @@ pandas 0.11.1
Note: The default value will change in 0.12 to make the default *to* write and
read multi-index columns in the new format. (GH3571_, GH1651_, GH3141_)
- Add iterator to ``Series.str`` (GH3638_)
+ - ``pd.set_option()`` now allows N option, value pairs (GH3667_).
+
+
**Improvements to existing features**
@@ -269,6 +272,7 @@ pandas 0.11.1
.. _GH3702: https://github.com/pydata/pandas/issues/3702
.. _GH3691: https://github.com/pydata/pandas/issues/3691
.. _GH3696: https://github.com/pydata/pandas/issues/3696
+.. _GH3667: https://github.com/pydata/pandas/issues/3667
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 26069681552f0..5acd2aa365ea3 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -238,6 +238,30 @@ Enhancements
GH3572_). This happens before any drawing takes place which elimnates any
spurious plots from showing up.
+ - ``pd.set_option()`` now allows N option, value pairs (GH3667_).
+
+ Let's say that we had an option ``'a.b'`` and another option ``'b.c'``.
+ We can set them at the same time:
+
+ .. ipython:: python
+ :suppress:
+
+ pd.core.config.register_option('a.b', 2, 'ay dot bee')
+ pd.core.config.register_option('b.c', 3, 'bee dot cee')
+
+ .. ipython:: python
+
+ pd.get_option('a.b')
+ pd.get_option('b.c')
+ pd.set_option('a.b', 1, 'b.c', 4)
+ pd.get_option('a.b')
+ pd.get_option('b.c')
+
+ You can of course still do it sequentially if you want. You can use up to
+ N arguments here, the only stipulation is that the number of arguments
+ must be even (since if they weren't then that would mean you provided an
+ argument name with no value).
+
Bug Fixes
~~~~~~~~~
@@ -305,3 +329,4 @@ on GitHub for a complete list.
.. _GH3702: https://github.com/pydata/pandas/issues/3702
.. _GH3691: https://github.com/pydata/pandas/issues/3691
.. _GH3696: https://github.com/pydata/pandas/issues/3696
+.. _GH3667: https://github.com/pydata/pandas/issues/3667
diff --git a/pandas/core/config.py b/pandas/core/config.py
index 2d62b807cf203..e8403164ac1b9 100644
--- a/pandas/core/config.py
+++ b/pandas/core/config.py
@@ -94,7 +94,7 @@ def _get_option(pat, silent=False):
return root[k]
-def _set_option(pat, value, silent=False):
+def _set_single_option(pat, value, silent):
key = _get_single_key(pat, silent)
o = _get_registered_option(key)
@@ -109,6 +109,40 @@ def _set_option(pat, value, silent=False):
o.cb(key)
+def _set_multiple_options(args, silent):
+ for k, v in zip(args[::2], args[1::2]):
+ _set_single_option(k, v, silent)
+
+
+def _set_option(*args, **kwargs):
+ # must at least 1 arg deal with constraints later
+ nargs = len(args)
+ if not nargs or nargs % 2 != 0:
+ raise AssertionError("Must provide an even number of non-keyword "
+ "arguments")
+
+ # must be 0 or 1 kwargs
+ nkwargs = len(kwargs)
+ if nkwargs not in (0, 1):
+ raise AssertionError("The can only be 0 or 1 keyword arguments")
+
+ # if 1 kwarg then it must be silent=True or silent=False
+ if nkwargs:
+ k, = kwargs.keys()
+ v, = kwargs.values()
+
+ if k != 'silent':
+ raise ValueError("the only allowed keyword argument is 'silent', "
+ "you passed '{0}'".format(k))
+ if not isinstance(v, bool):
+ raise TypeError("the type of the keyword argument passed must be "
+ "bool, you passed a {0}".format(v.__class__))
+
+ # default to false
+ silent = kwargs.get('silent', False)
+ _set_multiple_options(args, silent)
+
+
def _describe_option(pat='', _print_desc=True):
keys = _select_options(pat)
@@ -186,7 +220,7 @@ def __dir__(self):
# of options, and option descriptions.
-class CallableDyanmicDoc(object):
+class CallableDynamicDoc(object):
def __init__(self, func, doc_tmpl):
self.__doc_tmpl__ = doc_tmpl
@@ -301,10 +335,10 @@ def __doc__(self):
# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in pd.api
-get_option = CallableDyanmicDoc(_get_option, _get_option_tmpl)
-set_option = CallableDyanmicDoc(_set_option, _set_option_tmpl)
-reset_option = CallableDyanmicDoc(_reset_option, _reset_option_tmpl)
-describe_option = CallableDyanmicDoc(_describe_option, _describe_option_tmpl)
+get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
+set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
+reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
+describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
options = DictWrapper(_global_config)
######################################################
@@ -505,13 +539,7 @@ def _get_registered_option(key):
-------
RegisteredOption (namedtuple) if key is deprecated, None otherwise
"""
-
- try:
- d = _registered_options[key]
- except KeyError:
- return None
- else:
- return d
+ return _registered_options.get(key)
def _translate_key(key):
diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py
index c1231df026853..a2b1ea43717cf 100644
--- a/pandas/tests/test_config.py
+++ b/pandas/tests/test_config.py
@@ -169,6 +169,44 @@ def test_set_option(self):
self.assertRaises(KeyError, self.cf.set_option, 'no.such.key', None)
+
+ def test_set_option_empty_args(self):
+ self.assertRaises(AssertionError, self.cf.set_option)
+
+ def test_set_option_uneven_args(self):
+ self.assertRaises(AssertionError, self.cf.set_option, 'a.b', 2, 'b.c')
+
+
+ def test_set_option_2_kwargs(self):
+ self.assertRaises(AssertionError, self.cf.set_option, 'a.b', 2,
+ silenadf=2, asdf=2)
+
+ def test_set_option_invalid_kwargs_key(self):
+ self.assertRaises(ValueError, self.cf.set_option, 'a.b', 2,
+ silenadf=2)
+
+ def test_set_option_invalid_kwargs_value_type(self):
+ self.assertRaises(TypeError, self.cf.set_option, 'a.b', 2,
+ silent=2)
+
+ def test_set_option_invalid_single_argument_type(self):
+ self.assertRaises(AssertionError, self.cf.set_option, 2)
+
+ def test_set_option_multiple(self):
+ self.cf.register_option('a', 1, 'doc')
+ self.cf.register_option('b.c', 'hullo', 'doc2')
+ self.cf.register_option('b.b', None, 'doc2')
+
+ self.assertEqual(self.cf.get_option('a'), 1)
+ self.assertEqual(self.cf.get_option('b.c'), 'hullo')
+ self.assertTrue(self.cf.get_option('b.b') is None)
+
+ self.cf.set_option('a', '2', 'b.c', None, 'b.b', 10.0)
+
+ self.assertEqual(self.cf.get_option('a'), '2')
+ self.assertTrue(self.cf.get_option('b.c') is None)
+ self.assertEqual(self.cf.get_option('b.b'), 10.0)
+
def test_validation(self):
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2',
| closes #3667.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3677 | 2013-05-21T22:33:21Z | 2013-05-30T18:12:03Z | 2013-05-30T18:12:03Z | 2014-06-14T19:08:56Z |
API: deprecate DataFrame.interpolate | diff --git a/RELEASE.rst b/RELEASE.rst
index efc0f912060b7..1377bac856a96 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -96,6 +96,9 @@ pandas 0.11.1
- The ``raise_on_error`` option to plotting methods is obviated by GH3572_,
so it is removed. Plots now always raise when data cannot be plotted or the
object being plotted has a dtype of ``object``.
+ - ``DataFrame.interpolate()`` is now deprecated. Please use
+ ``DataFrame.fillna()`` and ``DataFrame.replace()`` instead (GH3582_,
+ GH3675_, GH3676_).
**Bug Fixes**
@@ -233,9 +236,11 @@ pandas 0.11.1
.. _GH3606: https://github.com/pydata/pandas/issues/3606
.. _GH3659: https://github.com/pydata/pandas/issues/3659
.. _GH3649: https://github.com/pydata/pandas/issues/3649
-.. _Gh3616: https://github.com/pydata/pandas/issues/3616
.. _GH1818: https://github.com/pydata/pandas/issues/1818
.. _GH3572: https://github.com/pydata/pandas/issues/3572
+.. _GH3582: https://github.com/pydata/pandas/issues/3582
+.. _GH3676: https://github.com/pydata/pandas/issues/3676
+.. _GH3675: https://github.com/pydata/pandas/issues/3675
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 9209c3938023e..ffa2cc6dc7cab 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -67,6 +67,9 @@ API changes
and thus you should cast to an appropriate numeric dtype if you need to
plot something.
+ - ``DataFrame.interpolate()`` is now deprecated. Please use
+ ``DataFrame.fillna()`` and ``DataFrame.replace()`` instead. (GH3582_,
+ GH3675_, GH3676_)
Enhancements
@@ -241,3 +244,6 @@ on GitHub for a complete list.
.. _GH3656: https://github.com/pydata/pandas/issues/3656
.. _GH1818: https://github.com/pydata/pandas/issues/1818
.. _GH3572: https://github.com/pydata/pandas/issues/3572
+.. _GH3582: https://github.com/pydata/pandas/issues/3582
+.. _GH3676: https://github.com/pydata/pandas/issues/3676
+.. _GH3675: https://github.com/pydata/pandas/issues/3675
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ed56a658d817d..962f994194108 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3506,7 +3506,7 @@ def replace(self, to_replace=None, value=None, method='pad', axis=0,
See also
--------
- reindex, asfreq, fillna, interpolate
+ reindex, asfreq, fillna
Returns
-------
@@ -3678,6 +3678,10 @@ def interpolate(self, to_replace, method='pad', axis=0, inplace=False,
--------
reindex, replace, fillna
"""
+ from warnings import warn
+ warn('DataFrame.interpolate will be removed in v0.12, please use '
+ 'either DataFrame.fillna or DataFrame.replace instead',
+ FutureWarning)
if self._is_mixed_type and axis == 1:
return self.T.replace(to_replace, method=method, limit=limit).T
| closes #3582.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3675 | 2013-05-21T20:33:17Z | 2013-05-22T13:45:59Z | 2013-05-22T13:45:59Z | 2014-06-14T02:16:46Z |
scatter_matrix bug | diff --git a/RELEASE.rst b/RELEASE.rst
index 436f9d8b833a3..fc54f1b87453e 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -40,11 +40,12 @@ pandas 0.11.1
list of the rows from which to read the index. Added the option,
``tupleize_cols`` to provide compatiblity for the pre 0.11.1 behavior of
writing and reading multi-index columns via a list of tuples. The default in
- 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
- multi-index column.
+ 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
+ multi-index column.
Note: The default value will change in 0.12 to make the default *to* write and
read multi-index columns in the new format. (GH3571_, GH1651_, GH3141_)
- Add iterator to ``Series.str`` (GH3638_)
+ - Added keyword parameters for different types of scatter_matrix subplots
**Improvements to existing features**
@@ -63,7 +64,7 @@ pandas 0.11.1
- Add modulo operator to Series, DataFrame
- Add ``date`` method to DatetimeIndex
- Simplified the API and added a describe method to Categorical
- - ``melt`` now accepts the optional parameters ``var_name`` and ``value_name``
+ - ``melt`` now accepts the optional parameters ``var_name`` and ``value_name``
to specify custom column names of the returned DataFrame (GH3649_),
thanks @hoechenberger
@@ -82,8 +83,8 @@ pandas 0.11.1
``timedelta64[ns]`` to ``object/int`` (GH3425_)
- Do not allow datetimelike/timedeltalike creation except with valid types
(e.g. cannot pass ``datetime64[ms]``) (GH3423_)
- - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
- DataFrame -> Series if groups are unique. Regression from 0.10.1,
+ - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
+ DataFrame -> Series if groups are unique. Regression from 0.10.1,
partial revert on (GH2893_) with (GH3596_)
- Raise on ``iloc`` when boolean indexing with a label based indexer mask
e.g. a boolean Series, even with integer labels, will raise. Since ``iloc``
@@ -137,7 +138,7 @@ pandas 0.11.1
is a ``list`` or ``tuple``.
- Fixed bug where a time-series was being selected in preference to an actual column name
in a frame (GH3594_)
- - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
+ - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
``np.nan`` or ``np.inf`` as appropriate (GH3590_)
- Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_)
- Fix ``read_csv`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 751f5fcdb82b2..f0c2e272348ea 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -159,7 +159,8 @@ def use(self, key, value):
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
- diagonal='hist', marker='.', **kwds):
+ diagonal='hist', marker='.', density_kwds={}, hist_kwds={},
+ **kwds):
"""
Draw a matrix of scatter plots.
@@ -174,6 +175,10 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : Matplotlib marker type, default '.'
+ hist_kwds : other plotting keyword arguments
+ To be passed to hist function
+ density_kwds : other plotting keyword arguments
+ To be passed to kernel density estimate plot
kwds : other plotting keyword arguments
To be passed to scatter function
@@ -205,13 +210,13 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
- ax.hist(values)
+ ax.hist(values, hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
- ax.plot(ind, gkde.evaluate(ind), **kwds)
+ ax.plot(ind, gkde.evaluate(ind), **density_kwds)
else:
common = (mask[a] & mask[b]).values
@@ -368,16 +373,16 @@ def andrews_curves(data, class_column, ax=None, samples=200):
"""
Parameters:
-----------
- data : DataFrame
+ data : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
-
+
Returns:
--------
ax: Matplotlib axis object
-
+
"""
from math import sqrt, pi, sin, cos
import matplotlib.pyplot as plt
@@ -1805,7 +1810,7 @@ def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False, **kwarg
grid : Setting this to True will show the grid
kwargs : other plotting keyword arguments
To be passed to scatter function
-
+
Returns
-------
fig : matplotlib.Figure
@@ -2198,9 +2203,9 @@ def _subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
data : DataFrame, optional
If secondary_y is a sequence, data is used to select columns.
-
- fig_kw : Other keyword arguments to be passed to the figure() call.
- Note that all keywords not recognized above will be
+
+ fig_kw : Other keyword arguments to be passed to the figure() call.
+ Note that all keywords not recognized above will be
automatically included here.
| Currently scatter_matrix passes all unmatched keyword arguments to both scatter and line subplots (off-diagonal plots are scatter, diagonal plots are discrete or continuous histograms). I needed to color the points on the scatter plots but could not pass a "c" argument or pandas would try to pass the "c" argument to the line plots and barf. This change scratches my itch and doesn't break backward compatibility.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3673 | 2013-05-21T18:26:36Z | 2013-06-03T01:18:28Z | 2013-06-03T01:18:28Z | 2014-07-16T08:10:09Z |
BUG: convert_objects with convert_dates=coerce was parsing `a` into a date | diff --git a/RELEASE.rst b/RELEASE.rst
index 436f9d8b833a3..f2c150341d2c6 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -155,6 +155,8 @@ pandas 0.11.1
- Fix running of bs4 tests when it is not installed (GH3605_)
- Fix parsing of html table (GH3606_)
- ``read_html()`` now only allows a single backend: ``html5lib`` (GH3616_)
+ - ``convert_objects`` with ``convert_dates='coerce'`` was parsing some single-letter strings
+ into today's date
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 5cff7f85593a6..75b8d1dc69452 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -3509,6 +3509,15 @@ def test_convert_objects(self):
#result = r.convert_objects(convert_dates=True,convert_numeric=False)
#self.assert_(result.dtype == 'M8[ns]')
+ # dateutil parses some single letters into today's value as a date
+ for x in 'abcdefghijklmnopqrstuvwxyz':
+ s = Series([x])
+ result = s.convert_objects(convert_dates='coerce')
+ assert_series_equal(result,s)
+ s = Series([x.upper()])
+ result = s.convert_objects(convert_dates='coerce')
+ assert_series_equal(result,s)
+
def test_apply_args(self):
s = Series(['foo,bar'])
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index a633b9482da06..abec45b52a363 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -319,6 +319,7 @@ class Timestamp(_Timestamp):
_nat_strings = set(['NaT','nat','NAT','nan','NaN','NAN'])
+_not_datelike_strings = set(['a','A','m','M','p','P','t','T'])
class NaTType(_NaT):
"""(N)ot-(A)-(T)ime, the time equivalent of NaN"""
@@ -876,6 +877,14 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False,
&dts)
_check_dts_bounds(iresult[i], &dts)
except ValueError:
+
+ # for some reason, dateutil parses some single letter len-1 strings into today's date
+ if len(val) == 1 and val in _not_datelike_strings:
+ if coerce:
+ iresult[i] = iNaT
+ continue
+ elif raise_:
+ raise
try:
result[i] = parse(val, dayfirst=dayfirst)
except Exception:
| this is a 'bug' in dateutil:
`a,t,m,p` are the offenders
```
In [1]: import dateutil
In [2]: dateutil.parser.parse('a')
Out[2]: datetime.datetime(2013, 5, 21, 0, 0)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3671 | 2013-05-21T17:41:33Z | 2013-05-21T19:04:01Z | 2013-05-21T19:04:01Z | 2014-07-09T17:58:31Z |
BUG: Fix alignment issue when setitem in a mixed-DataFrame with a Series (GH3668) | diff --git a/RELEASE.rst b/RELEASE.rst
index 3940cd6d10b51..436f9d8b833a3 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -146,7 +146,8 @@ pandas 0.11.1
- Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] dtype in ``read_csv``
when ``parse_dates`` is specified (GH3062_)
- Fix not consolidating before to_csv (GH3624_)
- - Fix alignment issue when setitem in a DataFrame with a piece of a DataFrame (GH3626_)
+ - Fix alignment issue when setitem in a DataFrame with a piece of a DataFrame (GH3626_) or
+ a mixed DataFrame and a Series (GH3668_)
- Fix plotting of unordered DatetimeIndex (GH3601_)
- ``sql.write_frame`` failing when writing a single column to sqlite (GH3628_),
thanks to @stonebig
@@ -217,6 +218,7 @@ pandas 0.11.1
.. _GH3141: https://github.com/pydata/pandas/issues/3141
.. _GH3628: https://github.com/pydata/pandas/issues/3628
.. _GH3638: https://github.com/pydata/pandas/issues/3638
+.. _GH3668: https://github.com/pydata/pandas/issues/3668
.. _GH3605: https://github.com/pydata/pandas/issues/3605
.. _GH3606: https://github.com/pydata/pandas/issues/3606
.. _GH3659: https://github.com/pydata/pandas/issues/3659
diff --git a/pandas/core/common.py b/pandas/core/common.py
index a52c932b30ba4..29516f9d2d4a3 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -851,6 +851,7 @@ def _maybe_upcast_indexer(result, indexer, other, dtype=None):
return the result and a changed flag
"""
+ original_dtype = result.dtype
def changeit():
# our type is wrong here, need to upcast
r, fill_value = _maybe_upcast(result, fill_value=other, dtype=dtype, copy=True)
@@ -861,9 +862,11 @@ def changeit():
# if we hit this then we still have an incompatible type
r[indexer] = fill_value
+ # if we have changed to floats, might want to cast back if we can
+ r = _possibly_downcast_to_dtype(r,original_dtype)
return r, True
- new_dtype, fill_value = _maybe_promote(result.dtype,other)
+ new_dtype, fill_value = _maybe_promote(original_dtype,other)
if new_dtype != result.dtype:
return changeit()
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 41f20cbcc15ac..f7187b7ae5d61 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -143,7 +143,7 @@ def setter(item, v):
else:
setter(item, np.nan)
- # we have an equal len ndarray
+ # we have an equal len ndarray to our labels
elif isinstance(value, np.ndarray) and value.ndim == 2:
if len(labels) != value.shape[1]:
raise ValueError('Must have equal len keys and value when'
@@ -153,7 +153,8 @@ def setter(item, v):
setter(item, value[:,i])
# we have an equal len list/ndarray
- elif len(labels) == 1 and len(self.obj[labels[0]]) == len(value):
+ elif len(labels) == 1 and (
+ len(self.obj[labels[0]]) == len(value) or len(plane_indexer[0]) == len(value)):
setter(labels[0], value)
# per label values
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 50bddb6ecd85c..fddbbf93552b3 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -938,12 +938,6 @@ def test_getitem_setitem_non_ix_labels(self):
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
- def test_ix_assign_column_mixed(self):
- # GH #1142
- orig = self.mixed_frame.ix[:, 'B'].copy()
- self.mixed_frame.ix[:, 'B'] = self.mixed_frame.ix[:, 'B'] + 1
- assert_series_equal(self.mixed_frame.B, orig + 1)
-
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.ix[df.index == 0, :]
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 5891e8ac08040..ad3d150c7e0ad 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -881,7 +881,7 @@ def test_multi_assign(self):
expected = DataFrame({'FC':['a',np.nan,'a','b','a','b'],
'PF':[0,0,0,0,1,1],
- 'col1':Series([0,1,4,6,8,10],dtype='float64'),
+ 'col1':Series([0,1,4,6,8,10]),
'col2':[12,7,16,np.nan,20,22]})
@@ -898,6 +898,27 @@ def test_multi_assign(self):
df2.ix[mask, cols]= dft.ix[mask, cols].values
assert_frame_equal(df2,expected)
+ def test_ix_assign_column_mixed(self):
+ # GH #1142
+ df = DataFrame(tm.getSeriesData())
+ df['foo'] = 'bar'
+
+ orig = df.ix[:, 'B'].copy()
+ df.ix[:, 'B'] = df.ix[:, 'B'] + 1
+ assert_series_equal(df.B, orig + 1)
+
+ # GH 3668, mixed frame with series value
+ df = DataFrame({'x':range(10), 'y':range(10,20),'z' : 'bar'})
+ expected = df.copy()
+ expected.ix[0, 'y'] = 1000
+ expected.ix[2, 'y'] = 1200
+ expected.ix[4, 'y'] = 1400
+ expected.ix[6, 'y'] = 1600
+ expected.ix[8, 'y'] = 1800
+
+ df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
+ assert_frame_equal(df,expected)
+
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
| closes #3668
| https://api.github.com/repos/pandas-dev/pandas/pulls/3670 | 2013-05-21T16:14:14Z | 2013-05-21T17:37:31Z | 2013-05-21T17:37:31Z | 2014-06-23T10:57:56Z |
Rework display logic again. | diff --git a/RELEASE.rst b/RELEASE.rst
index e02ad66252bdc..18468ebcd3f4c 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -40,8 +40,8 @@ pandas 0.11.1
list of the rows from which to read the index. Added the option,
``tupleize_cols`` to provide compatiblity for the pre 0.11.1 behavior of
writing and reading multi-index columns via a list of tuples. The default in
- 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
- multi-index column.
+ 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
+ multi-index column.
Note: The default value will change in 0.12 to make the default *to* write and
read multi-index columns in the new format. (GH3571_, GH1651_, GH3141_)
- Add iterator to ``Series.str`` (GH3638_)
@@ -63,6 +63,7 @@ pandas 0.11.1
- Add modulo operator to Series, DataFrame
- Add ``date`` method to DatetimeIndex
- Simplified the API and added a describe method to Categorical
+ - Added Faq section on repr display options, to help users customize their setup.
**API Changes**
@@ -79,12 +80,14 @@ pandas 0.11.1
``timedelta64[ns]`` to ``object/int`` (GH3425_)
- Do not allow datetimelike/timedeltalike creation except with valid types
(e.g. cannot pass ``datetime64[ms]``) (GH3423_)
- - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
- DataFrame -> Series if groups are unique. Regression from 0.10.1,
+ - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
+ DataFrame -> Series if groups are unique. Regression from 0.10.1,
partial revert on (GH2893_) with (GH3596_)
- Raise on ``iloc`` when boolean indexing with a label based indexer mask
e.g. a boolean Series, even with integer labels, will raise. Since ``iloc``
is purely positional based, the labels on the Series are not alignable (GH3631_)
+ - Deprecated display.height, display.width is now only a formatting option
+ does not control triggering of summary, simuliar to < 0.11.0.
**Bug Fixes**
@@ -134,11 +137,13 @@ pandas 0.11.1
is a ``list`` or ``tuple``.
- Fixed bug where a time-series was being selected in preference to an actual column name
in a frame (GH3594_)
- - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
+ - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
``np.nan`` or ``np.inf`` as appropriate (GH3590_)
- Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_)
- Fix ``read_csv`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
was failing (GH3611_)
+ - Disable HTML output in qtconsole again. (GH3657_)
+ - Reworked the new repr display logic, which users found confusing. (GH3663_)
- Fix indexing issue in ndim >= 3 with ``iloc`` (GH3617_)
- Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] dtype in ``read_csv``
when ``parse_dates`` is specified (GH3062_)
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index 8009c7014c347..a5b6db2964cd2 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -35,8 +35,8 @@ horizontal scrolling, auto-detection of width/height.
To appropriately address all these environments, the display behavior is controlled
by several options, which you're encouraged to tweak to suit your setup.
-As of 0.11.0, the relavent options are all under the `display` namespace,
-(e.g. display.width, display.height, etc'):
+As of 0.11.1, these are the relavent options, all under the `display` namespace,
+(e.g. display.width, etc'):
- notebook_repr_html: if True, IPython frontends with HTML support will display
dataframes as HTML tables when possible.
- expand_repr (default True): when the frame width cannot fit within the screen,
@@ -45,10 +45,10 @@ As of 0.11.0, the relavent options are all under the `display` namespace,
- max_columns: max dataframe columns to display. a wider frame will trigger
a summary view, unless `expand_repr` is True and HTML output is disabled.
- max_rows: max dataframe rows display. a longer frame will trigger a summary view.
-- width: width of display screen in characters. When using a terminal, setting this to None
- will trigger auto-detection of terminal width.
-- height: height of display screen. When using a terminal, setting this to None
- will trigger auto-detection of terminal height.
+- width: width of display screen in characters, used to determine the width of lines
+ when expand_repr is active, Setting this to None will trigger auto-detection of terminal
+ width, this only works for proper terminals, not IPython frontends such as ipnb.
+ width is ignored in IPython notebook, since the browser provides horizontal scrolling.
IPython users can use the IPython startup file to import pandas and set these
options automatically when starting up.
diff --git a/pandas/core/common.py b/pandas/core/common.py
index a52c932b30ba4..cbc85e6b91c33 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1904,8 +1904,23 @@ def in_qtconsole():
return True
except:
return False
+ return False
+
+def in_ipnb():
+ """
+ check if we're inside an IPython Notebook
+ """
+ try:
+ ip = get_ipython()
+ front_end = (ip.config.get('KernelApp',{}).get('parent_appname',"") or
+ ip.config.get('IPKernelApp',{}).get('parent_appname',""))
+ if 'notebook' in front_end.lower():
+ return True
+ except:
+ return False
+ return False
-def in_ipnb_frontend():
+def in_ipython_frontend():
"""
check if we're inside an an IPython zmq frontend
"""
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 71b4539265069..57bbe747c9c2c 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -120,13 +120,17 @@
pc_line_width_doc = """
: int
- When printing wide DataFrames, this is the width of each line.
+ Deprecated.
"""
pc_line_width_deprecation_warning = """\
line_width has been deprecated, use display.width instead (currently both are identical)
"""
+pc_height_deprecation_warning = """\
+height has been deprecated.
+"""
+
pc_width_doc = """
: int
Width of the display in characters. In case python/IPython is running in
@@ -138,10 +142,7 @@
pc_height_doc = """
: int
- Height of the display in lines. In case python/IPython is running in a
- terminal this can be set to None and pandas will auto-detect the width.
- Note that the IPython notebook, IPython qtconsole, or IDLE do not run
- in a terminal, and hence it is not possible to correctly detect the height.
+ Deprecated.
"""
pc_chop_threshold_doc = """
@@ -244,10 +245,15 @@ def mpl_style_cb(key):
validator=is_instance_factory([type(None), int]))
# redirected to width, make defval identical
cf.register_option('line_width', get_default_val('display.width'), pc_line_width_doc)
+
cf.deprecate_option('display.line_width',
msg=pc_line_width_deprecation_warning,
rkey='display.width')
+cf.deprecate_option('display.height',
+ msg=pc_height_deprecation_warning,
+ rkey='display.height')
+
tc_sim_interactive_doc = """
: boolean
Whether to simulate interactive mode for purposes of testing
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 3d38caa84492f..7327f3b1b2175 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -1702,7 +1702,7 @@ def detect_console_encoding():
def get_console_size():
"""Return console size as tuple = (width, height).
- May return (None,None) in some cases.
+ Returns (None,None) in non-interactive session.
"""
display_width = get_option('display.width')
display_height = get_option('display.height')
@@ -1718,7 +1718,7 @@ def get_console_size():
# Simple. yeah.
if com.in_interactive_session():
- if com.in_ipnb_frontend():
+ if com.in_ipython_frontend():
# sane defaults for interactive non-shell terminal
# match default for width,height in config_init
from pandas.core.config import get_default_val
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ed56a658d817d..0580be25a3f04 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -605,57 +605,62 @@ def __nonzero__(self):
def _repr_fits_vertical_(self):
"""
- Check if full repr fits in vertical boundaries imposed by the display
- options height and max_rows. In case of non-interactive session,
- no boundaries apply.
+ Check length against max_rows.
"""
- width, height = fmt.get_console_size()
max_rows = get_option("display.max_rows")
+ return len(self) <= max_rows
- if height is None and max_rows is None:
- return True
-
- else:
- # min of two, where one may be None
- height = height or max_rows +1
- max_rows = max_rows or height +1
- return len(self) <= min(max_rows, height)
-
- def _repr_fits_horizontal_(self):
+ def _repr_fits_horizontal_(self,ignore_width=False):
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns. In case off non-interactive session, no
boundaries apply.
+
+ ignore_width is here so ipnb+HTML output can behave the way
+ users expect. display.max_columns remains in effect.
+ GH3541, GH3573
"""
+
width, height = fmt.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if ((max_columns and nb_columns > max_columns) or
- (width and nb_columns > (width // 2))):
+ ((not ignore_width) and width and nb_columns > (width // 2))):
return False
- if width is None:
- # no sense finding width of repr if no width set
+ if (ignore_width # used by repr_html under IPython notebook
+ or not com.in_interactive_session()): # scripts ignore terminal dims
return True
+ if (get_option('display.width') is not None or
+ com.in_ipython_frontend()):
+ # check at least the column row for excessive width
+ max_rows = 1
+ else:
+ max_rows = get_option("display.max_rows")
+
+ # when auto-detecting, so width=None and not in ipython front end
+ # check whether repr fits horizontal by actualy checking
+ # the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
- max_rows = get_option("display.max_rows")
- if not (height is None and max_rows is None):
+
+ if not (max_rows is None): # unlimited rows
# min of two, where one may be None
- height = height or max_rows +1
- max_rows = max_rows or height +1
- d=d.iloc[:min(max_rows, height,len(d))]
+ d=d.iloc[:min(max_rows,len(d))]
+ else:
+ return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max([len(l) for l in value.split('\n')])
- return repr_width <= width
+
+ return repr_width < width
def __str__(self):
"""
@@ -697,14 +702,11 @@ def __unicode__(self):
if fits_vertical and fits_horizontal:
self.to_string(buf=buf)
else:
- width, height = fmt.get_console_size()
- max_rows = get_option("display.max_rows") or height
- # expand_repr basically takes the extrac columns that don't
- # fit the width, and creates a new page, which increases
- # the effective row count. check number of cols agaibst
- # max rows to catch wrapping. that would exceed max_rows.
- if (get_option("display.expand_frame_repr") and fits_vertical and
- len(self.columns) < max_rows):
+ width, _ = fmt.get_console_size()
+ max_rows = get_option("display.max_rows")
+ if (get_option("display.expand_frame_repr")
+ and fits_vertical):
+ # and len(self.columns) < max_rows)
self.to_string(buf=buf, line_width=width)
else:
max_info_rows = get_option('display.max_info_rows')
@@ -731,12 +733,22 @@ def _repr_html_(self):
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
+ # ipnb in html repr mode allows scrolling
+ # users strongly prefer to h-scroll a wide HTML table in the browser
+ # then to get a summary view. GH3541, GH3573
+ ipnbh = com.in_ipnb() and get_option('display.notebook_repr_html')
+
+ # qtconsole doesn't report it's line width, and also
+ # behaves badly when outputting an HTML table
+ # that doesn't fit the window, so disable it.
+ if com.in_qtconsole():
+ raise ValueError('Disable HTML output in QtConsole')
if get_option("display.notebook_repr_html"):
fits_vertical = self._repr_fits_vertical_()
fits_horizontal = False
if fits_vertical:
- fits_horizontal = self._repr_fits_horizontal_()
+ fits_horizontal = self._repr_fits_horizontal_(ignore_width=ipnbh)
if fits_horizontal and fits_vertical:
return ('<div style="max-height:1000px;'
@@ -870,7 +882,7 @@ def __contains__(self, key):
# Python 2 division methods
if not py3compat.PY3:
- __div__ = _arith_method(operator.div, '__div__', '/',
+ __div__ = _arith_method(operator.div, '__div__', '/',
default_axis=None, fill_zeros=np.inf)
__rdiv__ = _arith_method(lambda x, y: y / x, '__rdiv__',
default_axis=None, fill_zeros=np.inf)
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 6b281edf17da9..7feb2f17d79a5 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -170,8 +170,9 @@ def test_expand_frame_repr(self):
df_tall = DataFrame('hello', range(30), range(5))
with option_context('mode.sim_interactive', True):
- with option_context('display.width', 50,
- 'display.height', 20):
+ with option_context('display.max_columns', 5,
+ 'display.width',20,
+ 'display.max_rows', 20):
with option_context('display.expand_frame_repr', True):
self.assertFalse(has_info_repr(df_small))
self.assertFalse(has_expanded_repr(df_small))
@@ -226,19 +227,21 @@ def mkframe(n):
# since not exceeding width
self.assertFalse(has_expanded_repr(df6))
self.assertFalse(has_info_repr(df6))
-
+
with option_context('display.max_rows', 9,
'display.max_columns', 10):
# out vertical bounds can not result in exanded repr
self.assertFalse(has_expanded_repr(df10))
self.assertTrue(has_info_repr(df10))
- with option_context('display.max_columns', 0,
+ # width=None in terminal, auto detection
+ with option_context('display.max_columns', 100,
'display.max_rows', term_width * 20,
- 'display.width', 0):
+ 'display.width', None):
df = mkframe((term_width // 7) - 2)
self.assertFalse(has_expanded_repr(df))
df = mkframe((term_width // 7) + 2)
+ print( df._repr_fits_horizontal_())
self.assertTrue(has_expanded_repr(df))
def test_to_string_repr_unicode(self):
@@ -787,7 +790,8 @@ def test_pprint_thing(self):
def test_wide_repr(self):
with option_context('mode.sim_interactive', True):
col = lambda l, k: [tm.rands(k) for _ in xrange(l)]
- df = DataFrame([col(20, 25) for _ in range(10)])
+ max_cols = get_option('display.max_columns')
+ df = DataFrame([col(max_cols+1, 25) for _ in range(10)])
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
@@ -810,7 +814,8 @@ def test_wide_repr_wide_columns(self):
def test_wide_repr_named(self):
with option_context('mode.sim_interactive', True):
col = lambda l, k: [tm.rands(k) for _ in xrange(l)]
- df = DataFrame([col(20, 25) for _ in range(10)])
+ max_cols = get_option('display.max_columns')
+ df = DataFrame([col(max_cols+1, 25) for _ in range(10)])
df.index.name = 'DataFrame Index'
set_option('display.expand_frame_repr', False)
@@ -833,7 +838,8 @@ def test_wide_repr_multiindex(self):
col = lambda l, k: [tm.rands(k) for _ in xrange(l)]
midx = pandas.MultiIndex.from_arrays([np.array(col(10, 5)),
np.array(col(10, 5))])
- df = DataFrame([col(20, 25) for _ in range(10)],
+ max_cols = get_option('display.max_columns')
+ df = DataFrame([col(max_cols+1, 25) for _ in range(10)],
index=midx)
df.index.names = ['Level 0', 'Level 1']
set_option('display.expand_frame_repr', False)
@@ -853,12 +859,13 @@ def test_wide_repr_multiindex(self):
def test_wide_repr_multiindex_cols(self):
with option_context('mode.sim_interactive', True):
+ max_cols = get_option('display.max_columns')
col = lambda l, k: [tm.rands(k) for _ in xrange(l)]
midx = pandas.MultiIndex.from_arrays([np.array(col(10, 5)),
np.array(col(10, 5))])
- mcols = pandas.MultiIndex.from_arrays([np.array(col(20, 3)),
- np.array(col(20, 3))])
- df = DataFrame([col(20, 25) for _ in range(10)],
+ mcols = pandas.MultiIndex.from_arrays([np.array(col(max_cols+1, 3)),
+ np.array(col(max_cols+1, 3))])
+ df = DataFrame([col(max_cols+1, 25) for _ in range(10)],
index=midx, columns=mcols)
df.index.names = ['Level 0', 'Level 1']
set_option('display.expand_frame_repr', False)
@@ -876,7 +883,8 @@ def test_wide_repr_multiindex_cols(self):
def test_wide_repr_unicode(self):
with option_context('mode.sim_interactive', True):
col = lambda l, k: [tm.randu(k) for _ in xrange(l)]
- df = DataFrame([col(20, 25) for _ in range(10)])
+ max_cols = get_option('display.max_columns')
+ df = DataFrame([col(max_cols+1, 25) for _ in range(10)])
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
| v0.11.0 introduced last-minute changes to the way the dataframe display logic works,
and they haven't proven themselves. The churn is unforunate, but if it's broken,
fix it.
`height` is deprecated.
max_rows solely determines the amount of rows displayed.
rebased on top of #3657.
Merging the previous PR and subsequent last minute fixes so close to the release
contributed to the problems only being discovered after the release.
So please test and report if this works for you, in terminal, qtconsole or notebook,
since the 0.11.1 release is so close.
cc @hayd , @lodagro ,@jreback
| https://api.github.com/repos/pandas-dev/pandas/pulls/3663 | 2013-05-21T00:07:41Z | 2013-05-25T23:08:36Z | 2013-05-25T23:08:36Z | 2014-06-22T03:00:30Z |
BUG: Non-unique indexing via loc and friends fixed when slicing (GH3659_) | diff --git a/RELEASE.rst b/RELEASE.rst
index 9b3cc3683c3de..e02ad66252bdc 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -115,6 +115,7 @@ pandas 0.11.1
and handle missing elements like unique indices (GH3561_)
- Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_)
- Concat to produce a non-unique columns when duplicates are across dtypes is fixed (GH3602_)
+ - Non-unique indexing with a slice via ``loc`` and friends fixed (GH3659_)
- Fixed bug in groupby with empty series referencing a variable before assignment. (GH3510_)
- Fixed bug in mixed-frame assignment with aligned series (GH3492_)
- Fixed bug in selecting month/quarter/year from a series would not select the time element
@@ -215,6 +216,7 @@ pandas 0.11.1
.. _GH3638: https://github.com/pydata/pandas/issues/3638
.. _GH3605: https://github.com/pydata/pandas/issues/3605
.. _GH3606: https://github.com/pydata/pandas/issues/3606
+.. _GH3659: https://github.com/pydata/pandas/issues/3659
.. _Gh3616: https://github.com/pydata/pandas/issues/3616
pandas 0.11.0
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 3e5a4f5676437..3a6913a924c1d 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1219,13 +1219,25 @@ def slice_locs(self, start=None, end=None):
-----
This function assumes that the data is sorted, so use at your own peril
"""
+
+ is_unique = self.is_unique
if start is None:
start_slice = 0
else:
try:
start_slice = self.get_loc(start)
+
+ if not is_unique:
+
+ # get_loc will return a boolean array for non_uniques
+ # if we are not monotonic
+ if isinstance(start_slice,np.ndarray):
+ raise KeyError("cannot peform a slice operation "
+ "on a non-unique non-monotonic index")
+
if isinstance(start_slice, slice):
start_slice = start_slice.start
+
except KeyError:
if self.is_monotonic:
start_slice = self.searchsorted(start, side='left')
@@ -1237,10 +1249,19 @@ def slice_locs(self, start=None, end=None):
else:
try:
end_slice = self.get_loc(end)
+
+ if not is_unique:
+
+ # get_loc will return a boolean array for non_uniques
+ if isinstance(end_slice,np.ndarray):
+ raise KeyError("cannot perform a slice operation "
+ "on a non-unique non-monotonic index")
+
if isinstance(end_slice, slice):
end_slice = end_slice.stop
else:
end_slice += 1
+
except KeyError:
if self.is_monotonic:
end_slice = self.searchsorted(end, side='right')
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index ea684ef11446c..41f20cbcc15ac 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -759,6 +759,7 @@ def _getitem_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
+ self._has_valid_type(key,axis)
return self._get_slice_axis(key, axis=axis)
elif com._is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index e9afa1ae6ec1d..5891e8ac08040 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -953,6 +953,30 @@ def test_iloc_mask(self):
(key,ans,r))
warnings.filterwarnings(action='always', category=UserWarning)
+ def test_non_unique_loc(self):
+ ## GH3659
+ ## non-unique indexer with loc slice
+ ## https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
+
+ # these are going to raise becuase the we are non monotonic
+ df = DataFrame({'A' : [1,2,3,4,5,6], 'B' : [3,4,5,6,7,8]}, index = [0,1,0,1,2,3])
+ self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1,None)]))
+ self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(0,None)]))
+ self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1,2)]))
+
+ # monotonic are ok
+ df = DataFrame({'A' : [1,2,3,4,5,6], 'B' : [3,4,5,6,7,8]}, index = [0,1,0,1,2,3]).sort(axis=0)
+ result = df.loc[1:]
+ expected = DataFrame({'A' : [2,4,5,6], 'B' : [4, 6,7,8]}, index = [1,1,2,3])
+ assert_frame_equal(result,expected)
+
+ result = df.loc[0:]
+ assert_frame_equal(result,df)
+
+ result = df.loc[1:2]
+ expected = DataFrame({'A' : [2,4,5], 'B' : [4,6,7]}, index = [1,1,2])
+ assert_frame_equal(result,expected)
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
| closes #3659
This is if you try a non_monotonic selection on a non_unique index (a mouthful)!
The reason is we cannot determinate a proper start/end point on what to include
```
In [11]: df = DataFrame({'A' : [1,2,3,4,5,6], 'B' : [3,4,5,6,7,8]},
index = [0,1,0,1,2,3])
In [18]: df
Out[18]:
A B
0 1 3
1 2 4
0 3 5
1 4 6
2 5 7
3 6 8
In [12]: df.loc[1:]
KeyError: 'cannot perform a slice operation on a non-unique non-monotonic index'
```
On a non_unique, but monotonic index, however, slicing works normally
(notice, since we are using loc, that both endpoints ARE included)
```
In [13]: df = DataFrame({'A' : [1,2,3,4,5,6], 'B' : [3,4,5,6,7,8]},
index = [0,1,0,1,2,3]).sort(axis=0)
In [14]: df
Out[14]:
A B
0 1 3
0 3 5
1 2 4
1 4 6
2 5 7
3 6 8
In [15]: df.loc[1:]
Out[15]:
A B
1 2 4
1 4 6
2 5 7
3 6 8
In [16]: df.loc[1:2]
Out[16]:
A B
1 2 4
1 4 6
2 5 7
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3661 | 2013-05-20T19:44:44Z | 2013-05-20T23:34:40Z | 2013-05-20T23:34:40Z | 2014-06-20T10:08:11Z |
ENH: special case HTML repr behaviour on ipnb GH3573 | diff --git a/RELEASE.rst b/RELEASE.rst
index e02ad66252bdc..c2d4154bf2587 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -40,8 +40,8 @@ pandas 0.11.1
list of the rows from which to read the index. Added the option,
``tupleize_cols`` to provide compatiblity for the pre 0.11.1 behavior of
writing and reading multi-index columns via a list of tuples. The default in
- 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
- multi-index column.
+ 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
+ multi-index column.
Note: The default value will change in 0.12 to make the default *to* write and
read multi-index columns in the new format. (GH3571_, GH1651_, GH3141_)
- Add iterator to ``Series.str`` (GH3638_)
@@ -79,8 +79,8 @@ pandas 0.11.1
``timedelta64[ns]`` to ``object/int`` (GH3425_)
- Do not allow datetimelike/timedeltalike creation except with valid types
(e.g. cannot pass ``datetime64[ms]``) (GH3423_)
- - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
- DataFrame -> Series if groups are unique. Regression from 0.10.1,
+ - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
+ DataFrame -> Series if groups are unique. Regression from 0.10.1,
partial revert on (GH2893_) with (GH3596_)
- Raise on ``iloc`` when boolean indexing with a label based indexer mask
e.g. a boolean Series, even with integer labels, will raise. Since ``iloc``
@@ -134,11 +134,12 @@ pandas 0.11.1
is a ``list`` or ``tuple``.
- Fixed bug where a time-series was being selected in preference to an actual column name
in a frame (GH3594_)
- - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
+ - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
``np.nan`` or ``np.inf`` as appropriate (GH3590_)
- Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_)
- Fix ``read_csv`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
was failing (GH3611_)
+ - Disable HTML output in qtconsole again. (GH3657_)
- Fix indexing issue in ndim >= 3 with ``iloc`` (GH3617_)
- Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] dtype in ``read_csv``
when ``parse_dates`` is specified (GH3062_)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index a52c932b30ba4..cbc85e6b91c33 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1904,8 +1904,23 @@ def in_qtconsole():
return True
except:
return False
+ return False
+
+def in_ipnb():
+ """
+ check if we're inside an IPython Notebook
+ """
+ try:
+ ip = get_ipython()
+ front_end = (ip.config.get('KernelApp',{}).get('parent_appname',"") or
+ ip.config.get('IPKernelApp',{}).get('parent_appname',""))
+ if 'notebook' in front_end.lower():
+ return True
+ except:
+ return False
+ return False
-def in_ipnb_frontend():
+def in_ipython_frontend():
"""
check if we're inside an an IPython zmq frontend
"""
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 3d38caa84492f..608165f4ed340 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -1718,7 +1718,7 @@ def get_console_size():
# Simple. yeah.
if com.in_interactive_session():
- if com.in_ipnb_frontend():
+ if com.in_ipython_frontend():
# sane defaults for interactive non-shell terminal
# match default for width,height in config_init
from pandas.core.config import get_default_val
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ed56a658d817d..d2476735a256d 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -621,19 +621,26 @@ def _repr_fits_vertical_(self):
max_rows = max_rows or height +1
return len(self) <= min(max_rows, height)
- def _repr_fits_horizontal_(self):
+ def _repr_fits_horizontal_(self,ignore_width=False):
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns. In case off non-interactive session, no
boundaries apply.
+
+ ignore_width is here so ipnb+HTML output can behave the way
+ users expect. display.max_columns remains in effect.
+ GH3541, GH3573
"""
+
+ # everytime you add an if-clause here, god slaughters a kitten.
+ # please. think of the kittens.
width, height = fmt.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if ((max_columns and nb_columns > max_columns) or
- (width and nb_columns > (width // 2))):
+ ((not ignore_width) and width and nb_columns > (width // 2))):
return False
if width is None:
@@ -655,7 +662,12 @@ def _repr_fits_horizontal_(self):
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max([len(l) for l in value.split('\n')])
- return repr_width <= width
+
+ # special case ipnb+HTML repr
+ if not ignore_width:
+ return repr_width <= width
+ else:
+ return True
def __str__(self):
"""
@@ -731,12 +743,22 @@ def _repr_html_(self):
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
+ # ipnb in html repr mode allows scrolling
+ # users strongly prefer to h-scroll a wide HTML table in the browser
+ # then to get a summary view. GH3541, GH3573
+ ipnbh = com.in_ipnb() and get_option('display.notebook_repr_html')
+
+ # qtconsole doesn't report it's line width, and also
+ # behaves badly when outputting an HTML table
+ # that doesn't fit the window, so disable it.
+ if com.in_qtconsole():
+ raise ValueError('Disable HTML output in QtConsole')
if get_option("display.notebook_repr_html"):
fits_vertical = self._repr_fits_vertical_()
fits_horizontal = False
if fits_vertical:
- fits_horizontal = self._repr_fits_horizontal_()
+ fits_horizontal = self._repr_fits_horizontal_(ignore_width=ipnbh)
if fits_horizontal and fits_vertical:
return ('<div style="max-height:1000px;'
| #3573, and SO question mentioned in #3541
| https://api.github.com/repos/pandas-dev/pandas/pulls/3657 | 2013-05-20T16:31:24Z | 2013-05-25T23:08:36Z | 2013-05-25T23:08:36Z | 2014-06-12T18:10:10Z |
DOC: add doc for reading from DataFrame.to_html | diff --git a/RELEASE.rst b/RELEASE.rst
index 3940cd6d10b51..1ab2cab84a70a 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -66,6 +66,7 @@ pandas 0.11.1
- ``melt`` now accepts the optional parameters ``var_name`` and ``value_name``
to specify custom column names of the returned DataFrame (GH3649_),
thanks @hoechenberger
+ - ``read_html`` no longer performs hard date conversion
**API Changes**
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index f4f0546427ef9..6ff3afeb69581 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -68,6 +68,21 @@ Enhancements
- ``pd.read_html()`` can now parse HTML strings, files or urls and return
DataFrames, courtesy of @cpcloud. (GH3477_, GH3605_, GH3606_, GH3616_).
It works with a *single* parser backend: BeautifulSoup4 + html5lib
+ - You can use ``pd.read_html()`` to read the output from ``DataFrame.to_html()`` like so
+
+ .. ipython :: python
+
+ df = DataFrame({'a': range(3), 'b': list('abc')})
+ print df
+ html = df.to_html()
+ alist = pd.read_html(html, infer_types=True, index_col=0)
+ print df == alist[0]
+
+ Note that ``alist`` here is a Python ``list`` so ``pd.read_html()`` and
+ ``DataFrame.to_html()`` are not inverses.
+
+ - ``pd.read_html()`` no longer performs hard conversion of date strings
+ (GH3656_).
- ``HDFStore``
@@ -211,3 +226,4 @@ on GitHub for a complete list.
.. _GH3616: https://github.com/pydata/pandas/issues/3616
.. _GH3605: https://github.com/pydata/pandas/issues/3605
.. _GH3606: https://github.com/pydata/pandas/issues/3606
+.. _GH3656: https://github.com/pydata/pandas/issues/3656
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 732bd57bec418..915c30ecc3c40 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -636,7 +636,6 @@ def _data_to_frame(data, header, index_col, infer_types, skiprows):
# must be sequential since dates trump numbers if both args are given
if infer_types:
df = df.convert_objects(convert_numeric=True)
- df = df.convert_objects(convert_dates='coerce')
if index_col is not None:
cols = df.columns[index_col]
@@ -722,7 +721,7 @@ def _parse(parser, io, match, flavor, header, index_col, skiprows, infer_types,
def read_html(io, match='.+', flavor='html5lib', header=None, index_col=None,
- skiprows=None, infer_types=False, attrs=None):
+ skiprows=None, infer_types=True, attrs=None):
r"""Read an HTML table into a DataFrame.
Parameters
diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py
index 6e2f6ec00d8ac..7ece8f8e07d81 100644
--- a/pandas/io/tests/test_html.py
+++ b/pandas/io/tests/test_html.py
@@ -2,7 +2,6 @@
import re
from cStringIO import StringIO
from unittest import TestCase
-import collections
import numbers
from urllib2 import urlopen
from contextlib import closing
@@ -408,7 +407,7 @@ def try_remove_ws(x):
return x
df = self.run_read_html(self.banklist_data, 'Metcalf',
- attrs={'id': 'table'}, infer_types=True)[0]
+ attrs={'id': 'table'})[0]
ground_truth = read_csv(os.path.join(DATA_PATH, 'banklist.csv'),
converters={'Updated Date': Timestamp,
'Closing Date': Timestamp})
@@ -431,7 +430,9 @@ def try_remove_ws(x):
'Hamilton Bank, NA', 'The Citizens Savings Bank']
dfnew = df.applymap(try_remove_ws).replace(old, new)
gtnew = ground_truth.applymap(try_remove_ws)
- assert_frame_equal(dfnew, gtnew)
+ converted = dfnew.convert_objects(convert_numeric=True)
+ assert_frame_equal(converted.convert_objects(convert_dates='coerce'),
+ gtnew)
@slow
def test_gold_canyon(self):
@@ -487,6 +488,3 @@ def test_lxml_finds_tbody():
url = ('http://ndb.nal.usda.gov/ndb/foods/show/1732?fg=&man=&'
'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
assert get_lxml_elements(url, 'tbody')
-
-
-
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index f38fe61d453c2..823d2c81bb72c 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -126,13 +126,13 @@ def assert_almost_equal(a, b, check_less_precise = False):
return assert_dict_equal(a, b)
if isinstance(a, basestring):
- assert a == b, "{0} != {1}".format(a, b)
+ assert a == b, "%s != %s" % (a, b)
return True
if isiterable(a):
np.testing.assert_(isiterable(b))
na, nb = len(a), len(b)
- assert na == nb, "{0} != {1}".format(na, nb)
+ assert na == nb, "%s != %s" % (na, nb)
if np.array_equal(a, b):
return True
@@ -154,8 +154,6 @@ def assert_almost_equal(a, b, check_less_precise = False):
if check_less_precise:
dtype_a = np.dtype(type(a))
dtype_b = np.dtype(type(b))
- if dtype_a.kind == 'i' and dtype_b == 'i':
- pass
if dtype_a.kind == 'f' and dtype_b == 'f':
if dtype_a.itemsize <= 4 and dtype_b.itemsize <= 4:
decimal = 3
| https://api.github.com/repos/pandas-dev/pandas/pulls/3656 | 2013-05-20T14:03:15Z | 2013-05-21T18:04:45Z | 2013-05-21T18:04:45Z | 2014-06-21T15:38:39Z | |
TST: add html5lib to travis | diff --git a/README.rst b/README.rst
index 2d49c168eac60..a8ecf01aac953 100644
--- a/README.rst
+++ b/README.rst
@@ -95,8 +95,14 @@ Optional dependencies
- Both `html5lib <https://github.com/html5lib/html5lib-python>`__ **and**
`Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for
reading HTML tables
- - These can both easily be installed by ``pip install html5lib`` and ``pip
- install beautifulsoup4``.
+
+ .. warning::
+
+ If you are on a 32-bit machine you need to install an older version of
+ Beautiful Soup. Version 4.0.2 of BeautifulSoup has been tested on Ubuntu
+ 12.04.02 32-bit.
+
+ - Any recent version of ``html5lib`` is okay.
- `boto <https://pypi.python.org/pypi/boto>`__: necessary for Amazon S3 access.
diff --git a/ci/install.sh b/ci/install.sh
index cd897cf7313c2..a091834a9570f 100755
--- a/ci/install.sh
+++ b/ci/install.sh
@@ -30,7 +30,13 @@ fi;
#scipy is not included in the cached venv
if [ x"$FULL_DEPS" == x"true" ] ; then
# for pytables gets the lib as well
- sudo apt-get $APT_ARGS install libhdf5-serial-dev;
+ sudo apt-get $APT_ARGS install libhdf5-serial-dev
+
+ if [ ${TRAVIS_PYTHON_VERSION:0:1} == "3" ]; then
+ sudo apt-get $APT_ARGS install python3-bs4
+ elif [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then
+ sudo apt-get $APT_ARGS install python-bs4
+ fi
if [ ${TRAVIS_PYTHON_VERSION} == "3.2" ]; then
sudo apt-get $APT_ARGS install python3-scipy
@@ -76,8 +82,13 @@ if ( ! $VENV_FILE_AVAILABLE ); then
pip install $PIP_ARGS 'http://downloads.sourceforge.net/project/pytseries/scikits.timeseries/0.91.3/scikits.timeseries-0.91.3.tar.gz?r='
pip install $PIP_ARGS patsy
pip install $PIP_ARGS lxml
- pip install $PIP_ARGS beautifulsoup4
+ pip install $PIP_ARGS html5lib
+ if [ ${TRAVIS_PYTHON_VERSION:0:1} == "3" ]; then
+ sudo apt-get $APT_ARGS remove python3-lxml
+ elif [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then
+ sudo apt-get $APT_ARGS remove python-lxml
+ fi
# fool statsmodels into thinking pandas was already installed
# so it won't refuse to install itself. We want it in the zipped venv
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 658d9d78d5b29..407746e3cb000 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -102,8 +102,14 @@ Optional Dependencies
* Both `html5lib <https://github.com/html5lib/html5lib-python>`__ **and**
`Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for
reading HTML tables
- * These can both easily be installed by ``pip install html5lib`` and ``pip
- install beautifulsoup4``.
+
+ .. warning::
+
+ If you are on a 32-bit machine you need to install an older version of
+ Beautiful Soup. Version 4.0.2 of BeautifulSoup has been tested on Ubuntu
+ 12.04.02 32-bit.
+
+ * Any recent version of ``html5lib`` is okay.
.. note::
| closes #3654
| https://api.github.com/repos/pandas-dev/pandas/pulls/3655 | 2013-05-20T13:29:48Z | 2013-05-21T13:43:38Z | 2013-05-21T13:43:38Z | 2014-07-01T18:52:09Z |
TST: fix unicdoe errors test_strings | diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index d49338af698d1..d057dc5304277 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -350,8 +350,8 @@ def test_replace(self):
tm.assert_series_equal(result, exp)
#flags + unicode
- values = Series(["abcd,\xc3\xa0".decode("utf-8")])
- exp = Series(["abcd, \xc3\xa0".decode("utf-8")])
+ values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
+ exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace("(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
| PTF
| https://api.github.com/repos/pandas-dev/pandas/pulls/3652 | 2013-05-20T01:14:18Z | 2013-05-20T01:20:28Z | 2013-05-20T01:20:28Z | 2014-07-16T08:09:51Z |
ENH: Allow for custom variable/value column names when melt()'ing | diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst
index 9a7a9c2a87e52..5f7526235a4c3 100644
--- a/doc/source/reshaping.rst
+++ b/doc/source/reshaping.rst
@@ -200,7 +200,9 @@ Reshaping by Melt
The ``melt`` function found in ``pandas.core.reshape`` is useful to massage a
DataFrame into a format where one or more columns are identifier variables,
while all other columns, considered measured variables, are "pivoted" to the
-row axis, leaving just two non-identifier columns, "variable" and "value".
+row axis, leaving just two non-identifier columns, "variable" and "value". The
+names of those columns can be customized by supplying the ``var_name`` and
+``value_name`` parameters.
For instance,
@@ -212,6 +214,7 @@ For instance,
'weight' : [130, 150]})
cheese
melt(cheese, id_vars=['first', 'last'])
+ melt(cheese, id_vars=['first', 'last'], var_name='quantity')
Combining with stats and GroupBy
--------------------------------
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index a42765591c818..13d08a9fc9c76 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -138,6 +138,9 @@ Enhancements
import os
os.remove('mi.csv')
+ - ``pd.melt()`` now accepts the optional parameters ``var_name`` and ``value_name``
+ to specify custom column names of the returned DataFrame.
+
Bug Fixes
~~~~~~~~~
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index b2e5bb01f53af..4e0f35f5d9555 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -600,7 +600,8 @@ def _stack_multi_columns(frame, level=-1, dropna=True):
return result
-def melt(frame, id_vars=None, value_vars=None):
+def melt(frame, id_vars=None, value_vars=None,
+ var_name='variable', value_name='value'):
"""
"Unpivots" a DataFrame from wide format to long format, optionally leaving
id variables set
@@ -608,8 +609,10 @@ def melt(frame, id_vars=None, value_vars=None):
Parameters
----------
frame : DataFrame
- id_vars :
- value_vars :
+ id_vars : tuple, list, or ndarray
+ value_vars : tuple, list, or ndarray
+ var_name : scalar
+ value_name : scalar
Examples
--------
@@ -621,9 +624,16 @@ def melt(frame, id_vars=None, value_vars=None):
>>> melt(df, id_vars=['A'], value_vars=['B'])
A variable value
- a B 1
- b B 3
- c B 5
+ a B 1
+ b B 3
+ c B 5
+
+ >>> melt(df, id_vars=['A'], value_vars=['B'],
+ ... var_name='myVarname', value_name='myValname')
+ A myVarname myValname
+ a B 1
+ b B 3
+ c B 5
"""
# TODO: what about the existing index?
if id_vars is not None:
@@ -648,11 +658,11 @@ def melt(frame, id_vars=None, value_vars=None):
for col in id_vars:
mdata[col] = np.tile(frame.pop(col).values, K)
- mcolumns = id_vars + ['variable', 'value']
+ mcolumns = id_vars + [var_name, value_name]
- mdata['value'] = frame.values.ravel('F')
-
- mdata['variable'] = np.asarray(frame.columns).repeat(N)
+ mdata[value_name] = frame.values.ravel('F')
+ mdata[var_name] = np.asarray(frame.columns).repeat(N)
+
return DataFrame(mdata, columns=mcolumns)
diff --git a/pandas/tests/test_reshape.py b/pandas/tests/test_reshape.py
index 278e745c7d312..5ddb30b0e1377 100644
--- a/pandas/tests/test_reshape.py
+++ b/pandas/tests/test_reshape.py
@@ -25,14 +25,73 @@ def test_melt():
df['id1'] = (df['A'] > 0).astype(int)
df['id2'] = (df['B'] > 0).astype(int)
- molten1 = melt(df)
- molten2 = melt(df, id_vars=['id1'])
- molten3 = melt(df, id_vars=['id1', 'id2'])
- molten4 = melt(df, id_vars=['id1', 'id2'],
+ var_name = 'var'
+ value_name = 'val'
+
+ # Default column names
+ result = melt(df)
+ result1 = melt(df, id_vars=['id1'])
+ result2 = melt(df, id_vars=['id1', 'id2'])
+ result3 = melt(df, id_vars=['id1', 'id2'],
value_vars='A')
- molten5 = melt(df, id_vars=['id1', 'id2'],
+ result4 = melt(df, id_vars=['id1', 'id2'],
value_vars=['A', 'B'])
-
+
+ expected4 = DataFrame({'id1': df['id1'].tolist() * 2,
+ 'id2': df['id2'].tolist() * 2,
+ 'variable': ['A']*10 + ['B']*10,
+ 'value': df['A'].tolist() + df['B'].tolist()},
+ columns=['id1', 'id2', 'variable', 'value'])
+ tm.assert_frame_equal(result4, expected4)
+
+ # Supply custom name for the 'variable' column
+ result5 = melt(df, var_name=var_name)
+ result6 = melt(df, id_vars=['id1'], var_name=var_name)
+ result7 = melt(df, id_vars=['id1', 'id2'], var_name=var_name)
+ result8 = melt(df, id_vars=['id1', 'id2'],
+ value_vars='A', var_name=var_name)
+ result9 = melt(df, id_vars=['id1', 'id2'],
+ value_vars=['A', 'B'], var_name=var_name)
+
+ expected9 = DataFrame({'id1': df['id1'].tolist() * 2,
+ 'id2': df['id2'].tolist() * 2,
+ var_name: ['A']*10 + ['B']*10,
+ 'value': df['A'].tolist() + df['B'].tolist()},
+ columns=['id1', 'id2', var_name, 'value'])
+ tm.assert_frame_equal(result9, expected9)
+
+ # Supply custom name for the 'value' column
+ result10 = melt(df, value_name=value_name)
+ result11 = melt(df, id_vars=['id1'], value_name=value_name)
+ result12 = melt(df, id_vars=['id1', 'id2'], value_name=value_name)
+ result13 = melt(df, id_vars=['id1', 'id2'],
+ value_vars='A', value_name=value_name)
+ result14 = melt(df, id_vars=['id1', 'id2'],
+ value_vars=['A', 'B'], value_name=value_name)
+
+ expected14 = DataFrame({'id1': df['id1'].tolist() * 2,
+ 'id2': df['id2'].tolist() * 2,
+ 'variable': ['A']*10 + ['B']*10,
+ value_name: df['A'].tolist() + df['B'].tolist()},
+ columns=['id1', 'id2', 'variable', value_name])
+ tm.assert_frame_equal(result14, expected14)
+
+ # Supply custom names for the 'variable' and 'value' columns
+ result15 = melt(df, var_name=var_name, value_name=value_name)
+ result16 = melt(df, id_vars=['id1'], var_name=var_name, value_name=value_name)
+ result17 = melt(df, id_vars=['id1', 'id2'],
+ var_name=var_name, value_name=value_name)
+ result18 = melt(df, id_vars=['id1', 'id2'],
+ value_vars='A', var_name=var_name, value_name=value_name)
+ result19 = melt(df, id_vars=['id1', 'id2'],
+ value_vars=['A', 'B'], var_name=var_name, value_name=value_name)
+
+ expected19 = DataFrame({'id1': df['id1'].tolist() * 2,
+ 'id2': df['id2'].tolist() * 2,
+ var_name: ['A']*10 + ['B']*10,
+ value_name: df['A'].tolist() + df['B'].tolist()},
+ columns=['id1', 'id2', var_name, value_name])
+ tm.assert_frame_equal(result19, expected19)
def test_convert_dummies():
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
| ENH, CLN: When melt()'ing, allow for specification of custom variable and value names of the resulting DataFrame. Also fix indentation in melt() example to reflect actual output.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3649 | 2013-05-19T18:59:50Z | 2013-05-21T14:25:37Z | 2013-05-21T14:25:37Z | 2014-06-12T14:57:21Z |
TST: fixup 32-bit failing tests | diff --git a/pandas/core/format.py b/pandas/core/format.py
index cd4364edc6662..3d38caa84492f 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -778,8 +778,6 @@ def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None,
tupleize_cols=True):
self.engine = engine # remove for 0.12
-
- obj._consolidate_inplace()
self.obj = obj
self.path_or_buf = path_or_buf
@@ -835,7 +833,7 @@ def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None,
self.blocks = self.obj._data.blocks
ncols = sum(len(b.items) for b in self.blocks)
self.data =[None] * ncols
- self.column_map = self.obj._data.get_items_map()
+ self.column_map = self.obj._data.get_items_map(use_cached=False)
if chunksize is None:
chunksize = (100000/ (len(self.cols) or 1)) or 1
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 849776940512e..ca04bd3fe26e0 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1130,17 +1130,20 @@ def maybe_create_block(block):
# when we recreate the block manager if needed
return getattr(self,'_ref_locs',None)
- def get_items_map(self):
+ def get_items_map(self, use_cached=True):
"""
return an inverted ref_loc map for an item index
block -> item (in that block) location -> column location
+
+ use_cached : boolean, use the cached items map, or recreate
"""
# cache check
- im = getattr(self,'_items_map',None)
- if im is not None:
- return im
-
+ if use_cached:
+ im = getattr(self,'_items_map',None)
+ if im is not None:
+ return im
+
im = dict()
rl = self._set_ref_locs()
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 413c39a330ad2..50bddb6ecd85c 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4942,7 +4942,7 @@ def test_to_csv_no_index(self):
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df,result)
- df['c3'] = [7,8,9]
+ df['c3'] = Series([7,8,9],dtype='int64')
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df,result)
@@ -5000,7 +5000,8 @@ def _make_frame(names=None):
columns=MultiIndex.from_tuples([('bah', 'foo'),
('bah', 'bar'),
('ban', 'baz')],
- names=names))
+ names=names),
+ dtype='int64')
# column & index are multi-index
df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
| PTF
| https://api.github.com/repos/pandas-dev/pandas/pulls/3648 | 2013-05-19T17:20:14Z | 2013-05-19T17:28:31Z | 2013-05-19T17:28:31Z | 2014-07-16T08:09:47Z |
BUG: (GH3602) Concat to produce a non-unique columns when duplicates are across dtypes | diff --git a/RELEASE.rst b/RELEASE.rst
index 74bafd419af54..4599c2a7553da 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -111,6 +111,7 @@ pandas 0.11.1
- Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_)
and handle missing elements like unique indices (GH3561_)
- Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_)
+ - Concat to produce a non-unique columns when duplicates are across dtypes is fixed (GH3602_)
- Fixed bug in groupby with empty series referencing a variable before assignment. (GH3510_)
- Fixed bug in mixed-frame assignment with aligned series (GH3492_)
- Fixed bug in selecting month/quarter/year from a series would not select the time element
@@ -196,6 +197,7 @@ pandas 0.11.1
.. _GH3626: https://github.com/pydata/pandas/issues/3626
.. _GH3601: https://github.com/pydata/pandas/issues/3601
.. _GH3631: https://github.com/pydata/pandas/issues/3631
+.. _GH3602: https://github.com/pydata/pandas/issues/3602
.. _GH1512: https://github.com/pydata/pandas/issues/1512
=======
.. _GH3571: https://github.com/pydata/pandas/issues/3571
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index a724ce96a7381..a9911ed6db008 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -64,6 +64,7 @@ API changes
Enhancements
~~~~~~~~~~~~
+
- ``pd.read_html()`` can now parse HTML string, files or urls and return dataframes
courtesy of @cpcloud. (GH3477_)
- ``HDFStore``
@@ -114,10 +115,37 @@ Enhancements
import os
os.remove('mi.csv')
+Bug Fixes
+~~~~~~~~~
+
+ - Non-unique index support clarified (GH3468_).
+
+ - Fix assigning a new index to a duplicate index in a DataFrame would fail (GH3468_)
+ - Fix construction of a DataFrame with a duplicate index
+ - ref_locs support to allow duplicative indices across dtypes,
+ allows iget support to always find the index (even across dtypes) (GH2194_)
+ - applymap on a DataFrame with a non-unique index now works
+ (removed warning) (GH2786_), and fix (GH3230_)
+ - Fix to_csv to handle non-unique columns (GH3495_)
+ - Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_)
+ and handle missing elements like unique indices (GH3561_)
+ - Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_)
+ - Concat to produce a non-unique columns when duplicates are across dtypes is fixed (GH3602_)
+
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
on GitHub for a complete list.
+.. _GH3468: https://github.com/pydata/pandas/issues/3468
+.. _GH2194: https://github.com/pydata/pandas/issues/2194
+.. _GH2786: https://github.com/pydata/pandas/issues/2786
+.. _GH3230: https://github.com/pydata/pandas/issues/3230
+.. _GH3495: https://github.com/pydata/pandas/issues/3495
+.. _GH3455: https://github.com/pydata/pandas/issues/3455
+.. _GH3457: https://github.com/pydata/pandas/issues/3457
+.. _GH3561: https://github.com/pydata/pandas/issues/3561
+.. _GH3562: https://github.com/pydata/pandas/issues/3562
+.. _GH3602: https://github.com/pydata/pandas/issues/3602
.. _GH2437: https://github.com/pydata/pandas/issues/2437
.. _GH2852: https://github.com/pydata/pandas/issues/2852
.. _GH3477: https://github.com/pydata/pandas/issues/3477
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 7f05a045e36af..c77c043d26acd 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -1043,6 +1043,7 @@ def _concat_blocks(self, blocks):
'DataFrames')
return make_block(concat_values, blocks[0].items, self.new_axes[0])
else:
+
offsets = np.r_[0, np.cumsum([len(x._data.axes[0]) for
x in self.objs])]
indexer = np.concatenate([offsets[i] + b.ref_locs
@@ -1052,12 +1053,21 @@ def _concat_blocks(self, blocks):
concat_items = indexer
else:
concat_items = self.new_axes[0].take(indexer)
-
+
if self.ignore_index:
ref_items = self._get_fresh_axis()
return make_block(concat_values, concat_items, ref_items)
- return make_block(concat_values, concat_items, self.new_axes[0])
+ block = make_block(concat_values, concat_items, self.new_axes[0])
+
+ # we need to set the ref_locs in this block so we have the mapping
+ # as we now have a non-unique index across dtypes, and we need to
+ # map the column location to the block location
+ # GH3602
+ if not self.new_axes[0].is_unique:
+ block._ref_locs = indexer
+
+ return block
def _concat_single_item(self, objs, item):
all_values = []
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index 2fb527b2eee6b..e230a5b2d25b3 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -1682,6 +1682,20 @@ def test_concat_bug_2972(self):
expected.columns=['same name', 'same name']
assert_frame_equal(result, expected)
+ def test_concat_bug_3602(self):
+
+ # GH 3602, duplicate columns
+ df1 = DataFrame({'firmNo' : [0,0,0,0], 'stringvar' : ['rrr', 'rrr', 'rrr', 'rrr'], 'prc' : [6,6,6,6] })
+ df2 = DataFrame({'misc' : [1,2,3,4], 'prc' : [6,6,6,6], 'C' : [9,10,11,12]})
+ expected = DataFrame([[0,6,'rrr',9,1,6],
+ [0,6,'rrr',10,2,6],
+ [0,6,'rrr',11,3,6],
+ [0,6,'rrr',12,4,6]])
+ expected.columns = ['firmNo','prc','stringvar','C','misc','prc']
+
+ result = concat([df1,df2],axis=1)
+ assert_frame_equal(result,expected)
+
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range('01-Jan-2013', '01-Jan-2014', freq='MS')[0:-1]
s1 = Series(randn(len(dates)), index=dates, name='value')
| closes #3602
DOC: added non-unique index issues to v0.11.1
| https://api.github.com/repos/pandas-dev/pandas/pulls/3647 | 2013-05-19T14:06:28Z | 2013-05-19T15:12:02Z | 2013-05-19T15:12:02Z | 2014-06-17T16:52:38Z |
ENH: Allow flags in str.replace keywords | diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 3521c9ff94b11..87a9ff7e9d95d 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -692,8 +692,9 @@ def contains(self, pat, case=True, flags=0, na=np.nan):
return self._wrap_result(result)
@copy(str_replace)
- def replace(self, pat, repl, n=-1, case=True):
- result = str_replace(self.series, pat, repl, n=n, case=case)
+ def replace(self, pat, repl, n=-1, case=True, flags=0):
+ result = str_replace(self.series, pat, repl, n=n, case=case,
+ flags=flags)
return self._wrap_result(result)
@copy(str_repeat)
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 2134eea186649..7763ed7bc75db 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -271,6 +271,13 @@ def test_replace(self):
exp = Series([u'foobarBAD', NA])
tm.assert_series_equal(result, exp)
+ #flags + unicode
+ values = Series(["abcd,\xc3\xa0".decode("utf-8")])
+ exp = Series(["abcd, \xc3\xa0".decode("utf-8")])
+ result = values.str.replace("(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
+ tm.assert_series_equal(result, exp)
+
+
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
| Fix what appears to be an oversight.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3646 | 2013-05-19T04:18:48Z | 2013-05-19T21:13:54Z | 2013-05-19T21:13:53Z | 2014-07-16T08:09:44Z |
ENH: add Series.str iterator | diff --git a/RELEASE.rst b/RELEASE.rst
index 4e92ecb24574a..f97708de13442 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -43,6 +43,7 @@ pandas 0.11.1
multi-index column.
Note: The default value will change in 0.12 to make the default *to* write and
read multi-index columns in the new format. (GH3571_, GH1651_, GH3141_)
+ - Add iterator to ``Series.str`` (GH3638_)
**Improvements to existing features**
@@ -199,7 +200,7 @@ pandas 0.11.1
.. _GH3571: https://github.com/pydata/pandas/issues/3571
.. _GH1651: https://github.com/pydata/pandas/issues/1651
.. _GH3141: https://github.com/pydata/pandas/issues/3141
-
+.. _GH3638: https://github.com/pydata/pandas/issues/3638
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index a724ce96a7381..e9861301231d8 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -80,6 +80,27 @@ Enhancements
- ``DataFrame.replace()`` now allows regular expressions on contained
``Series`` with object dtype. See the examples section in the regular docs
:ref:`Replacing via String Expression <missing_data.replace_expression>`
+ - ``Series.str`` now supports iteration (GH3638_). You can iterate over the
+ individual elements of each string in the ``Series``. Each iteration yields
+ yields a ``Series`` with either a single character at each index of the
+ original ``Series`` or ``NaN``. For example,
+
+ .. ipython:: python
+
+ strs = 'go', 'bow', 'joe', 'slow'
+ ds = Series(strs)
+
+ for s in ds.str:
+ print s
+
+ s
+ s.dropna().values.item() == 'w'
+
+ The last element yielded by the iterator will be a ``Series`` containing
+ the last element of the longest string in the ``Series`` with all other
+ elements being ``NaN``. Here since ``'wikitravel'`` is the longest string
+ and there are no other strings with the same length ``'l'`` is the only
+ non-null string in the yielded ``Series``.
- Multi-index column support for reading and writing csvs
@@ -133,3 +154,4 @@ on GitHub for a complete list.
.. _GH3571: https://github.com/pydata/pandas/issues/3571
.. _GH1651: https://github.com/pydata/pandas/issues/1651
.. _GH3141: https://github.com/pydata/pandas/issues/3141
+.. _GH3638: https://github.com/pydata/pandas/issues/3638
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 3521c9ff94b11..13e2b3b0a4cab 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -661,6 +661,14 @@ def __getitem__(self, key):
else:
return self.get(key)
+ def __iter__(self):
+ i = 0
+ g = self.get(i)
+ while g.notnull().any():
+ yield g
+ i += 1
+ g = self.get(i)
+
def _wrap_result(self, result):
return Series(result, index=self.series.index,
name=self.series.name)
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 2134eea186649..0eac88419f5e3 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -10,6 +10,8 @@
from numpy import nan as NA
import numpy as np
+from numpy.testing import assert_array_equal
+from numpy.random import randint
from pandas import (Index, Series, TimeSeries, DataFrame, isnull, notnull,
bdate_range, date_range)
@@ -25,6 +27,82 @@ class TestStringMethods(unittest.TestCase):
_multiprocess_can_split_ = True
+ def test_iter(self):
+ # GH3638
+ strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
+ ds = Series(strs)
+
+ for s in ds.str:
+ # iter must yield a Series
+ self.assert_(isinstance(s, Series))
+
+ # indices of each yielded Series should be equal to the index of
+ # the original Series
+ assert_array_equal(s.index, ds.index)
+
+ for el in s:
+ # each element of the series is either a basestring or nan
+ self.assert_(isinstance(el, basestring) or isnull(el))
+
+ # desired behavior is to iterate until everything would be nan on the
+ # next iter so make sure the last element of the iterator was 'l' in
+ # this case since 'wikitravel' is the longest string
+ self.assertEqual(s.dropna().values.item(), 'l')
+
+ def test_iter_empty(self):
+ ds = Series([], dtype=object)
+
+ i, s = 100, 1
+
+ for i, s in enumerate(ds.str):
+ pass
+
+ # nothing to iterate over so nothing defined values should remain
+ # unchanged
+ self.assertEqual(i, 100)
+ self.assertEqual(s, 1)
+
+ def test_iter_single_element(self):
+ ds = Series(['a'])
+
+ for i, s in enumerate(ds.str):
+ pass
+
+ self.assertFalse(i)
+ assert_series_equal(ds, s)
+
+ def test_iter_numeric_try_string(self):
+ # behavior identical to empty series
+ dsi = Series(range(4))
+
+ i, s = 100, 'h'
+
+ for i, s in enumerate(dsi.str):
+ pass
+
+ self.assertEqual(i, 100)
+ self.assertEqual(s, 'h')
+
+ dsf = Series(np.arange(4.))
+
+ for i, s in enumerate(dsf.str):
+ pass
+
+ self.assertEqual(i, 100)
+ self.assertEqual(s, 'h')
+
+ def test_iter_object_try_string(self):
+ ds = Series([slice(None, randint(10), randint(10, 20))
+ for _ in xrange(4)])
+
+ i, s = 100, 'h'
+
+ for i, s in enumerate(ds.str):
+ pass
+
+ self.assertEqual(i, 100)
+ self.assertEqual(s, 'h')
+
def test_cat(self):
one = ['a', 'a', 'b', 'b', 'c', NA]
two = ['a', NA, 'b', 'd', 'foo', NA]
| closes #3638.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3645 | 2013-05-19T01:19:43Z | 2013-05-19T16:15:48Z | 2013-05-19T16:15:48Z | 2014-06-26T17:45:54Z |
BUG : Io sql one column (issue #3628) | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 3002f2f620f5e..b54a30d95bb54 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -228,7 +228,11 @@ def _write_sqlite(frame, table, names, cur):
wildcards = ','.join(['?'] * len(names))
insert_query = 'INSERT INTO %s (%s) VALUES (%s)' % (
table, col_names, wildcards)
- data = [tuple(x) for x in frame.values]
+ # pandas types are badly handled if there is only 1 column ( Issue #3628 )
+ if not len(frame.columns )==1 :
+ data = [tuple(x) for x in frame.values]
+ else :
+ data = [tuple(x) for x in frame.values.tolist()]
cur.executemany(insert_query, data)
def _write_mysql(frame, table, names, cur):
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index b443c55f97b8d..1daa50c70a900 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -219,6 +219,18 @@ def test_keyword_as_column_names(self):
df = DataFrame({'From':np.ones(5)})
sql.write_frame(df, con = self.db, name = 'testkeywords')
+ def test_onecolumn_of_integer(self):
+ '''
+ a column_of_integers dataframe should transfer well to sql
+ '''
+ mono_df=DataFrame([1 , 2], columns=['c0'])
+ sql.write_frame(mono_df, con = self.db, name = 'mono_df')
+ # computing the sum via sql
+ con_x=self.db
+ the_sum=sum([my_c0[0] for my_c0 in con_x.execute("select * from mono_df")])
+ # it should not fail, and gives 3 ( Issue #3628 )
+ self.assertEqual(the_sum , 3)
+
class TestMySQL(unittest.TestCase):
| closes #3628
This should be the proper pull request to
- TST test the problem,
- BUG the problem of panda fail to write a single column of integer to sqlite
Test successfully on Travis CI
| https://api.github.com/repos/pandas-dev/pandas/pulls/3644 | 2013-05-18T22:14:11Z | 2013-05-19T16:49:55Z | 2013-05-19T16:49:55Z | 2014-06-12T07:39:51Z |
single example for release notes | diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index ce19241030704..9df00afcfcf46 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -406,7 +406,7 @@ or you can pass the nested dictionary like so
.. ipython:: python
- df.replace(regex={'b': {'b': r'\s*\.\s*'}})
+ df.replace(regex={'b': {r'\s*\.\s*': nan}})
You can also use the group of a regular expression match when replacing (dict
of regex -> dict of regex), this works for lists as well
@@ -420,7 +420,7 @@ will be replaced with a scalar (list of regex -> regex)
.. ipython:: python
- df.replace([r'\s*\.\*', r'a|b'], nan, regex=True)
+ df.replace([r'\s*\.\s*', r'a|b'], nan, regex=True)
All of the regular expression examples can also be passed with the
``to_replace`` argument as the ``regex`` argument. In this case the ``value``
@@ -429,7 +429,7 @@ dictionary. The previous example, in this case, would then be
.. ipython:: python
- df.replace(regex=[r'\s*\.\*', r'a|b'], value=nan)
+ df.replace(regex=[r'\s*\.\s*', r'a|b'], value=nan)
This can be convenient if you do not want to pass ``regex=True`` every time you
want to use a regular expression.
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index d5256bcf26d25..b3f1e0c76420f 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -59,6 +59,24 @@ Enhancements
``Series`` with object dtype. See the examples section in the regular docs
:ref:`Replacing via String Expression <missing_data.replace_expression>`
+ For example you can do
+
+ .. ipython :: python
+
+ df = DataFrame({'a': list('ab..'), 'b': [1, 2, 3, 4]})
+ df.replace(regex=r'\s*\.\s*', value=nan)
+
+ to replace all occurrences of the string ``'.'`` with zero or more
+ instances of surrounding whitespace with ``NaN``.
+
+ Regular string replacement still works as expected. For example, you can do
+
+ .. ipython :: python
+
+ df.replace('.', nan)
+
+ to replace all occurrences of the string ``'.'`` with ``NaN``.
+
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
on GitHub for a complete list.
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 8e48ef094c419..b3054db56b718 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -6561,12 +6561,16 @@ def test_regex_replace_dict_nested(self):
dfmix = DataFrame(mix)
res = dfmix.replace({'b': {r'\s*\.\s*': nan}}, regex=True)
res2 = dfmix.copy()
+ res4 = dfmix.copy()
res2.replace({'b': {r'\s*\.\s*': nan}}, inplace=True, regex=True)
- print res2
+ res3 = dfmix.replace(regex={'b': {r'\s*\.\s*': nan}})
+ res4.replace(regex={'b': {r'\s*\.\s*': nan}}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
+ assert_frame_equal(res3, expec)
+ assert_frame_equal(res4, expec)
def test_regex_replace_list_to_scalar(self):
mix = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
| https://api.github.com/repos/pandas-dev/pandas/pulls/3639 | 2013-05-17T21:27:54Z | 2013-05-19T16:53:11Z | 2013-05-19T16:53:11Z | 2014-06-17T23:36:42Z | |
API: Raise on iloc indexing with a non-integer based boolean mask (GH3631) | diff --git a/RELEASE.rst b/RELEASE.rst
index 9147968997fc7..acb4f429e81b0 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -70,6 +70,9 @@ pandas 0.11.1
- Add ``squeeze`` keyword to ``groupby`` to allow reduction from
DataFrame -> Series if groups are unique. Regression from 0.10.1,
partial revert on (GH2893_) with (GH3596_)
+ - Raise on ``iloc`` when boolean indexing with a label based indexer mask
+ e.g. a boolean Series, even with integer labels, will raise. Since ``iloc``
+ is purely positional based, the labels on the Series are not alignable (GH3631_)
**Bug Fixes**
@@ -182,6 +185,7 @@ pandas 0.11.1
.. _GH3624: https://github.com/pydata/pandas/issues/3624
.. _GH3626: https://github.com/pydata/pandas/issues/3626
.. _GH3601: https://github.com/pydata/pandas/issues/3601
+.. _GH3631: https://github.com/pydata/pandas/issues/3631
.. _GH1512: https://github.com/pydata/pandas/issues/1512
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 55b7e653c3630..43b512a934558 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -68,7 +68,6 @@ three types of multi-axis indexing.
- An integer e.g. ``5``
- A list or array of integers ``[4, 3, 0]``
- A slice object with ints ``1:7``
- - A boolean array
See more at :ref:`Selection by Position <indexing.integer>`
@@ -291,7 +290,6 @@ The ``.iloc`` attribute is the primary access method. The following are valid in
- An integer e.g. ``5``
- A list or array of integers ``[4, 3, 0]``
- A slice object with ints ``1:7``
-- A boolean array
.. ipython:: python
@@ -329,12 +327,6 @@ Select via integer list
df1.iloc[[1,3,5],[1,3]]
-Select via boolean array
-
-.. ipython:: python
-
- df1.iloc[:,df1.iloc[0]>0]
-
For slicing rows explicitly (equiv to deprecated ``df.irow(slice(1,3))``).
.. ipython:: python
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index d5256bcf26d25..aed95188db26e 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -40,6 +40,27 @@ API changes
# no squeezing (the default, and behavior in 0.10.1)
df2.groupby("val1").apply(func)
+ - Raise on ``iloc`` when boolean indexing with a label based indexer mask
+ e.g. a boolean Series, even with integer labels, will raise. Since ``iloc``
+ is purely positional based, the labels on the Series are not alignable (GH3631_)
+
+ This case is rarely used, and there are plently of alternatives. This preserves the
+ ``iloc`` API to be *purely* positional based.
+
+ .. ipython:: python
+
+ df = DataFrame(range(5), list('ABCDE'), columns=['a'])
+ mask = (df.a%2 == 0)
+ mask
+
+ # this is what you should use
+ df.loc[mask]
+
+ # this will work as well
+ df.iloc[mask.values]
+
+ ``df.iloc[mask]`` will raise a ``ValueError``
+
Enhancements
~~~~~~~~~~~~
@@ -74,3 +95,4 @@ on GitHub for a complete list.
.. _GH3435: https://github.com/pydata/pandas/issues/3435
.. _GH1512: https://github.com/pydata/pandas/issues/1512
.. _GH2285: https://github.com/pydata/pandas/issues/2285
+.. _GH3631: https://github.com/pydata/pandas/issues/3631
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 1cbc5abdc3ea3..02f1cf4539ac4 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -775,7 +775,14 @@ class _iLocIndexer(_LocationIndexer):
_exception = IndexError
def _has_valid_type(self, key, axis):
- return isinstance(key, slice) or com.is_integer(key) or com._is_bool_indexer(key) or _is_list_like(key)
+ if com._is_bool_indexer(key):
+ if hasattr(key,'index') and isinstance(key.index,Index):
+ if key.index.inferred_type == 'integer':
+ raise NotImplementedError("iLocation based boolean indexing on an integer type is not available")
+ raise ValueError("iLocation based boolean indexing cannot use an indexable as a mask")
+ return True
+
+ return isinstance(key, slice) or com.is_integer(key) or _is_list_like(key)
def _getitem_tuple(self, tup):
@@ -811,9 +818,11 @@ def _get_slice_axis(self, slice_obj, axis=0):
def _getitem_axis(self, key, axis=0):
if isinstance(key, slice):
+ self._has_valid_type(key,axis)
return self._get_slice_axis(key, axis=axis)
elif com._is_bool_indexer(key):
+ self._has_valid_type(key,axis)
return self._getbool_axis(key, axis=axis)
# a single integer or a list of integers
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index f6d106f422911..d90aa369aa46e 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -888,6 +888,60 @@ def test_multi_assign(self):
df2.ix[mask, cols]= dft.ix[mask, cols].values
assert_frame_equal(df2,expected)
+ def test_iloc_mask(self):
+
+ # GH 3631, iloc with a mask (of a series) should raise
+ df = DataFrame(range(5), list('ABCDE'), columns=['a'])
+ mask = (df.a%2 == 0)
+ self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
+ mask.index = range(len(mask))
+ self.assertRaises(NotImplementedError, df.iloc.__getitem__, tuple([mask]))
+
+ # ndarray ok
+ result = df.iloc[np.array([True] * len(mask),dtype=bool)]
+ assert_frame_equal(result,df)
+
+ # the possibilities
+ locs = np.arange(4)
+ nums = 2**locs
+ reps = map(bin, nums)
+ df = DataFrame({'locs':locs, 'nums':nums}, reps)
+
+ expected = {
+ (None,'') : '0b1100',
+ (None,'.loc') : '0b1100',
+ (None,'.iloc') : '0b1100',
+ ('index','') : '0b11',
+ ('index','.loc') : '0b11',
+ ('index','.iloc') : 'iLocation based boolean indexing cannot use an indexable as a mask',
+ ('locs','') : 'Unalignable boolean Series key provided',
+ ('locs','.loc') : 'Unalignable boolean Series key provided',
+ ('locs','.iloc') : 'iLocation based boolean indexing on an integer type is not available',
+ }
+
+ import warnings
+ warnings.filterwarnings(action='ignore', category=UserWarning)
+ result = dict()
+ for idx in [None, 'index', 'locs']:
+ mask = (df.nums>2).values
+ if idx:
+ mask = Series(mask, list(reversed(getattr(df, idx))))
+ for method in ['', '.loc', '.iloc']:
+ try:
+ if method:
+ accessor = getattr(df, method[1:])
+ else:
+ accessor = df
+ ans = str(bin(accessor[mask]['nums'].sum()))
+ except Exception, e:
+ ans = str(e)
+
+ key = tuple([idx,method])
+ r = expected.get(key)
+ if r != ans:
+ raise AssertionError("[%s] does not match [%s], received [%s]" %
+ (key,ans,r))
+ warnings.filterwarnings(action='always', category=UserWarning)
if __name__ == '__main__':
import nose
| closed #3631
| https://api.github.com/repos/pandas-dev/pandas/pulls/3635 | 2013-05-17T14:59:58Z | 2013-05-19T14:18:45Z | 2013-05-19T14:18:45Z | 2014-06-30T11:47:32Z |
BUG: (GH3626) issue with alignment of a DataFrame setitem with a piece of another DataFrame | diff --git a/RELEASE.rst b/RELEASE.rst
index 219eec42ec20f..19073c97d92eb 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -125,6 +125,7 @@ pandas 0.11.1
- Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] dtype in ``read_csv``
when ``parse_dates`` is specified (GH3062_)
- Fix not consolidating before to_csv (GH3624_)
+ - Fix alignment issue when setitem in a DataFrame with a piece of a DataFrame (GH3626_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -177,6 +178,7 @@ pandas 0.11.1
.. _GH3611: https://github.com/pydata/pandas/issues/3611
.. _GH3062: https://github.com/pydata/pandas/issues/3062
.. _GH3624: https://github.com/pydata/pandas/issues/3624
+.. _GH3626: https://github.com/pydata/pandas/issues/3626
.. _GH1512: https://github.com/pydata/pandas/issues/1512
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index dd27fa5c3473c..1cbc5abdc3ea3 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -138,14 +138,14 @@ def setter(item, v):
# align to
if item in value:
v = value[item]
- v = v.reindex(self.obj[item].reindex(v.index).dropna().index)
+ v = v.reindex(self.obj[item].index & v.index)
setter(item, v.values)
else:
setter(item, np.nan)
# we have an equal len ndarray
- elif isinstance(value, np.ndarray) and value.ndim > 1:
- if len(labels) != len(value):
+ elif isinstance(value, np.ndarray) and value.ndim == 2:
+ if len(labels) != value.shape[1]:
raise ValueError('Must have equal len keys and value when'
' setting with an ndarray')
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 65e7516d4b082..f6d106f422911 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -853,6 +853,41 @@ def test_iloc_panel_issue(self):
self.assert_(p.iloc[1, :3, 1].shape == (3,))
self.assert_(p.iloc[:3, 1, 1].shape == (3,))
+ def test_multi_assign(self):
+
+ # GH 3626, an assignement of a sub-df to a df
+ df = DataFrame({'FC':['a','b','a','b','a','b'],
+ 'PF':[0,0,0,0,1,1],
+ 'col1':range(6),
+ 'col2':range(6,12)})
+ df.ix[1,0]=np.nan
+ df2 = df.copy()
+
+ mask=~df2.FC.isnull()
+ cols=['col1', 'col2']
+
+ dft = df2 * 2
+ dft.ix[3,3] = np.nan
+
+ expected = DataFrame({'FC':['a',np.nan,'a','b','a','b'],
+ 'PF':[0,0,0,0,1,1],
+ 'col1':Series([0,1,4,6,8,10],dtype='float64'),
+ 'col2':[12,7,16,np.nan,20,22]})
+
+
+ # frame on rhs
+ df2.ix[mask, cols]= dft.ix[mask, cols]
+ assert_frame_equal(df2,expected)
+ df2.ix[mask, cols]= dft.ix[mask, cols]
+ assert_frame_equal(df2,expected)
+
+ # with an ndarray on rhs
+ df2 = df.copy()
+ df2.ix[mask, cols]= dft.ix[mask, cols].values
+ assert_frame_equal(df2,expected)
+ df2.ix[mask, cols]= dft.ix[mask, cols].values
+ assert_frame_equal(df2,expected)
+
if __name__ == '__main__':
import nose
| closes #3626
| https://api.github.com/repos/pandas-dev/pandas/pulls/3632 | 2013-05-17T13:05:51Z | 2013-05-17T16:28:17Z | 2013-05-17T16:28:17Z | 2014-06-20T17:50:44Z |
ENH: Add Series.sort ascending keyword | diff --git a/pandas/core/series.py b/pandas/core/series.py
index e807cf3f1dfd4..a04e931cf07e3 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2012,7 +2012,7 @@ def dot(self, other):
Parameters
----------
- other : Series or DataFrame
+ other : Series or DataFrame
Returns
-------
@@ -2194,7 +2194,7 @@ def update(self, other):
#----------------------------------------------------------------------
# Reindexing, sorting
- def sort(self, axis=0, kind='quicksort', order=None):
+ def sort(self, axis=0, kind='quicksort', order=None, ascending=True):
"""
Sort values and index labels by value, in place. For compatibility with
ndarray API. No return value
@@ -2206,8 +2206,15 @@ def sort(self, axis=0, kind='quicksort', order=None):
Choice of sorting algorithm. See np.sort for more
information. 'mergesort' is the only stable algorithm
order : ignored
+ ascending : boolean, default True
+ Sort ascending. Passing False sorts descending
+
+ See Also
+ --------
+ pandas.Series.order
"""
- sortedSeries = self.order(na_last=True, kind=kind)
+ sortedSeries = self.order(na_last=True, kind=kind,
+ ascending=ascending)
true_base = self
while true_base.base is not None:
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index b988f2985877a..288d26eefce87 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -475,7 +475,7 @@ def test_constructor_dtype_datetime64(self):
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
-
+
s = Series(dates)
self.assert_(s.dtype == 'M8[ns]')
@@ -1162,7 +1162,7 @@ def test_where(self):
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5,3.5,4.5,5.5,6.5]
- s[mask] = values
+ s[mask] = values
expected = Series(values + range(5,10), dtype='float64')
assert_series_equal(s, expected)
self.assertEquals(s.dtype, expected.dtype)
@@ -2591,7 +2591,7 @@ def test_dot(self):
expected = Series(np.dot(a.values, b.values),
index=['1', '2', '3'])
assert_series_equal(result, expected)
-
+
#Check index alignment
b2 = b.reindex(index=reversed(b.index))
result = a.dot(b)
@@ -2723,6 +2723,11 @@ def test_sort(self):
self.assert_(np.array_equal(ts, self.ts.order()))
self.assert_(np.array_equal(ts.index, self.ts.order().index))
+ ts.sort(ascending=False)
+ self.assert_(np.array_equal(ts, self.ts.order(ascending=False)))
+ self.assert_(np.array_equal(ts.index,
+ self.ts.order(ascending=False).index))
+
def test_sort_index(self):
import random
| A convenience.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3630 | 2013-05-17T04:47:14Z | 2013-05-19T21:14:06Z | 2013-05-19T21:14:06Z | 2014-07-16T08:09:34Z |
BUG: (GH3588) fix pivoting with nan in the index | diff --git a/RELEASE.rst b/RELEASE.rst
index 0ade3e92c164a..2f98922eb403e 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -144,6 +144,7 @@ pandas 0.11.1
- Fix plotting of unordered DatetimeIndex (GH3601_)
- ``sql.write_frame`` failing when writing a single column to sqlite (GH3628_),
thanks to @stonebig
+ - Fix pivoting with ``nan`` in the index (GH3558_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -194,6 +195,7 @@ pandas 0.11.1
.. _GH3617: https://github.com/pydata/pandas/issues/3617
.. _GH3435: https://github.com/pydata/pandas/issues/3435
.. _GH3611: https://github.com/pydata/pandas/issues/3611
+.. _GH3558: https://github.com/pydata/pandas/issues/3558
.. _GH3062: https://github.com/pydata/pandas/issues/3062
.. _GH3624: https://github.com/pydata/pandas/issues/3624
.. _GH3626: https://github.com/pydata/pandas/issues/3626
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 02f1cf4539ac4..ea684ef11446c 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -469,11 +469,14 @@ def _reindex(keys, level=None):
missing = com._ensure_platform_int(missing)
missing_labels = keyarr.take(missing)
- missing_labels_indexer = com._ensure_int64(l[~check])
+ missing_indexer = com._ensure_int64(l[~check])
cur_labels = result._get_axis(axis).values
- cur_labels_indexer = com._ensure_int64(l[check])
- new_labels = lib.combine_from_indexers(cur_labels, cur_labels_indexer,
- missing_labels, missing_labels_indexer)
+ cur_indexer = com._ensure_int64(l[check])
+
+ new_labels = np.empty(tuple([len(indexer)]),dtype=object)
+ new_labels[cur_indexer] = cur_labels
+ new_labels[missing_indexer] = missing_labels
+
result = result.reindex_axis(new_labels,axis=axis)
return result
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index 8595e2a91906d..b2e5bb01f53af 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -10,12 +10,12 @@
from pandas.core.categorical import Categorical
from pandas.core.common import (notnull, _ensure_platform_int, _maybe_promote,
- _maybe_upcast)
+ _maybe_upcast, isnull)
from pandas.core.groupby import (get_group_index, _compress_group_index,
decons_group_index)
import pandas.core.common as com
import pandas.algos as algos
-
+from pandas import lib
from pandas.core.index import MultiIndex, Index
@@ -67,7 +67,14 @@ def __init__(self, values, index, level=-1, value_columns=None):
self.index = index
self.level = self.index._get_level_number(level)
- self.new_index_levels = list(index.levels)
+ levels = index.levels
+ labels = index.labels
+ def _make_index(lev,lab):
+ i = lev.__class__(_make_index_array_level(lev.values,lab))
+ i.name = lev.name
+ return i
+
+ self.new_index_levels = list([ _make_index(lev,lab) for lev,lab in zip(levels,labels) ])
self.new_index_names = list(index.names)
self.removed_name = self.new_index_names.pop(self.level)
@@ -140,6 +147,19 @@ def get_result(self):
values = com.take_nd(values, inds, axis=1)
columns = columns[inds]
+ # we might have a missing index
+ if len(index) != values.shape[0]:
+ mask = isnull(index)
+ if mask.any():
+ l = np.arange(len(index))
+ values, orig_values = np.empty((len(index),values.shape[1])), values
+ values.fill(np.nan)
+ values_indexer = com._ensure_int64(l[~mask])
+ for i, j in enumerate(values_indexer):
+ values[j] = orig_values[i]
+ else:
+ index = index.take(self.unique_groups)
+
return DataFrame(values, index=index, columns=columns)
def get_new_values(self):
@@ -201,11 +221,13 @@ def get_new_columns(self):
def get_new_index(self):
result_labels = []
for cur in self.sorted_labels[:-1]:
- result_labels.append(cur.take(self.compressor))
+ labels = cur.take(self.compressor)
+ labels = _make_index_array_level(labels,cur)
+ result_labels.append(labels)
# construct the new index
if len(self.new_index_levels) == 1:
- new_index = self.new_index_levels[0].take(self.unique_groups)
+ new_index = self.new_index_levels[0]
new_index.name = self.new_index_names[0]
else:
new_index = MultiIndex(levels=self.new_index_levels,
@@ -215,6 +237,26 @@ def get_new_index(self):
return new_index
+def _make_index_array_level(lev,lab):
+ """ create the combined index array, preserving nans, return an array """
+ mask = lab == -1
+ if not mask.any():
+ return lev
+
+ l = np.arange(len(lab))
+ mask_labels = np.empty(len(mask[mask]),dtype=object)
+ mask_labels.fill(np.nan)
+ mask_indexer = com._ensure_int64(l[mask])
+
+ labels = lev
+ labels_indexer = com._ensure_int64(l[~mask])
+
+ new_labels = np.empty(tuple([len(lab)]),dtype=object)
+ new_labels[labels_indexer] = labels
+ new_labels[mask_indexer] = mask_labels
+
+ return new_labels
+
def _unstack_multiple(data, clocs):
if len(clocs) == 0:
return data
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index 30c65d9fcdd9f..15791a984ecc5 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -416,26 +416,6 @@ def dicts_to_array(list dicts, list columns):
return result
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def combine_from_indexers(ndarray a, ndarray[int64_t] a_indexer,
- ndarray b, ndarray[int64_t] b_indexer):
- cdef:
- Py_ssize_t i, n_a, n_b
- ndarray result
-
- n_a = len(a)
- n_b = len(b)
- result = np.empty(n_a+n_b,dtype=object)
-
- for i in range(n_a):
- result[a_indexer[i]] = a[i]
- for i in range(n_b):
- result[b_indexer[i]] = b[i]
-
- return result
-
-
def fast_zip(list ndarrays):
'''
For zipping multiple ndarrays into an ndarray of tuples
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index d90aa369aa46e..e9afa1ae6ec1d 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -840,6 +840,16 @@ def test_set_index_nan(self):
result = df.set_index(['year','PRuid','QC']).reset_index().reindex(columns=df.columns)
assert_frame_equal(result,df)
+ def test_multi_nan_indexing(self):
+
+ # GH 3588
+ df = DataFrame({"a":['R1', 'R2', np.nan, 'R4'], 'b':["C1", "C2", "C3" , "C4"], "c":[10, 15, np.nan , 20]})
+ result = df.set_index(['a','b'], drop=False)
+ expected = DataFrame({"a":['R1', 'R2', np.nan, 'R4'], 'b':["C1", "C2", "C3" , "C4"], "c":[10, 15, np.nan , 20]},
+ index = [Index(['R1','R2',np.nan,'R4'],name='a'),Index(['C1','C2','C3','C4'],name='b')])
+ assert_frame_equal(result,expected)
+
+
def test_iloc_panel_issue(self):
# GH 3617
diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py
index c0e0de1a23dad..e333691b1e6d2 100644
--- a/pandas/tools/tests/test_pivot.py
+++ b/pandas/tools/tests/test_pivot.py
@@ -2,7 +2,7 @@
import numpy as np
-from pandas import DataFrame, Series
+from pandas import DataFrame, Series, Index
from pandas.tools.merge import concat
from pandas.tools.pivot import pivot_table, crosstab
import pandas.util.testing as tm
@@ -129,6 +129,17 @@ def test_pivot_multi_functions(self):
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
+ def test_pivot_index_with_nan(self):
+ # GH 3588
+ nan = np.nan
+ df = DataFrame({"a":['R1', 'R2', nan, 'R4'], 'b':["C1", "C2", "C3" , "C4"], "c":[10, 15, nan , 20]})
+ result = df.pivot('a','b','c')
+ expected = DataFrame([[nan,nan,nan,nan],[nan,10,nan,nan],
+ [nan,nan,nan,nan],[nan,nan,15,20]],
+ index = Index(['R1','R2',nan,'R4'],name='a'),
+ columns = Index(['C1','C2','C3','C4'],name='b'))
+ tm.assert_frame_equal(result, expected)
+
def test_margins(self):
def _check_output(res, col, rows=['A', 'B'], cols=['C']):
cmarg = res['All'][:-1]
| closed #3588
| https://api.github.com/repos/pandas-dev/pandas/pulls/3627 | 2013-05-16T21:04:18Z | 2013-05-19T16:55:29Z | 2013-05-19T16:55:29Z | 2014-06-14T22:46:49Z |
BUG: Fix not consolidating before to_csv (GH3624_) | diff --git a/RELEASE.rst b/RELEASE.rst
index aeaebd88c5ee7..219eec42ec20f 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -124,6 +124,7 @@ pandas 0.11.1
- Fix indexing issue in ndim >= 3 with ``iloc`` (GH3617_)
- Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] dtype in ``read_csv``
when ``parse_dates`` is specified (GH3062_)
+ - Fix not consolidating before to_csv (GH3624_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -175,6 +176,7 @@ pandas 0.11.1
.. _GH3435: https://github.com/pydata/pandas/issues/3435
.. _GH3611: https://github.com/pydata/pandas/issues/3611
.. _GH3062: https://github.com/pydata/pandas/issues/3062
+.. _GH3624: https://github.com/pydata/pandas/issues/3624
.. _GH1512: https://github.com/pydata/pandas/issues/1512
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 2924f1579fb97..bea4b59bfaaa4 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -777,6 +777,8 @@ def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None,
line_terminator='\n', chunksize=None, engine=None):
self.engine = engine # remove for 0.12
+
+ obj._consolidate_inplace()
self.obj = obj
self.path_or_buf = path_or_buf
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index b39fab1bd4828..de49eca7dab1c 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4939,6 +4939,19 @@ def test_to_csv_from_csv_w_all_infs(self):
assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name
assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)
+ def test_to_csv_no_index(self):
+ # GH 3624, after appending columns, to_csv fails
+ pname = '__tmp_to_csv_no_index__'
+ with ensure_clean(pname) as path:
+ df = DataFrame({'c1':[1,2,3], 'c2':[4,5,6]})
+ df.to_csv(path, index=False)
+ result = read_csv(path)
+ assert_frame_equal(df,result)
+ df['c3'] = [7,8,9]
+ df.to_csv(path, index=False)
+ result = read_csv(path)
+ assert_frame_equal(df,result)
+
def test_to_csv_multiindex(self):
pname = '__tmp_to_csv_multiindex__'
| closes #3624
| https://api.github.com/repos/pandas-dev/pandas/pulls/3625 | 2013-05-16T17:13:03Z | 2013-05-16T17:38:59Z | 2013-05-16T17:38:59Z | 2014-07-16T08:09:28Z |
DOC: more informative PerformanceWarning in HDFStore | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index d3b7533840a86..0ae835c81d870 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -50,7 +50,7 @@ class AttributeConflictWarning(Warning): pass
class PerformanceWarning(Warning): pass
performance_doc = """
your performance may suffer as PyTables will pickle object types that it cannot map
-directly to c-types [inferred_type->%s,key->%s]
+directly to c-types [inferred_type->%s,key->%s] [items->%s]
"""
# map object types
@@ -1861,7 +1861,7 @@ def write_array_empty(self, key, value):
getattr(self.group, key)._v_attrs.value_type = str(value.dtype)
getattr(self.group, key)._v_attrs.shape = value.shape
- def write_array(self, key, value):
+ def write_array(self, key, value, items=None):
if key in self.group:
self._handle.removeNode(self.group, key)
@@ -1904,7 +1904,11 @@ def write_array(self, key, value):
elif inferred_type == 'string':
pass
else:
- ws = performance_doc % (inferred_type,key)
+ try:
+ items = list(items)
+ except:
+ pass
+ ws = performance_doc % (inferred_type,key,items)
warnings.warn(ws, PerformanceWarning)
vlarr = self._handle.createVLArray(self.group, key,
@@ -2115,7 +2119,7 @@ def write(self, obj, **kwargs):
for i in range(nblocks):
blk = data.blocks[i]
# I have no idea why, but writing values before items fixed #2299
- self.write_array('block%d_values' % i, blk.values)
+ self.write_array('block%d_values' % i, blk.values, items=blk.items)
self.write_index('block%d_items' % i, blk.items)
class FrameStorer(BlockManagerStorer):
| https://api.github.com/repos/pandas-dev/pandas/pulls/3623 | 2013-05-16T14:29:22Z | 2013-05-16T14:51:07Z | 2013-05-16T14:51:07Z | 2014-07-16T08:09:26Z | |
BUG: (GH3062) Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] in read_csv | diff --git a/RELEASE.rst b/RELEASE.rst
index 8c0d56666f4e1..aeaebd88c5ee7 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -122,6 +122,8 @@ pandas 0.11.1
- Fix ``read_csv`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
was failing (GH3611_)
- Fix indexing issue in ndim >= 3 with ``iloc`` (GH3617_)
+ - Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] dtype in ``read_csv``
+ when ``parse_dates`` is specified (GH3062_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -172,6 +174,7 @@ pandas 0.11.1
.. _GH3617: https://github.com/pydata/pandas/issues/3617
.. _GH3435: https://github.com/pydata/pandas/issues/3435
.. _GH3611: https://github.com/pydata/pandas/issues/3611
+.. _GH3062: https://github.com/pydata/pandas/issues/3062
.. _GH1512: https://github.com/pydata/pandas/issues/1512
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 4a9004b7068ba..38a31c042d120 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -531,6 +531,28 @@ def test_custom_na_values(self):
skiprows=[1])
assert_almost_equal(df3.values, expected)
+ def test_nat_parse(self):
+
+ # GH 3062
+ df = DataFrame(dict({
+ 'A' : np.asarray(range(10),dtype='float64'),
+ 'B' : pd.Timestamp('20010101') }))
+ df.iloc[3:6,:] = np.nan
+
+ with ensure_clean('__nat_parse_.csv') as path:
+ df.to_csv(path)
+ result = read_csv(path,index_col=0,parse_dates=['B'])
+ tm.assert_frame_equal(result,df)
+
+ expected = Series(dict( A = 'float64',B = 'datetime64[ns]'))
+ tm.assert_series_equal(expected,result.dtypes)
+
+ # test with NaT for the nan_rep
+ # we don't have a method to specif the Datetime na_rep (it defaults to '')
+ df.to_csv(path)
+ result = read_csv(path,index_col=0,parse_dates=['B'])
+ tm.assert_frame_equal(result,df)
+
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index f9c1b2329c16d..a633b9482da06 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -318,8 +318,10 @@ class Timestamp(_Timestamp):
ts.dts.us, ts.tzinfo)
+_nat_strings = set(['NaT','nat','NAT','nan','NaN','NAN'])
class NaTType(_NaT):
"""(N)ot-(A)-(T)ime, the time equivalent of NaN"""
+
def __new__(cls):
cdef _NaT base
@@ -647,8 +649,11 @@ cdef convert_to_tsobject(object ts, object tz):
obj.value = ts
pandas_datetime_to_datetimestruct(ts, PANDAS_FR_ns, &obj.dts)
elif util.is_string_object(ts):
- _string_to_dts(ts, &obj.dts)
- obj.value = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &obj.dts)
+ if ts in _nat_strings:
+ obj.value = NPY_NAT
+ else:
+ _string_to_dts(ts, &obj.dts)
+ obj.value = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &obj.dts)
elif PyDateTime_Check(ts):
if tz is not None:
# sort of a temporary hack
@@ -862,6 +867,10 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False,
iresult[i] = iNaT
continue
+ elif val in _nat_strings:
+ iresult[i] = iNaT
+ continue
+
_string_to_dts(val, &dts)
iresult[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns,
&dts)
| closes #3062
| https://api.github.com/repos/pandas-dev/pandas/pulls/3621 | 2013-05-16T13:00:16Z | 2013-05-16T13:38:33Z | 2013-05-16T13:38:33Z | 2014-07-16T08:09:24Z |
BUG: fixed platform int issues on 32-bit | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 6bb4b36862956..a52c932b30ba4 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1612,13 +1612,13 @@ def _astype_nansafe(arr, dtype, copy = True):
if is_datetime64_dtype(arr):
if dtype == object:
return tslib.ints_to_pydatetime(arr.view(np.int64))
- elif issubclass(dtype.type, np.int):
+ elif dtype == np.int64:
return arr.view(dtype)
elif dtype != _NS_DTYPE:
raise TypeError("cannot astype a datetimelike from [%s] to [%s]" % (arr.dtype,dtype))
return arr.astype(_NS_DTYPE)
elif is_timedelta64_dtype(arr):
- if issubclass(dtype.type, np.int):
+ if dtype == np.int64:
return arr.view(dtype)
elif dtype == object:
return arr.astype(object)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 8b684b621a540..dd27fa5c3473c 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -322,6 +322,7 @@ def _convert_for_reindex(self, key, axis=0):
keyarr = _asarray_tuplesafe(key)
if _is_integer_dtype(keyarr) and not _is_integer_index(labels):
+ keyarr = com._ensure_platform_int(keyarr)
return labels.take(keyarr)
return keyarr
@@ -466,10 +467,11 @@ def _reindex(keys, level=None):
if len(missing):
l = np.arange(len(indexer))
+ missing = com._ensure_platform_int(missing)
missing_labels = keyarr.take(missing)
- missing_labels_indexer = l[~check]
+ missing_labels_indexer = com._ensure_int64(l[~check])
cur_labels = result._get_axis(axis).values
- cur_labels_indexer = l[check]
+ cur_labels_indexer = com._ensure_int64(l[check])
new_labels = lib.combine_from_indexers(cur_labels, cur_labels_indexer,
missing_labels, missing_labels_indexer)
result = result.reindex_axis(new_labels,axis=axis)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 11ede8d759b38..b988f2985877a 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1878,10 +1878,11 @@ def test_constructor_dtype_timedelta64(self):
self.assertRaises(TypeError, td.astype, 'm8[%s]' % t)
# valid astype
- td.astype('int')
+ td.astype('int64')
# this is an invalid casting
self.assertRaises(Exception, Series, [ timedelta(days=i) for i in range(3) ] + [ 'foo' ], dtype='m8[ns]' )
+ self.assertRaises(TypeError, td.astype, 'int32')
# leave as object here
td = Series([ timedelta(days=i) for i in range(3) ] + [ 'foo' ])
| https://api.github.com/repos/pandas-dev/pandas/pulls/3620 | 2013-05-16T12:02:48Z | 2013-05-16T12:02:59Z | 2013-05-16T12:02:59Z | 2014-07-16T08:09:22Z | |
BUG: Reindex data if plotting time/period index (GH3601) | diff --git a/RELEASE.rst b/RELEASE.rst
index 56e3096d23cb2..9147968997fc7 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -127,6 +127,7 @@ pandas 0.11.1
when ``parse_dates`` is specified (GH3062_)
- Fix not consolidating before to_csv (GH3624_)
- Fix alignment issue when setitem in a DataFrame with a piece of a DataFrame (GH3626_)
+ - Fix plotting of unordered DatetimeIndex (GH3601_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -180,6 +181,7 @@ pandas 0.11.1
.. _GH3062: https://github.com/pydata/pandas/issues/3062
.. _GH3624: https://github.com/pydata/pandas/issues/3624
.. _GH3626: https://github.com/pydata/pandas/issues/3626
+.. _GH3601: https://github.com/pydata/pandas/issues/3601
.. _GH1512: https://github.com/pydata/pandas/issues/1512
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 5033dc2d3a549..197b26014a760 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -677,7 +677,7 @@ def test_default_color_cycle(self):
@slow
def test_unordered_ts(self):
- df = DataFrame(np.random.randn(3, 1),
+ df = DataFrame(np.array([3.0, 2.0, 1.0]),
index=[date(2012, 10, 1),
date(2012, 9, 1),
date(2012, 8, 1)],
@@ -685,6 +685,8 @@ def test_unordered_ts(self):
ax = df.plot()
xticks = ax.lines[0].get_xdata()
self.assert_(xticks[0] < xticks[1])
+ ydata = ax.lines[0].get_ydata()
+ self.assert_(np.all(ydata == np.array([1.0, 2.0, 3.0])))
class TestDataFrameGroupByPlots(unittest.TestCase):
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 223e127223195..751f5fcdb82b2 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -947,8 +947,8 @@ def _get_xticks(self, convert_period=False):
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
- index = index.to_timestamp().order()
- x = index._mpl_repr()
+ self.data = self.data.reindex(index=index.order())
+ x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
@@ -958,7 +958,8 @@ def _get_xticks(self, convert_period=False):
"""
x = index._mpl_repr()
elif is_datetype:
- x = index.order()._mpl_repr()
+ self.data = self.data.reindex(index=index.order())
+ x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = range(len(index))
| Just wanted to push this fix to see if it looked OK and let the Travis build start.
If it looks ok, i will add something in the release notes and try to write a proper test.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3619 | 2013-05-16T02:38:54Z | 2013-05-17T23:58:15Z | 2013-05-17T23:58:15Z | 2014-07-16T08:09:21Z |
BUG: (GH3617) Fix indexing issue with ndim >= 3 with iloc | diff --git a/RELEASE.rst b/RELEASE.rst
index 503ae0e6bb30e..75d46dd98df3a 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -120,6 +120,7 @@ pandas 0.11.1
- Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_)
- Fix ``read_csv`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
was failing (GH3611_)
+ - Fix indexing issue in ndim >= 3 with ``iloc`` (GH3617_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -167,6 +168,7 @@ pandas 0.11.1
.. _GH3590: https://github.com/pydata/pandas/issues/3590
.. _GH3610: https://github.com/pydata/pandas/issues/3610
.. _GH3596: https://github.com/pydata/pandas/issues/3596
+.. _GH3617: https://github.com/pydata/pandas/issues/3617
.. _GH3435: https://github.com/pydata/pandas/issues/3435
.. _GH3611: https://github.com/pydata/pandas/issues/3611
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 29adce4e02591..8b684b621a540 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -778,8 +778,16 @@ def _has_valid_type(self, key, axis):
def _getitem_tuple(self, tup):
self._has_valid_tuple(tup)
+ try:
+ return self._getitem_lowerdim(tup)
+ except:
+ pass
+
retval = self.obj
for i, key in enumerate(tup):
+ if i >= self.obj.ndim:
+ raise IndexingError('Too many indexers')
+
if _is_null_slice(key):
continue
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 44b62991cf7a3..3f090b273787b 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -326,7 +326,7 @@ def _init_arrays(self, arrays, arr_names, axes):
@property
def shape(self):
- return [len(getattr(self, a)) for a in self._AXIS_ORDERS]
+ return tuple([len(getattr(self, a)) for a in self._AXIS_ORDERS])
@classmethod
def from_dict(cls, data, intersect=False, orient='items', dtype=None):
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 46fd98fc14ffb..65e7516d4b082 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -840,6 +840,19 @@ def test_set_index_nan(self):
result = df.set_index(['year','PRuid','QC']).reset_index().reindex(columns=df.columns)
assert_frame_equal(result,df)
+ def test_iloc_panel_issue(self):
+
+ # GH 3617
+ p = Panel(randn(4, 4, 4))
+
+ self.assert_(p.iloc[:3, :3, :3].shape == (3,3,3))
+ self.assert_(p.iloc[1, :3, :3].shape == (3,3))
+ self.assert_(p.iloc[:3, 1, :3].shape == (3,3))
+ self.assert_(p.iloc[:3, :3, 1].shape == (3,3))
+ self.assert_(p.iloc[1, 1, :3].shape == (3,))
+ self.assert_(p.iloc[1, :3, 1].shape == (3,))
+ self.assert_(p.iloc[:3, 1, 1].shape == (3,))
+
if __name__ == '__main__':
import nose
| closes #3617
| https://api.github.com/repos/pandas-dev/pandas/pulls/3618 | 2013-05-16T00:57:42Z | 2013-05-16T11:35:05Z | 2013-05-16T11:35:05Z | 2014-06-30T17:14:22Z |
ENH: read-html fixes | diff --git a/README.rst b/README.rst
index 3cdb2bf5b31f7..2d49c168eac60 100644
--- a/README.rst
+++ b/README.rst
@@ -92,12 +92,11 @@ Optional dependencies
- openpyxl version 1.6.1 or higher, for writing .xlsx files
- xlrd >= 0.9.0
- Needed for Excel I/O
- - `lxml <http://lxml.de>`__, or `Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for reading HTML tables
- - The differences between lxml and Beautiful Soup 4 are mostly speed (lxml
- is faster), however sometimes Beautiful Soup returns what you might
- intuitively expect. Both backends are implemented, so try them both to
- see which one you like. They should return very similar results.
- - Note that lxml requires Cython to build successfully
+ - Both `html5lib <https://github.com/html5lib/html5lib-python>`__ **and**
+ `Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for
+ reading HTML tables
+ - These can both easily be installed by ``pip install html5lib`` and ``pip
+ install beautifulsoup4``.
- `boto <https://pypi.python.org/pypi/boto>`__: necessary for Amazon S3 access.
diff --git a/RELEASE.rst b/RELEASE.rst
index 85cb4d9f40980..bbb04cecd6eba 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -30,8 +30,9 @@ pandas 0.11.1
**New features**
- - pd.read_html() can now parse HTML string, files or urls and return dataframes
- courtesy of @cpcloud. (GH3477_)
+ - ``pandas.read_html()`` can now parse HTML strings, files or urls and
+ returns a list of ``DataFrame`` s courtesy of @cpcloud. (GH3477_, GH3605_,
+ GH3606_)
- Support for reading Amazon S3 files. (GH3504_)
- Added module for reading and writing Stata files: pandas.io.stata (GH1512_)
- Added support for writing in ``to_csv`` and reading in ``read_csv``,
@@ -48,7 +49,7 @@ pandas 0.11.1
**Improvements to existing features**
- Fixed various issues with internal pprinting code, the repr() for various objects
- including TimeStamp and *Index now produces valid python code strings and
+ including TimeStamp and Index now produces valid python code strings and
can be used to recreate the object, (GH3038_, GH3379_, GH3251_, GH3460_)
- ``convert_objects`` now accepts a ``copy`` parameter (defaults to ``True``)
- ``HDFStore``
@@ -146,6 +147,9 @@ pandas 0.11.1
- ``sql.write_frame`` failing when writing a single column to sqlite (GH3628_),
thanks to @stonebig
- Fix pivoting with ``nan`` in the index (GH3558_)
+ - Fix running of bs4 tests when it is not installed (GH3605_)
+ - Fix parsing of html table (GH3606_)
+ - ``read_html()`` now only allows a single backend: ``html5lib`` (GH3616_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -209,6 +213,9 @@ pandas 0.11.1
.. _GH3141: https://github.com/pydata/pandas/issues/3141
.. _GH3628: https://github.com/pydata/pandas/issues/3628
.. _GH3638: https://github.com/pydata/pandas/issues/3638
+.. _GH3605: https://github.com/pydata/pandas/issues/3605
+.. _GH3606: https://github.com/pydata/pandas/issues/3606
+.. _Gh3616: https://github.com/pydata/pandas/issues/3616
pandas 0.11.0
=============
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 9d14d1b11c6b1..658d9d78d5b29 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -99,12 +99,11 @@ Optional Dependencies
* `openpyxl <http://packages.python.org/openpyxl/>`__, `xlrd/xlwt <http://www.python-excel.org/>`__
* openpyxl version 1.6.1 or higher
* Needed for Excel I/O
- * `lxml <http://lxml.de>`__, or `Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for reading HTML tables
- * The differences between lxml and Beautiful Soup 4 are mostly speed (lxml
- is faster), however sometimes Beautiful Soup returns what you might
- intuitively expect. Both backends are implemented, so try them both to
- see which one you like. They should return very similar results.
- * Note that lxml requires Cython to build successfully
+ * Both `html5lib <https://github.com/html5lib/html5lib-python>`__ **and**
+ `Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for
+ reading HTML tables
+ * These can both easily be installed by ``pip install html5lib`` and ``pip
+ install beautifulsoup4``.
.. note::
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 42ea4a2ca5d53..3dbf297dea5c5 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -918,18 +918,18 @@ which, if set to ``True``, will additionally output the length of the Series.
HTML
----
-Reading HTML format
+Reading HTML Content
~~~~~~~~~~~~~~~~~~~~~~
.. _io.read_html:
.. versionadded:: 0.11.1
-The toplevel :func:`~pandas.io.parsers.read_html` function can accept an HTML string/file/url
-and will parse HTML tables into pandas DataFrames.
+The toplevel :func:`~pandas.io.parsers.read_html` function can accept an HTML
+string/file/url and will parse HTML tables into list of pandas DataFrames.
-Writing to HTML format
+Writing to HTML files
~~~~~~~~~~~~~~~~~~~~~~
.. _io.html:
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index a42765591c818..40fda1305e505 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -64,9 +64,27 @@ API changes
Enhancements
~~~~~~~~~~~~
-
- - ``pd.read_html()`` can now parse HTML string, files or urls and return dataframes
- courtesy of @cpcloud. (GH3477_)
+ - ``pd.read_html()`` can now parse HTML strings, files or urls and return
+ DataFrames
+ courtesy of @cpcloud. (GH3477_, GH3605_, GH3606_)
+ - ``read_html()`` (GH3616_)
+ - now works with only a *single* parser backend, that is:
+ - BeautifulSoup4 + html5lib
+ - does *not* and will never support using the html parsing library
+ included with Python as a parser backend
+ - is a bit smarter about the parent table elements of matched text: if
+ multiple matches are found then only the *unique* parents of the result
+ are returned (uniqueness is determined using ``set``).
+ - no longer tries to guess about what you want to do with empty table cells
+ - argument ``infer_types`` now defaults to ``False``.
+ - now returns DataFrames whose default column index is the elements of
+ ``<thead>`` elements in the HTML soup, if any exist.
+ - considers all ``<th>`` and ``<td>`` elements inside of ``<thead>``
+ elements.
+ - tests are now correctly skipped if the proper libraries are not
+ installed.
+ - tests now include a ground-truth csv file from the FDIC failed bank list
+ data set.
- ``HDFStore``
- will retain index attributes (freq,tz,name) on recreation (GH3499_)
@@ -203,3 +221,6 @@ on GitHub for a complete list.
.. _GH1651: https://github.com/pydata/pandas/issues/1651
.. _GH3141: https://github.com/pydata/pandas/issues/3141
.. _GH3638: https://github.com/pydata/pandas/issues/3638
+.. _GH3616: https://github.com/pydata/pandas/issues/3616
+.. _GH3605: https://github.com/pydata/pandas/issues/3605
+.. _GH3606: https://github.com/pydata/pandas/issues/3606
diff --git a/pandas/io/html.py b/pandas/io/html.py
index c29d16db8132b..732bd57bec418 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -16,6 +16,8 @@
except ImportError:
import_module = __import__
+import numpy as np
+
from pandas import DataFrame, MultiIndex
from pandas.io.parsers import _is_url
@@ -78,8 +80,34 @@ def _get_skiprows_iter(skiprows):
raise TypeError('{0} is not a valid type for skipping'
' rows'.format(type(skiprows)))
- def _parse_columns(self, row):
- return row.xpath('.//td|.//th')
+
+def _read(io):
+ """Try to read from a url, file or string.
+
+ Parameters
+ ----------
+ io : str, unicode, or file-like
+
+ Returns
+ -------
+ raw_text : str
+ """
+ if _is_url(io):
+ try:
+ with contextlib.closing(urllib2.urlopen(io)) as url:
+ raw_text = url.read()
+ except urllib2.URLError:
+ raise ValueError('Invalid URL: "{0}"'.format(io))
+ elif hasattr(io, 'read'):
+ raw_text = io.read()
+ elif os.path.isfile(io):
+ with open(io) as f:
+ raw_text = f.read()
+ elif isinstance(io, basestring):
+ raw_text = io
+ else:
+ raise ValueError("Cannot read object of type '{0}'".format(type(io)))
+ return raw_text
class _HtmlFrameParser(object):
@@ -114,9 +142,12 @@ class _HtmlFrameParser(object):
To subclass this class effectively you must override the following methods:
* :func:`_build_doc`
* :func:`_text_getter`
- * :func:`_parse_columns`
- * :func:`_parse_table`
- * :func:`_parse_rows`
+ * :func:`_parse_td`
+ * :func:`_parse_tables`
+ * :func:`_parse_tr`
+ * :func:`_parse_thead`
+ * :func:`_parse_tbody`
+ * :func:`_parse_tfoot`
See each method's respective documentation for details on their
functionality.
"""
@@ -125,33 +156,11 @@ def __init__(self, io, match, attrs):
self.match = match
self.attrs = attrs
- def parse_rows(self):
- """Return a list of list of each table's rows.
-
- Returns
- -------
- row_list : list of list of node-like
- A list of each table's rows, which are DOM nodes (usually <th> or
- <tr> elements).
- """
+ def parse_tables(self):
tables = self._parse_tables(self._build_doc(), self.match, self.attrs)
- assert tables, 'No tables found'
- return (self._parse_rows(table) for table in tables)
-
- def parse_raw_data(self):
- """Return a list of the raw data from each table.
-
- Returns
- -------
- data : list of list of lists of str or unicode
- Each table's data is contained in a list of lists of str or
- unicode.
- """
- return [self._parse_raw_data(rows, self._text_getter,
- self._parse_columns)
- for rows in self.parse_rows()]
+ return (self._build_table(table) for table in tables)
- def _parse_raw_data(self, rows, text_getter, column_finder):
+ def _parse_raw_data(self, rows):
"""Parse the raw data into a list of lists.
Parameters
@@ -177,23 +186,8 @@ def _parse_raw_data(self, rows, text_getter, column_finder):
-------
data : list of list of strings
"""
- # callable is back in Python 3.2
- assert callable(text_getter), '"text_getter" must be callable'
- assert callable(column_finder), '"column_finder" must be callable'
-
- data = []
-
- for row in rows:
- if _remove_whitespace(text_getter(row)):
- col = []
-
- for el in column_finder(row):
- t = _remove_whitespace(text_getter(el))
-
- if t:
- col.append(t)
- data.append(col)
-
+ data = [[_remove_whitespace(self._text_getter(col)) for col in
+ self._parse_td(row)] for row in rows]
return data
def _text_getter(self, obj):
@@ -211,8 +205,8 @@ def _text_getter(self, obj):
"""
raise NotImplementedError
- def _parse_columns(self, obj):
- """Return the column elements from a row element.
+ def _parse_td(self, obj):
+ """Return the td elements from a row element.
Parameters
----------
@@ -252,7 +246,7 @@ def _parse_tables(self, doc, match, attrs):
"""
raise NotImplementedError
- def _parse_rows(self, table):
+ def _parse_tr(self, table):
"""Return the list of row elements from the parsed table element.
Parameters
@@ -267,6 +261,51 @@ def _parse_rows(self, table):
"""
raise NotImplementedError
+ def _parse_thead(self, table):
+ """Return the header of a table.
+
+ Parameters
+ ----------
+ table : node-like
+ A table element that contains row elements.
+
+ Returns
+ -------
+ thead : node-like
+ A <thead>...</thead> element.
+ """
+ raise NotImplementedError
+
+ def _parse_tbody(self, table):
+ """Return the body of the table.
+
+ Parameters
+ ----------
+ table : node-like
+ A table element that contains row elements.
+
+ Returns
+ -------
+ tbody : node-like
+ A <tbody>...</tbody> element.
+ """
+ raise NotImplementedError
+
+ def _parse_tfoot(self, table):
+ """Return the footer of the table if any.
+
+ Parameters
+ ----------
+ table : node-like
+ A table element that contains row elements.
+
+ Returns
+ -------
+ tfoot : node-like
+ A <tfoot>...</tfoot> element.
+ """
+ raise NotImplementedError
+
def _build_doc(self):
"""Return a tree-like object that can be used to iterate over the DOM.
@@ -276,8 +315,37 @@ def _build_doc(self):
"""
raise NotImplementedError
+ def _build_table(self, table):
+ header = self._parse_raw_thead(table)
+ body = self._parse_raw_tbody(table)
+ footer = self._parse_raw_tfoot(table)
+ return header, body, footer
+
+ def _parse_raw_thead(self, table):
+ thead = self._parse_thead(table)
+ res = []
+ if thead:
+ res = map(self._text_getter, self._parse_th(thead[0]))
+ return np.array(res).squeeze() if res and len(res) == 1 else res
+
+ def _parse_raw_tfoot(self, table):
+ tfoot = self._parse_tfoot(table)
+ res = []
+ if tfoot:
+ res = map(self._text_getter, self._parse_td(tfoot[0]))
+ return np.array(res).squeeze() if res and len(res) == 1 else res
+
+ def _parse_raw_tbody(self, table):
+ tbody = self._parse_tbody(table)
+
+ try:
+ res = self._parse_tr(tbody[0])
+ except IndexError:
+ res = self._parse_tr(table)
+ return self._parse_raw_data(res)
+
-class _BeautifulSoupFrameParser(_HtmlFrameParser):
+class _BeautifulSoupLxmlFrameParser(_HtmlFrameParser):
"""HTML to DataFrame parser that uses BeautifulSoup under the hood.
See Also
@@ -291,48 +359,68 @@ class _BeautifulSoupFrameParser(_HtmlFrameParser):
:class:`pandas.io.html._HtmlFrameParser`.
"""
def __init__(self, *args, **kwargs):
- super(_BeautifulSoupFrameParser, self).__init__(*args, **kwargs)
+ super(_BeautifulSoupLxmlFrameParser, self).__init__(*args, **kwargs)
+ from bs4 import SoupStrainer
+ self._strainer = SoupStrainer('table')
def _text_getter(self, obj):
return obj.text
- def _parse_columns(self, row):
+ def _parse_td(self, row):
return row.find_all(('td', 'th'))
- def _parse_rows(self, table):
- return table.find_all(('tr', 'thead', 'tfoot'))
+ def _parse_tr(self, element):
+ return element.find_all('tr')
- def _parse_tables(self, doc, match, attrs):
- tables = doc.find_all('table', attrs=attrs)
- assert tables, 'No tables found'
+ def _parse_th(self, element):
+ return element.find_all('th')
+
+ def _parse_thead(self, table):
+ return table.find_all('thead')
+
+ def _parse_tbody(self, table):
+ return table.find_all('tbody')
+
+ def _parse_tfoot(self, table):
+ return table.find_all('tfoot')
- tables = [table for table in tables
- if table.find(text=match) is not None]
- assert tables, "No tables found matching '{0}'".format(match.pattern)
+ def _parse_tables(self, doc, match, attrs):
+ element_name = self._strainer.name
+ tables = doc.find_all(element_name, attrs=attrs)
+ if not tables:
+ raise AssertionError('No tables found')
+
+ mts = [table.find(text=match) for table in tables]
+ matched_tables = [mt for mt in mts if mt is not None]
+ tables = list(set(mt.find_parent(element_name)
+ for mt in matched_tables))
+
+ if not tables:
+ raise AssertionError("No tables found matching "
+ "'{0}'".format(match.pattern))
+ #import ipdb; ipdb.set_trace()
return tables
+ def _setup_build_doc(self):
+ raw_text = _read(self.io)
+ if not raw_text:
+ raise AssertionError('No text parsed from document')
+ return raw_text
+
def _build_doc(self):
- if _is_url(self.io):
- try:
- with contextlib.closing(urllib2.urlopen(self.io)) as url:
- raw_text = url.read()
- except urllib2.URLError:
- raise ValueError('Invalid URL: "{0}"'.format(self.io))
- elif hasattr(self.io, 'read'):
- raw_text = self.io.read()
- elif os.path.isfile(self.io):
- with open(self.io) as f:
- raw_text = f.read()
- elif isinstance(self.io, basestring):
- raw_text = self.io
- else:
- raise ValueError("Cannot read object of"
- " type '{0}'".format(type(self.io)))
- assert raw_text, 'No text parsed from document'
+ from bs4 import BeautifulSoup
+ return BeautifulSoup(self._setup_build_doc(), features='lxml',
+ parse_only=self._strainer)
+
- from bs4 import BeautifulSoup, SoupStrainer
- strainer = SoupStrainer('table')
- return BeautifulSoup(raw_text, parse_only=strainer)
+class _BeautifulSoupHtml5LibFrameParser(_BeautifulSoupLxmlFrameParser):
+ def __init__(self, *args, **kwargs):
+ super(_BeautifulSoupHtml5LibFrameParser, self).__init__(*args,
+ **kwargs)
+
+ def _build_doc(self):
+ from bs4 import BeautifulSoup
+ return BeautifulSoup(self._setup_build_doc(), features='html5lib')
def _build_node_xpath_expr(attrs):
@@ -358,6 +446,7 @@ def _build_node_xpath_expr(attrs):
_re_namespace = {'re': 'http://exslt.org/regular-expressions'}
+_valid_schemes = 'http', 'file', 'ftp'
class _LxmlFrameParser(_HtmlFrameParser):
@@ -370,7 +459,7 @@ class _LxmlFrameParser(_HtmlFrameParser):
See Also
--------
_HtmlFrameParser
- _BeautifulSoupFrameParser
+ _BeautifulSoupLxmlFrameParser
Notes
-----
@@ -383,11 +472,12 @@ def __init__(self, *args, **kwargs):
def _text_getter(self, obj):
return obj.text_content()
- def _parse_columns(self, row):
+ def _parse_td(self, row):
return row.xpath('.//td|.//th')
- def _parse_rows(self, table):
- return table.xpath('(.//tr|.//thead|.//tfoot)[normalize-space()]')
+ def _parse_tr(self, table):
+ expr = './/tr[normalize-space()]'
+ return table.xpath(expr)
def _parse_tables(self, doc, match, kwargs):
pattern = match.pattern
@@ -406,42 +496,68 @@ def _parse_tables(self, doc, match, kwargs):
if kwargs:
xpath_expr += _build_node_xpath_expr(kwargs)
tables = doc.xpath(xpath_expr, namespaces=_re_namespace)
- assert tables, "No tables found matching regex '{0}'".format(pattern)
+ if not tables:
+ raise AssertionError("No tables found matching regex "
+ "'{0}'".format(pattern))
return tables
def _build_doc(self):
"""
Raises
------
- IOError
- * If a valid URL is detected, but for some reason cannot be parsed.
- This is probably due to a faulty or non-existent internet
- connection.
ValueError
* If a URL that lxml cannot parse is passed.
+ Exception
+ * Any other ``Exception`` thrown. For example, trying to parse a
+ URL that is syntactically correct on a machine with no internet
+ connection will fail.
+
See Also
--------
pandas.io.html._HtmlFrameParser._build_doc
"""
from lxml.html import parse, fromstring
+ from lxml.html.clean import clean_html
try:
# try to parse the input in the simplest way
- return parse(self.io)
- except (UnicodeDecodeError, IOError):
- # something went wrong, check for not-a-url because it's probably a
- # huge string blob
+ r = parse(self.io)
+ except (UnicodeDecodeError, IOError) as e:
+ # if the input is a blob of html goop
if not _is_url(self.io):
- return fromstring(self.io)
- elif urlparse.urlparse(self.io).scheme not in ('http', 'ftp',
- 'file'):
- raise ValueError('"{0}" does not have a valid URL'
- ' protocol'.format(self.io))
+ r = fromstring(self.io)
else:
- raise IOError('"{0}" is a valid URL, so you probably are not'
- ' properly connected to the'
- ' internet'.format(self.io))
+ # not a url
+ scheme = urlparse.urlparse(self.io).scheme
+ if scheme not in _valid_schemes:
+ # lxml can't parse it
+ msg = ('{0} is not a valid url scheme, valid schemes are '
+ '{1}').format(scheme, _valid_schemes)
+ raise ValueError(msg)
+ else:
+ # something else happened: maybe a faulty connection
+ raise e
+ return clean_html(r)
+
+ def _parse_tbody(self, table):
+ return table.xpath('.//tbody')
+
+ def _parse_thead(self, table):
+ return table.xpath('.//thead')
+
+ def _parse_tfoot(self, table):
+ return table.xpath('.//tfoot')
+
+ def _parse_raw_thead(self, table):
+ expr = './/thead//th'
+ return [_remove_whitespace(x.text_content()) for x in
+ table.xpath(expr)]
+
+ def _parse_raw_tfoot(self, table):
+ expr = './/tfoot//th'
+ return [_remove_whitespace(x.text_content()) for x in
+ table.xpath(expr)]
def _data_to_frame(data, header, index_col, infer_types, skiprows):
@@ -449,7 +565,7 @@ def _data_to_frame(data, header, index_col, infer_types, skiprows):
Parameters
----------
- data : list of lists of str or unicode
+ data : tuple of lists
The raw data to be placed into a DataFrame. This is a list of lists of
strings or unicode. If it helps, it can be thought of as a matrix of
strings instead.
@@ -491,7 +607,9 @@ def _data_to_frame(data, header, index_col, infer_types, skiprows):
-----
The `data` parameter is guaranteed not to be a list of empty lists.
"""
- df = DataFrame(data)
+ thead, tbody, tfoot = data
+ columns = thead or None
+ df = DataFrame(tbody, columns=columns)
if skiprows is not None:
it = _get_skiprows_iter(skiprows)
@@ -530,16 +648,81 @@ def _data_to_frame(data, header, index_col, infer_types, skiprows):
# drop by default
df.set_index(cols, inplace=True)
+ if df.index.nlevels == 1:
+ if not (df.index.name or df.index.name is None):
+ df.index.name = None
+ else:
+ names = [name or None for name in df.index.names]
+ df.index = MultiIndex.from_tuples(df.index.values, names=names)
return df
-_possible_parsers = {'lxml': _LxmlFrameParser,
- 'bs4': _BeautifulSoupFrameParser}
+_invalid_parsers = {'lxml': _LxmlFrameParser,
+ 'bs4': _BeautifulSoupLxmlFrameParser}
+_valid_parsers = {'html5lib': _BeautifulSoupHtml5LibFrameParser}
+_all_parsers = _valid_parsers.copy()
+_all_parsers.update(_invalid_parsers)
-def read_html(io, match='.+', flavor='bs4', header=None, index_col=None,
- skiprows=None, infer_types=True, attrs=None):
+def _parser_dispatch(flavor):
+ """Choose the parser based on the input flavor.
+
+ Parameters
+ ----------
+ flavor : str
+ The type of parser to use. This must be a valid backend.
+
+ Returns
+ -------
+ cls : _HtmlFrameParser subclass
+ The parser class based on the requested input flavor.
+
+ Raises
+ ------
+ AssertionError
+ * If `flavor` is not a valid backend.
+ """
+ valid_parsers = _valid_parsers.keys()
+ if flavor not in valid_parsers:
+ raise AssertionError('"{0}" is not a valid flavor'.format(flavor))
+
+ if flavor == 'bs4':
+ try:
+ import_module('lxml')
+ parser_t = _BeautifulSoupLxmlFrameParser
+ except ImportError:
+ try:
+ import_module('html5lib')
+ parser_t = _BeautifulSoupHtml5LibFrameParser
+ except ImportError:
+ raise ImportError("read_html does not support the native "
+ "Python 'html.parser' backend for bs4, "
+ "please install either 'lxml' or 'html5lib'")
+ elif flavor == 'html5lib':
+ try:
+ # much better than python's builtin
+ import_module('html5lib')
+ parser_t = _BeautifulSoupHtml5LibFrameParser
+ except ImportError:
+ raise ImportError("html5lib not found please install it")
+ else:
+ parser_t = _LxmlFrameParser
+ return parser_t
+
+
+def _parse(parser, io, match, flavor, header, index_col, skiprows, infer_types,
+ attrs):
+ # bonus: re.compile is idempotent under function iteration so you can pass
+ # a compiled regex to it and it will return itself
+ p = parser(io, re.compile(match), attrs)
+ tables = p.parse_tables()
+ return [_data_to_frame(table, header, index_col, infer_types, skiprows)
+ for table in tables]
+
+
+def read_html(io, match='.+', flavor='html5lib', header=None, index_col=None,
+ skiprows=None, infer_types=False, attrs=None):
r"""Read an HTML table into a DataFrame.
Parameters
@@ -547,7 +730,8 @@ def read_html(io, match='.+', flavor='bs4', header=None, index_col=None,
io : str or file-like
A string or file like object that can be either a url, a file-like
object, or a raw string containing HTML. Note that lxml only accepts
- the http, ftp and file url protocols.
+ the http, ftp and file url protocols. If you have a URI that starts
+ with ``'https'`` you might removing the ``'s'``.
match : str or regex, optional
The set of tables containing text matching this regex or string will be
@@ -557,10 +741,10 @@ def read_html(io, match='.+', flavor='bs4', header=None, index_col=None,
This value is converted to a regular expression so that there is
consistent behavior between Beautiful Soup and lxml.
- flavor : str, {'lxml', 'bs4'}
- The parsing engine to use under the hood. lxml is faster and bs4
- (Beautiful Soup 4) is better at parsing nested tags, which are not
- uncommon when parsing tables. Defaults to 'bs4'.
+ flavor : str, {'html5lib'}
+ The parsing engine to use under the hood. Right now only ``html5lib``
+ is supported because it returns correct output whereas ``lxml`` does
+ not.
header : int or array-like or None, optional
The row (or rows for a MultiIndex) to use to make the columns headers.
@@ -661,6 +845,7 @@ def read_html(io, match='.+', flavor='bs4', header=None, index_col=None,
Parse some spam infomation from the USDA:
+ >>> from pandas import read_html, DataFrame
>>> url = ('http://ndb.nal.usda.gov/ndb/foods/show/1732?fg=&man=&'
... 'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
>>> dfs = read_html(url, match='Water', header=0)
@@ -670,32 +855,16 @@ def read_html(io, match='.+', flavor='bs4', header=None, index_col=None,
You can pass nothing to the `match` argument:
+ >>> from pandas import read_html, DataFrame
>>> url = 'http://www.fdic.gov/bank/individual/failed/banklist.html'
>>> dfs = read_html(url)
>>> print(len(dfs)) # this will most likely be greater than 1
-
- Try a different parser:
-
- >>> url = 'http://www.fdic.gov/bank/individual/failed/banklist.html'
- >>> dfs = read_html(url, 'Florida', flavor='lxml', attrs={'id': 'table'})
- >>> assert dfs
- >>> assert isinstance(dfs, list)
- >>> assert all(map(lambda x: isinstance(x, DataFrame), dfs))
"""
- # annoying type check here because we don't want to spend time parsing HTML
- # only to end up failing because of an invalid value of skiprows
- if isinstance(skiprows, numbers.Integral):
- assert skiprows >= 0, ('cannot skip rows starting from the end of the '
- 'data (you passed a negative value)')
-
- valid_backends = _possible_parsers.keys()
- assert flavor in valid_backends, ("'{0}' is not a valid backend, the valid"
- " backends are "
- "{1}".format(flavor, valid_backends))
- parser = _possible_parsers[flavor]
-
- # bonus: re.compile is idempotent under function iteration so you can pass
- # a compiled regex to it and it will return itself
- p = parser(io, re.compile(match), attrs)
- return [_data_to_frame(data, header, index_col, infer_types, skiprows)
- for data in p.parse_raw_data()]
+ # Type check here. We don't want to parse only to fail because of an
+ # invalid value of an integer skiprows.
+ if isinstance(skiprows, numbers.Integral) and skiprows < 0:
+ raise AssertionError('cannot skip rows starting from the end of the '
+ 'data (you passed a negative value)')
+ parser = _parser_dispatch(flavor)
+ return _parse(parser, io, match, flavor, header, index_col, skiprows,
+ infer_types, attrs)
diff --git a/pandas/io/tests/data/banklist.csv b/pandas/io/tests/data/banklist.csv
new file mode 100644
index 0000000000000..6545d31fe5fd4
--- /dev/null
+++ b/pandas/io/tests/data/banklist.csv
@@ -0,0 +1,503 @@
+Bank Name,City,State,CERT #,Acquiring Institution,Closing Date,Updated Date
+Douglas County Bank,Douglasville,GA,21649,Hamilton State Bank,26-Apr-13,30-Apr-13
+Parkway Bank,Lenoir,NC,57158,"CertusBank, National Association",26-Apr-13,30-Apr-13
+Chipola Community Bank,Marianna,FL,58034,First Federal Bank of Florida,19-Apr-13,23-Apr-13
+Heritage Bank of North Florida,Orange Park,FL,26680,FirstAtlantic Bank,19-Apr-13,23-Apr-13
+First Federal Bank,Lexington,KY,29594,Your Community Bank,19-Apr-13,23-Apr-13
+Gold Canyon Bank,Gold Canyon,AZ,58066,"First Scottsdale Bank, National Association",5-Apr-13,9-Apr-13
+Frontier Bank,LaGrange,GA,16431,HeritageBank of the South,8-Mar-13,26-Mar-13
+Covenant Bank,Chicago,IL,22476,Liberty Bank and Trust Company,15-Feb-13,4-Mar-13
+1st Regents Bank,Andover,MN,57157,First Minnesota Bank,18-Jan-13,28-Feb-13
+Westside Community Bank,University Place,WA,33997,Sunwest Bank,11-Jan-13,24-Jan-13
+Community Bank of the Ozarks,Sunrise Beach,MO,27331,Bank of Sullivan,14-Dec-12,24-Jan-13
+Hometown Community Bank,Braselton,GA,57928,"CertusBank, National Association",16-Nov-12,24-Jan-13
+Citizens First National Bank,Princeton,IL,3731,Heartland Bank and Trust Company,2-Nov-12,24-Jan-13
+Heritage Bank of Florida,Lutz,FL,35009,Centennial Bank,2-Nov-12,24-Jan-13
+NOVA Bank,Berwyn,PA,27148,No Acquirer,26-Oct-12,24-Jan-13
+Excel Bank,Sedalia,MO,19189,Simmons First National Bank,19-Oct-12,24-Jan-13
+First East Side Savings Bank,Tamarac,FL,28144,Stearns Bank N.A.,19-Oct-12,24-Jan-13
+GulfSouth Private Bank,Destin,FL,58073,SmartBank,19-Oct-12,24-Jan-13
+First United Bank,Crete,IL,20685,"Old Plank Trail Community Bank, National Association",28-Sep-12,15-Nov-12
+Truman Bank,St. Louis,MO,27316,Simmons First National Bank,14-Sep-12,17-Dec-12
+First Commercial Bank,Bloomington,MN,35246,Republic Bank & Trust Company,7-Sep-12,17-Dec-12
+Waukegan Savings Bank,Waukegan,IL,28243,First Midwest Bank,3-Aug-12,11-Oct-12
+Jasper Banking Company,Jasper,GA,16240,Stearns Bank N.A.,27-Jul-12,17-Dec-12
+Second Federal Savings and Loan Association of Chicago,Chicago,IL,27986,Hinsdale Bank & Trust Company,20-Jul-12,14-Jan-13
+Heartland Bank,Leawood,KS,1361,Metcalf Bank,20-Jul-12,17-Dec-12
+First Cherokee State Bank,Woodstock,GA,32711,Community & Southern Bank,20-Jul-12,31-Oct-12
+Georgia Trust Bank,Buford,GA,57847,Community & Southern Bank,20-Jul-12,17-Dec-12
+The Royal Palm Bank of Florida,Naples,FL,57096,First National Bank of the Gulf Coast,20-Jul-12,7-Jan-13
+Glasgow Savings Bank,Glasgow,MO,1056,Regional Missouri Bank,13-Jul-12,11-Oct-12
+Montgomery Bank & Trust,Ailey,GA,19498,Ameris Bank,6-Jul-12,31-Oct-12
+The Farmers Bank of Lynchburg,Lynchburg,TN,1690,Clayton Bank and Trust,15-Jun-12,31-Oct-12
+Security Exchange Bank,Marietta,GA,35299,Fidelity Bank,15-Jun-12,10-Oct-12
+Putnam State Bank,Palatka,FL,27405,Harbor Community Bank,15-Jun-12,10-Oct-12
+Waccamaw Bank,Whiteville,NC,34515,First Community Bank,8-Jun-12,8-Nov-12
+Farmers' and Traders' State Bank,Shabbona,IL,9257,First State Bank,8-Jun-12,10-Oct-12
+Carolina Federal Savings Bank,Charleston,SC,35372,Bank of North Carolina,8-Jun-12,31-Oct-12
+First Capital Bank,Kingfisher,OK,416,F & M Bank,8-Jun-12,10-Oct-12
+"Alabama Trust Bank, National Association",Sylacauga,AL,35224,Southern States Bank,18-May-12,31-Oct-12
+"Security Bank, National Association",North Lauderdale,FL,23156,Banesco USA,4-May-12,31-Oct-12
+Palm Desert National Bank,Palm Desert,CA,23632,Pacific Premier Bank,27-Apr-12,31-Aug-12
+Plantation Federal Bank,Pawleys Island,SC,32503,First Federal Bank,27-Apr-12,31-Oct-12
+"Inter Savings Bank, fsb D/B/A InterBank, fsb",Maple Grove,MN,31495,Great Southern Bank,27-Apr-12,17-Oct-12
+HarVest Bank of Maryland,Gaithersburg,MD,57766,Sonabank,27-Apr-12,17-Oct-12
+Bank of the Eastern Shore,Cambridge,MD,26759,No Acquirer,27-Apr-12,17-Oct-12
+"Fort Lee Federal Savings Bank, FSB",Fort Lee,NJ,35527,Alma Bank,20-Apr-12,31-Aug-12
+Fidelity Bank,Dearborn,MI,33883,The Huntington National Bank,30-Mar-12,9-Aug-12
+Premier Bank,Wilmette,IL,35419,International Bank of Chicago,23-Mar-12,17-Oct-12
+Covenant Bank & Trust,Rock Spring,GA,58068,"Stearns Bank, N.A.",23-Mar-12,31-Oct-12
+New City Bank ,Chicago,IL,57597,No Acquirer,9-Mar-12,29-Oct-12
+Global Commerce Bank,Doraville,GA,34046,Metro City Bank,2-Mar-12,31-Oct-12
+Home Savings of America,Little Falls,MN,29178,No Acquirer,24-Feb-12,17-Dec-12
+Central Bank of Georgia,Ellaville,GA,5687,Ameris Bank,24-Feb-12,9-Aug-12
+SCB Bank,Shelbyville,IN,29761,"First Merchants Bank, National Association",10-Feb-12,25-Mar-13
+Charter National Bank and Trust,Hoffman Estates,IL,23187,"Barrington Bank & Trust Company, National Association",10-Feb-12,25-Mar-13
+BankEast,Knoxville,TN,19869,U.S.Bank National Association,27-Jan-12,8-Mar-13
+Patriot Bank Minnesota,Forest Lake,MN,34823,First Resource Bank,27-Jan-12,12-Sep-12
+Tennessee Commerce Bank ,Franklin,TN,35296,Republic Bank & Trust Company,27-Jan-12,20-Nov-12
+First Guaranty Bank and Trust Company of Jacksonville,Jacksonville,FL,16579,"CenterState Bank of Florida, N.A.",27-Jan-12,12-Sep-12
+American Eagle Savings Bank,Boothwyn,PA,31581,"Capital Bank, N.A.",20-Jan-12,25-Jan-13
+The First State Bank,Stockbridge,GA,19252,Hamilton State Bank,20-Jan-12,25-Jan-13
+Central Florida State Bank,Belleview,FL,57186,"CenterState Bank of Florida, N.A.",20-Jan-12,25-Jan-13
+Western National Bank,Phoenix,AZ,57917,Washington Federal,16-Dec-11,13-Aug-12
+Premier Community Bank of the Emerald Coast,Crestview,FL,58343,Summit Bank,16-Dec-11,12-Sep-12
+Central Progressive Bank,Lacombe,LA,19657,First NBC Bank,18-Nov-11,13-Aug-12
+Polk County Bank,Johnston,IA,14194,Grinnell State Bank,18-Nov-11,15-Aug-12
+Community Bank of Rockmart,Rockmart,GA,57860,Century Bank of Georgia,10-Nov-11,13-Aug-12
+SunFirst Bank,Saint George,UT,57087,Cache Valley Bank,4-Nov-11,16-Nov-12
+"Mid City Bank, Inc.",Omaha,NE,19397,Premier Bank,4-Nov-11,15-Aug-12
+All American Bank,Des Plaines,IL,57759,International Bank of Chicago,28-Oct-11,15-Aug-12
+Community Banks of Colorado,Greenwood Village,CO,21132,"Bank Midwest, N.A.",21-Oct-11,2-Jan-13
+Community Capital Bank,Jonesboro,GA,57036,State Bank and Trust Company,21-Oct-11,8-Nov-12
+Decatur First Bank,Decatur,GA,34392,Fidelity Bank,21-Oct-11,8-Nov-12
+Old Harbor Bank,Clearwater,FL,57537,1st United Bank,21-Oct-11,8-Nov-12
+Country Bank,Aledo,IL,35395,Blackhawk Bank & Trust,14-Oct-11,15-Aug-12
+First State Bank,Cranford,NJ,58046,Northfield Bank,14-Oct-11,8-Nov-12
+"Blue Ridge Savings Bank, Inc.",Asheville,NC,32347,Bank of North Carolina,14-Oct-11,8-Nov-12
+Piedmont Community Bank,Gray,GA,57256,State Bank and Trust Company,14-Oct-11,22-Jan-13
+Sun Security Bank,Ellington,MO,20115,Great Southern Bank,7-Oct-11,7-Nov-12
+The RiverBank,Wyoming,MN,10216,Central Bank,7-Oct-11,7-Nov-12
+First International Bank,Plano,TX,33513,American First National Bank,30-Sep-11,9-Oct-12
+Citizens Bank of Northern California,Nevada City,CA,33983,Tri Counties Bank,23-Sep-11,9-Oct-12
+Bank of the Commonwealth,Norfolk,VA,20408,Southern Bank and Trust Company,23-Sep-11,9-Oct-12
+The First National Bank of Florida,Milton,FL,25155,CharterBank,9-Sep-11,6-Sep-12
+CreekSide Bank,Woodstock,GA,58226,Georgia Commerce Bank,2-Sep-11,6-Sep-12
+Patriot Bank of Georgia,Cumming,GA,58273,Georgia Commerce Bank,2-Sep-11,2-Nov-12
+First Choice Bank,Geneva,IL,57212,Inland Bank & Trust,19-Aug-11,15-Aug-12
+First Southern National Bank,Statesboro,GA,57239,Heritage Bank of the South,19-Aug-11,2-Nov-12
+Lydian Private Bank,Palm Beach,FL,35356,"Sabadell United Bank, N.A.",19-Aug-11,2-Nov-12
+Public Savings Bank,Huntingdon Valley,PA,34130,"Capital Bank, N.A.",18-Aug-11,15-Aug-12
+The First National Bank of Olathe,Olathe,KS,4744,Enterprise Bank & Trust,12-Aug-11,23-Aug-12
+Bank of Whitman,Colfax,WA,22528,Columbia State Bank,5-Aug-11,16-Aug-12
+Bank of Shorewood,Shorewood,IL,22637,Heartland Bank and Trust Company,5-Aug-11,16-Aug-12
+Integra Bank National Association,Evansville,IN,4392,Old National Bank,29-Jul-11,16-Aug-12
+"BankMeridian, N.A.",Columbia,SC,58222,SCBT National Association,29-Jul-11,2-Nov-12
+Virginia Business Bank,Richmond,VA,58283,Xenith Bank,29-Jul-11,9-Oct-12
+Bank of Choice,Greeley,CO,2994,"Bank Midwest, N.A.",22-Jul-11,12-Sep-12
+LandMark Bank of Florida,Sarasota,FL,35244,American Momentum Bank,22-Jul-11,2-Nov-12
+Southshore Community Bank,Apollo Beach,FL,58056,American Momentum Bank,22-Jul-11,2-Nov-12
+Summit Bank,Prescott,AZ,57442,The Foothills Bank,15-Jul-11,16-Aug-12
+First Peoples Bank,Port St. Lucie,FL,34870,"Premier American Bank, N.A.",15-Jul-11,2-Nov-12
+High Trust Bank,Stockbridge,GA,19554,Ameris Bank,15-Jul-11,2-Nov-12
+One Georgia Bank,Atlanta,GA,58238,Ameris Bank,15-Jul-11,2-Nov-12
+Signature Bank,Windsor,CO,57835,Points West Community Bank,8-Jul-11,26-Oct-12
+Colorado Capital Bank,Castle Rock,CO,34522,First-Citizens Bank & Trust Company,8-Jul-11,15-Jan-13
+First Chicago Bank & Trust,Chicago,IL,27935,Northbrook Bank & Trust Company,8-Jul-11,9-Sep-12
+Mountain Heritage Bank,Clayton,GA,57593,First American Bank and Trust Company,24-Jun-11,2-Nov-12
+First Commercial Bank of Tampa Bay,Tampa,FL,27583,Stonegate Bank,17-Jun-11,2-Nov-12
+McIntosh State Bank,Jackson,GA,19237,Hamilton State Bank,17-Jun-11,2-Nov-12
+Atlantic Bank and Trust,Charleston,SC,58420,"First Citizens Bank and Trust Company, Inc.",3-Jun-11,31-Oct-12
+First Heritage Bank,Snohomish,WA,23626,Columbia State Bank,27-May-11,28-Jan-13
+Summit Bank,Burlington,WA,513,Columbia State Bank,20-May-11,22-Jan-13
+First Georgia Banking Company,Franklin,GA,57647,"CertusBank, National Association",20-May-11,13-Nov-12
+Atlantic Southern Bank,Macon,GA,57213,"CertusBank, National Association",20-May-11,31-Oct-12
+Coastal Bank,Cocoa Beach,FL,34898,"Florida Community Bank, a division of Premier American Bank, N.A.",6-May-11,30-Nov-12
+Community Central Bank,Mount Clemens,MI,34234,Talmer Bank & Trust,29-Apr-11,16-Aug-12
+The Park Avenue Bank,Valdosta,GA,19797,Bank of the Ozarks,29-Apr-11,30-Nov-12
+First Choice Community Bank,Dallas,GA,58539,Bank of the Ozarks,29-Apr-11,22-Jan-13
+Cortez Community Bank,Brooksville,FL,57625,"Florida Community Bank, a division of Premier American Bank, N.A.",29-Apr-11,30-Nov-12
+First National Bank of Central Florida,Winter Park,FL,26297,"Florida Community Bank, a division of Premier American Bank, N.A.",29-Apr-11,30-Nov-12
+Heritage Banking Group,Carthage,MS,14273,Trustmark National Bank,15-Apr-11,30-Nov-12
+Rosemount National Bank,Rosemount,MN,24099,Central Bank,15-Apr-11,16-Aug-12
+Superior Bank,Birmingham,AL,17750,"Superior Bank, National Association",15-Apr-11,30-Nov-12
+Nexity Bank,Birmingham,AL,19794,AloStar Bank of Commerce,15-Apr-11,4-Sep-12
+New Horizons Bank,East Ellijay,GA,57705,Citizens South Bank,15-Apr-11,16-Aug-12
+Bartow County Bank,Cartersville,GA,21495,Hamilton State Bank,15-Apr-11,22-Jan-13
+Nevada Commerce Bank,Las Vegas,NV,35418,City National Bank,8-Apr-11,9-Sep-12
+Western Springs National Bank and Trust,Western Springs,IL,10086,Heartland Bank and Trust Company,8-Apr-11,22-Jan-13
+The Bank of Commerce,Wood Dale,IL,34292,Advantage National Bank Group,25-Mar-11,22-Jan-13
+Legacy Bank,Milwaukee,WI,34818,Seaway Bank and Trust Company,11-Mar-11,12-Sep-12
+First National Bank of Davis,Davis,OK,4077,The Pauls Valley National Bank,11-Mar-11,20-Aug-12
+Valley Community Bank,St. Charles,IL,34187,First State Bank,25-Feb-11,12-Sep-12
+"San Luis Trust Bank, FSB ",San Luis Obispo,CA,34783,First California Bank,18-Feb-11,20-Aug-12
+Charter Oak Bank,Napa,CA,57855,Bank of Marin,18-Feb-11,12-Sep-12
+Citizens Bank of Effingham,Springfield,GA,34601,Heritage Bank of the South,18-Feb-11,2-Nov-12
+Habersham Bank,Clarkesville,GA,151,SCBT National Association,18-Feb-11,2-Nov-12
+Canyon National Bank,Palm Springs,CA,34692,Pacific Premier Bank,11-Feb-11,12-Sep-12
+Badger State Bank,Cassville,WI,13272,Royal Bank,11-Feb-11,12-Sep-12
+Peoples State Bank,Hamtramck,MI,14939,First Michigan Bank,11-Feb-11,22-Jan-13
+Sunshine State Community Bank,Port Orange,FL,35478,"Premier American Bank, N.A.",11-Feb-11,2-Nov-12
+Community First Bank Chicago,Chicago,IL,57948,Northbrook Bank & Trust Company,4-Feb-11,20-Aug-12
+North Georgia Bank,Watkinsville,GA,35242,BankSouth,4-Feb-11,2-Nov-12
+American Trust Bank,Roswell,GA,57432,Renasant Bank,4-Feb-11,31-Oct-12
+First Community Bank,Taos,NM,12261,"U.S. Bank, N.A.",28-Jan-11,12-Sep-12
+FirsTier Bank,Louisville,CO,57646,No Acquirer,28-Jan-11,12-Sep-12
+Evergreen State Bank,Stoughton,WI,5328,McFarland State Bank,28-Jan-11,12-Sep-12
+The First State Bank,Camargo,OK,2303,Bank 7,28-Jan-11,12-Sep-12
+United Western Bank,Denver,CO,31293,First-Citizens Bank & Trust Company,21-Jan-11,12-Sep-12
+The Bank of Asheville,Asheville,NC,34516,First Bank,21-Jan-11,2-Nov-12
+CommunitySouth Bank & Trust,Easley,SC,57868,"CertusBank, National Association",21-Jan-11,2-Nov-12
+Enterprise Banking Company,McDonough,GA,19758,No Acquirer,21-Jan-11,2-Nov-12
+Oglethorpe Bank,Brunswick,GA,57440,Bank of the Ozarks,14-Jan-11,2-Nov-12
+Legacy Bank,Scottsdale,AZ,57820,Enterprise Bank & Trust,7-Jan-11,12-Sep-12
+First Commercial Bank of Florida,Orlando,FL,34965,First Southern Bank,7-Jan-11,2-Nov-12
+Community National Bank,Lino Lakes,MN,23306,Farmers & Merchants Savings Bank,17-Dec-10,20-Aug-12
+First Southern Bank ,Batesville,AR,58052,Southern Bank,17-Dec-10,20-Aug-12
+"United Americas Bank, N.A.",Atlanta,GA,35065,State Bank and Trust Company,17-Dec-10,2-Nov-12
+"Appalachian Community Bank, FSB ",McCaysville,GA,58495,Peoples Bank of East Tennessee,17-Dec-10,31-Oct-12
+Chestatee State Bank,Dawsonville,GA,34578,Bank of the Ozarks,17-Dec-10,2-Nov-12
+"The Bank of Miami,N.A.",Coral Gables,FL,19040,1st United Bank,17-Dec-10,2-Nov-12
+Earthstar Bank,Southampton,PA,35561,Polonia Bank,10-Dec-10,20-Aug-12
+Paramount Bank,Farmington Hills,MI,34673,Level One Bank,10-Dec-10,20-Aug-12
+First Banking Center,Burlington,WI,5287,First Michigan Bank,19-Nov-10,20-Aug-12
+Allegiance Bank of North America,Bala Cynwyd,PA,35078,VIST Bank,19-Nov-10,20-Aug-12
+Gulf State Community Bank,Carrabelle,FL,20340,Centennial Bank,19-Nov-10,2-Nov-12
+Copper Star Bank,Scottsdale,AZ,35463,"Stearns Bank, N.A.",12-Nov-10,20-Aug-12
+Darby Bank & Trust Co.,Vidalia,GA,14580,Ameris Bank,12-Nov-10,15-Jan-13
+Tifton Banking Company,Tifton,GA,57831,Ameris Bank,12-Nov-10,2-Nov-12
+First Vietnamese American Bank,Westminster,CA,57885,Grandpoint Bank,5-Nov-10,12-Sep-12
+Pierce Commercial Bank,Tacoma,WA,34411,Heritage Bank,5-Nov-10,20-Aug-12
+Western Commercial Bank,Woodland Hills,CA,58087,First California Bank,5-Nov-10,12-Sep-12
+K Bank,Randallstown,MD,31263,Manufacturers and Traders Trust Company (M&T Bank),5-Nov-10,20-Aug-12
+"First Arizona Savings, A FSB",Scottsdale,AZ,32582,No Acquirer,22-Oct-10,20-Aug-12
+Hillcrest Bank,Overland Park,KS,22173,"Hillcrest Bank, N.A.",22-Oct-10,20-Aug-12
+First Suburban National Bank,Maywood,IL,16089,Seaway Bank and Trust Company,22-Oct-10,20-Aug-12
+The First National Bank of Barnesville,Barnesville,GA,2119,United Bank,22-Oct-10,2-Nov-12
+The Gordon Bank,Gordon,GA,33904,Morris Bank,22-Oct-10,2-Nov-12
+Progress Bank of Florida,Tampa,FL,32251,Bay Cities Bank,22-Oct-10,2-Nov-12
+First Bank of Jacksonville,Jacksonville,FL,27573,Ameris Bank,22-Oct-10,2-Nov-12
+Premier Bank,Jefferson City,MO,34016,Providence Bank,15-Oct-10,20-Aug-12
+WestBridge Bank and Trust Company,Chesterfield,MO,58205,Midland States Bank,15-Oct-10,20-Aug-12
+"Security Savings Bank, F.S.B.",Olathe,KS,30898,Simmons First National Bank,15-Oct-10,20-Aug-12
+Shoreline Bank,Shoreline,WA,35250,GBC International Bank,1-Oct-10,20-Aug-12
+Wakulla Bank,Crawfordville,FL,21777,Centennial Bank,1-Oct-10,2-Nov-12
+North County Bank,Arlington,WA,35053,Whidbey Island Bank,24-Sep-10,20-Aug-12
+Haven Trust Bank Florida,Ponte Vedra Beach,FL,58308,First Southern Bank,24-Sep-10,5-Nov-12
+Maritime Savings Bank,West Allis,WI,28612,"North Shore Bank, FSB",17-Sep-10,20-Aug-12
+Bramble Savings Bank,Milford,OH,27808,Foundation Bank,17-Sep-10,20-Aug-12
+The Peoples Bank,Winder,GA,182,Community & Southern Bank,17-Sep-10,5-Nov-12
+First Commerce Community Bank,Douglasville,GA,57448,Community & Southern Bank,17-Sep-10,15-Jan-13
+Bank of Ellijay,Ellijay,GA,58197,Community & Southern Bank,17-Sep-10,15-Jan-13
+ISN Bank,Cherry Hill,NJ,57107,Customers Bank,17-Sep-10,22-Aug-12
+Horizon Bank,Bradenton,FL,35061,Bank of the Ozarks,10-Sep-10,5-Nov-12
+Sonoma Valley Bank,Sonoma,CA,27259,Westamerica Bank,20-Aug-10,12-Sep-12
+Los Padres Bank,Solvang,CA,32165,Pacific Western Bank,20-Aug-10,12-Sep-12
+Butte Community Bank,Chico,CA,33219,"Rabobank, N.A.",20-Aug-10,12-Sep-12
+Pacific State Bank,Stockton,CA,27090,"Rabobank, N.A.",20-Aug-10,12-Sep-12
+ShoreBank,Chicago,IL,15640,Urban Partnership Bank,20-Aug-10,12-Sep-12
+Imperial Savings and Loan Association,Martinsville,VA,31623,"River Community Bank, N.A.",20-Aug-10,24-Aug-12
+Independent National Bank,Ocala,FL,27344,"CenterState Bank of Florida, N.A.",20-Aug-10,5-Nov-12
+Community National Bank at Bartow,Bartow,FL,25266,"CenterState Bank of Florida, N.A.",20-Aug-10,5-Nov-12
+Palos Bank and Trust Company,Palos Heights,IL,17599,First Midwest Bank,13-Aug-10,22-Aug-12
+Ravenswood Bank,Chicago,IL,34231,Northbrook Bank & Trust Company,6-Aug-10,22-Aug-12
+LibertyBank,Eugene,OR,31964,Home Federal Bank,30-Jul-10,22-Aug-12
+The Cowlitz Bank,Longview,WA,22643,Heritage Bank,30-Jul-10,22-Aug-12
+Coastal Community Bank,Panama City Beach,FL,9619,Centennial Bank,30-Jul-10,5-Nov-12
+Bayside Savings Bank,Port Saint Joe,FL,57669,Centennial Bank,30-Jul-10,5-Nov-12
+Northwest Bank & Trust,Acworth,GA,57658,State Bank and Trust Company,30-Jul-10,5-Nov-12
+Home Valley Bank ,Cave Junction,OR,23181,South Valley Bank & Trust,23-Jul-10,12-Sep-12
+SouthwestUSA Bank ,Las Vegas,NV,35434,Plaza Bank,23-Jul-10,22-Aug-12
+Community Security Bank ,New Prague,MN,34486,Roundbank,23-Jul-10,12-Sep-12
+Thunder Bank ,Sylvan Grove,KS,10506,The Bennington State Bank,23-Jul-10,13-Sep-12
+Williamsburg First National Bank ,Kingstree,SC,17837,"First Citizens Bank and Trust Company, Inc.",23-Jul-10,5-Nov-12
+Crescent Bank and Trust Company ,Jasper,GA,27559,Renasant Bank,23-Jul-10,5-Nov-12
+Sterling Bank ,Lantana,FL,32536,IBERIABANK,23-Jul-10,5-Nov-12
+"Mainstreet Savings Bank, FSB",Hastings,MI,28136,Commercial Bank,16-Jul-10,13-Sep-12
+Olde Cypress Community Bank,Clewiston,FL,28864,"CenterState Bank of Florida, N.A.",16-Jul-10,5-Nov-12
+Turnberry Bank,Aventura,FL,32280,NAFH National Bank,16-Jul-10,5-Nov-12
+Metro Bank of Dade County,Miami,FL,25172,NAFH National Bank,16-Jul-10,5-Nov-12
+First National Bank of the South,Spartanburg,SC,35383,NAFH National Bank,16-Jul-10,5-Nov-12
+Woodlands Bank,Bluffton,SC,32571,Bank of the Ozarks,16-Jul-10,5-Nov-12
+Home National Bank,Blackwell,OK,11636,RCB Bank,9-Jul-10,10-Dec-12
+USA Bank,Port Chester,NY,58072,New Century Bank,9-Jul-10,14-Sep-12
+Ideal Federal Savings Bank,Baltimore,MD,32456,No Acquirer,9-Jul-10,14-Sep-12
+Bay National Bank,Baltimore,MD,35462,"Bay Bank, FSB",9-Jul-10,15-Jan-13
+High Desert State Bank,Albuquerque,NM,35279,First American Bank,25-Jun-10,14-Sep-12
+First National Bank,Savannah,GA,34152,"The Savannah Bank, N.A.",25-Jun-10,5-Nov-12
+Peninsula Bank,Englewood,FL,26563,"Premier American Bank, N.A.",25-Jun-10,5-Nov-12
+Nevada Security Bank,Reno,NV,57110,Umpqua Bank,18-Jun-10,23-Aug-12
+Washington First International Bank,Seattle,WA,32955,East West Bank,11-Jun-10,14-Sep-12
+TierOne Bank,Lincoln,NE,29341,Great Western Bank,4-Jun-10,14-Sep-12
+Arcola Homestead Savings Bank,Arcola,IL,31813,No Acquirer,4-Jun-10,14-Sep-12
+First National Bank,Rosedale,MS,15814,The Jefferson Bank,4-Jun-10,5-Nov-12
+Sun West Bank,Las Vegas,NV,34785,City National Bank,28-May-10,14-Sep-12
+"Granite Community Bank, NA",Granite Bay,CA,57315,Tri Counties Bank,28-May-10,14-Sep-12
+Bank of Florida - Tampa,Tampa,FL,57814,EverBank,28-May-10,5-Nov-12
+Bank of Florida - Southwest,Naples,FL,35106,EverBank,28-May-10,5-Nov-12
+Bank of Florida - Southeast,Fort Lauderdale,FL,57360,EverBank,28-May-10,5-Nov-12
+Pinehurst Bank,Saint Paul,MN,57735,Coulee Bank,21-May-10,26-Oct-12
+Midwest Bank and Trust Company,Elmwood Park,IL,18117,"FirstMerit Bank, N.A.",14-May-10,23-Aug-12
+Southwest Community Bank,Springfield,MO,34255,Simmons First National Bank,14-May-10,23-Aug-12
+New Liberty Bank,Plymouth,MI,35586,Bank of Ann Arbor,14-May-10,23-Aug-12
+Satilla Community Bank,Saint Marys,GA,35114,Ameris Bank,14-May-10,5-Nov-12
+1st Pacific Bank of California,San Diego,CA,35517,City National Bank,7-May-10,13-Dec-12
+Towne Bank of Arizona,Mesa,AZ,57697,Commerce Bank of Arizona,7-May-10,23-Aug-12
+Access Bank,Champlin,MN,16476,PrinsBank,7-May-10,23-Aug-12
+The Bank of Bonifay,Bonifay,FL,14246,First Federal Bank of Florida,7-May-10,5-Nov-12
+Frontier Bank,Everett,WA,22710,"Union Bank, N.A.",30-Apr-10,15-Jan-13
+BC National Banks,Butler,MO,17792,Community First Bank,30-Apr-10,23-Aug-12
+Champion Bank,Creve Coeur,MO,58362,BankLiberty,30-Apr-10,23-Aug-12
+CF Bancorp,Port Huron,MI,30005,First Michigan Bank,30-Apr-10,15-Jan-13
+Westernbank Puerto Rico,Mayaguez,PR,31027,Banco Popular de Puerto Rico,30-Apr-10,5-Nov-12
+R-G Premier Bank of Puerto Rico,Hato Rey,PR,32185,Scotiabank de Puerto Rico,30-Apr-10,5-Nov-12
+Eurobank,San Juan,PR,27150,Oriental Bank and Trust,30-Apr-10,5-Nov-12
+Wheatland Bank,Naperville,IL,58429,Wheaton Bank & Trust,23-Apr-10,23-Aug-12
+Peotone Bank and Trust Company,Peotone,IL,10888,First Midwest Bank,23-Apr-10,23-Aug-12
+Lincoln Park Savings Bank,Chicago,IL,30600,Northbrook Bank & Trust Company,23-Apr-10,23-Aug-12
+New Century Bank,Chicago,IL,34821,"MB Financial Bank, N.A.",23-Apr-10,23-Aug-12
+Citizens Bank and Trust Company of Chicago,Chicago,IL,34658,Republic Bank of Chicago,23-Apr-10,23-Aug-12
+Broadway Bank,Chicago,IL,22853,"MB Financial Bank, N.A.",23-Apr-10,23-Aug-12
+"Amcore Bank, National Association",Rockford,IL,3735,Harris N.A.,23-Apr-10,23-Aug-12
+City Bank,Lynnwood,WA,21521,Whidbey Island Bank,16-Apr-10,14-Sep-12
+Tamalpais Bank,San Rafael,CA,33493,"Union Bank, N.A.",16-Apr-10,23-Aug-12
+Innovative Bank,Oakland,CA,23876,Center Bank,16-Apr-10,23-Aug-12
+Butler Bank,Lowell,MA,26619,People's United Bank,16-Apr-10,23-Aug-12
+Riverside National Bank of Florida,Fort Pierce,FL,24067,"TD Bank, N.A.",16-Apr-10,5-Nov-12
+AmericanFirst Bank,Clermont,FL,57724,"TD Bank, N.A.",16-Apr-10,31-Oct-12
+First Federal Bank of North Florida,Palatka,FL,28886,"TD Bank, N.A.",16-Apr-10,15-Jan-13
+Lakeside Community Bank,Sterling Heights,MI,34878,No Acquirer,16-Apr-10,23-Aug-12
+Beach First National Bank,Myrtle Beach,SC,34242,Bank of North Carolina,9-Apr-10,5-Nov-12
+Desert Hills Bank,Phoenix,AZ,57060,New York Community Bank,26-Mar-10,23-Aug-12
+Unity National Bank,Cartersville,GA,34678,Bank of the Ozarks,26-Mar-10,14-Sep-12
+Key West Bank,Key West,FL,34684,Centennial Bank,26-Mar-10,23-Aug-12
+McIntosh Commercial Bank,Carrollton,GA,57399,CharterBank,26-Mar-10,23-Aug-12
+State Bank of Aurora,Aurora,MN,8221,Northern State Bank,19-Mar-10,23-Aug-12
+First Lowndes Bank,Fort Deposit,AL,24957,First Citizens Bank,19-Mar-10,23-Aug-12
+Bank of Hiawassee,Hiawassee,GA,10054,Citizens South Bank,19-Mar-10,23-Aug-12
+Appalachian Community Bank,Ellijay,GA,33989,Community & Southern Bank,19-Mar-10,31-Oct-12
+Advanta Bank Corp.,Draper,UT,33535,No Acquirer,19-Mar-10,14-Sep-12
+Century Security Bank,Duluth,GA,58104,Bank of Upson,19-Mar-10,23-Aug-12
+American National Bank,Parma,OH,18806,The National Bank and Trust Company,19-Mar-10,23-Aug-12
+Statewide Bank,Covington,LA,29561,Home Bank,12-Mar-10,23-Aug-12
+Old Southern Bank,Orlando,FL,58182,Centennial Bank,12-Mar-10,23-Aug-12
+The Park Avenue Bank,New York,NY,27096,Valley National Bank,12-Mar-10,23-Aug-12
+LibertyPointe Bank,New York,NY,58071,Valley National Bank,11-Mar-10,23-Aug-12
+Centennial Bank,Ogden,UT,34430,No Acquirer,5-Mar-10,14-Sep-12
+Waterfield Bank,Germantown,MD,34976,No Acquirer,5-Mar-10,23-Aug-12
+Bank of Illinois,Normal,IL,9268,Heartland Bank and Trust Company,5-Mar-10,23-Aug-12
+Sun American Bank,Boca Raton,FL,27126,First-Citizens Bank & Trust Company,5-Mar-10,23-Aug-12
+Rainier Pacific Bank,Tacoma,WA,38129,Umpqua Bank,26-Feb-10,23-Aug-12
+Carson River Community Bank,Carson City,NV,58352,Heritage Bank of Nevada,26-Feb-10,15-Jan-13
+"La Jolla Bank, FSB",La Jolla,CA,32423,"OneWest Bank, FSB",19-Feb-10,24-Aug-12
+George Washington Savings Bank,Orland Park,IL,29952,"FirstMerit Bank, N.A.",19-Feb-10,24-Aug-12
+The La Coste National Bank,La Coste,TX,3287,Community National Bank,19-Feb-10,14-Sep-12
+Marco Community Bank,Marco Island,FL,57586,Mutual of Omaha Bank,19-Feb-10,24-Aug-12
+1st American State Bank of Minnesota,Hancock,MN,15448,"Community Development Bank, FSB",5-Feb-10,24-Aug-12
+American Marine Bank,Bainbridge Island,WA,16730,Columbia State Bank,29-Jan-10,24-Aug-12
+First Regional Bank,Los Angeles,CA,23011,First-Citizens Bank & Trust Company,29-Jan-10,24-Aug-12
+Community Bank and Trust,Cornelia,GA,5702,SCBT National Association,29-Jan-10,15-Jan-13
+"Marshall Bank, N.A.",Hallock,MN,16133,United Valley Bank,29-Jan-10,23-Aug-12
+Florida Community Bank,Immokalee,FL,5672,"Premier American Bank, N.A.",29-Jan-10,15-Jan-13
+First National Bank of Georgia,Carrollton,GA,16480,Community & Southern Bank,29-Jan-10,13-Dec-12
+Columbia River Bank,The Dalles,OR,22469,Columbia State Bank,22-Jan-10,14-Sep-12
+Evergreen Bank,Seattle,WA,20501,Umpqua Bank,22-Jan-10,15-Jan-13
+Charter Bank,Santa Fe,NM,32498,Charter Bank,22-Jan-10,23-Aug-12
+Bank of Leeton,Leeton,MO,8265,"Sunflower Bank, N.A.",22-Jan-10,15-Jan-13
+Premier American Bank,Miami,FL,57147,"Premier American Bank, N.A.",22-Jan-10,13-Dec-12
+Barnes Banking Company,Kaysville,UT,1252,No Acquirer,15-Jan-10,23-Aug-12
+St. Stephen State Bank,St. Stephen,MN,17522,First State Bank of St. Joseph,15-Jan-10,23-Aug-12
+Town Community Bank & Trust,Antioch,IL,34705,First American Bank,15-Jan-10,23-Aug-12
+Horizon Bank,Bellingham,WA,22977,Washington Federal Savings and Loan Association,8-Jan-10,23-Aug-12
+"First Federal Bank of California, F.S.B.",Santa Monica,CA,28536,"OneWest Bank, FSB",18-Dec-09,23-Aug-12
+Imperial Capital Bank,La Jolla,CA,26348,City National Bank,18-Dec-09,5-Sep-12
+Independent Bankers' Bank,Springfield,IL,26820,The Independent BankersBank (TIB),18-Dec-09,23-Aug-12
+New South Federal Savings Bank,Irondale,AL,32276,Beal Bank,18-Dec-09,23-Aug-12
+Citizens State Bank,New Baltimore,MI,1006,No Acquirer,18-Dec-09,5-Nov-12
+Peoples First Community Bank,Panama City,FL,32167,Hancock Bank,18-Dec-09,5-Nov-12
+RockBridge Commercial Bank,Atlanta,GA,58315,No Acquirer,18-Dec-09,5-Nov-12
+SolutionsBank,Overland Park,KS,4731,Arvest Bank,11-Dec-09,23-Aug-12
+"Valley Capital Bank, N.A.",Mesa,AZ,58399,Enterprise Bank & Trust,11-Dec-09,23-Aug-12
+"Republic Federal Bank, N.A.",Miami,FL,22846,1st United Bank,11-Dec-09,5-Nov-12
+Greater Atlantic Bank,Reston,VA,32583,Sonabank,4-Dec-09,5-Nov-12
+Benchmark Bank,Aurora,IL,10440,"MB Financial Bank, N.A.",4-Dec-09,23-Aug-12
+AmTrust Bank,Cleveland,OH,29776,New York Community Bank,4-Dec-09,5-Nov-12
+The Tattnall Bank,Reidsville,GA,12080,Heritage Bank of the South,4-Dec-09,5-Nov-12
+First Security National Bank,Norcross,GA,26290,State Bank and Trust Company,4-Dec-09,5-Nov-12
+The Buckhead Community Bank,Atlanta,GA,34663,State Bank and Trust Company,4-Dec-09,5-Nov-12
+Commerce Bank of Southwest Florida,Fort Myers,FL,58016,Central Bank,20-Nov-09,5-Nov-12
+Pacific Coast National Bank,San Clemente,CA,57914,Sunwest Bank,13-Nov-09,22-Aug-12
+Orion Bank,Naples,FL,22427,IBERIABANK,13-Nov-09,5-Nov-12
+"Century Bank, F.S.B.",Sarasota,FL,32267,IBERIABANK,13-Nov-09,22-Aug-12
+United Commercial Bank,San Francisco,CA,32469,East West Bank,6-Nov-09,5-Nov-12
+Gateway Bank of St. Louis,St. Louis,MO,19450,Central Bank of Kansas City,6-Nov-09,22-Aug-12
+Prosperan Bank,Oakdale,MN,35074,"Alerus Financial, N.A.",6-Nov-09,22-Aug-12
+Home Federal Savings Bank,Detroit,MI,30329,Liberty Bank and Trust Company,6-Nov-09,22-Aug-12
+United Security Bank,Sparta,GA,22286,Ameris Bank,6-Nov-09,15-Jan-13
+North Houston Bank,Houston,TX,18776,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+Madisonville State Bank,Madisonville,TX,33782,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+Citizens National Bank,Teague,TX,25222,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+Park National Bank,Chicago,IL,11677,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+Pacific National Bank,San Francisco,CA,30006,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+California National Bank,Los Angeles,CA,34659,U.S. Bank N.A.,30-Oct-09,5-Sep-12
+San Diego National Bank,San Diego,CA,23594,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+Community Bank of Lemont,Lemont,IL,35291,U.S. Bank N.A.,30-Oct-09,15-Jan-13
+"Bank USA, N.A.",Phoenix,AZ,32218,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+First DuPage Bank,Westmont,IL,35038,First Midwest Bank,23-Oct-09,22-Aug-12
+Riverview Community Bank,Otsego,MN,57525,Central Bank,23-Oct-09,22-Aug-12
+Bank of Elmwood,Racine,WI,18321,Tri City National Bank,23-Oct-09,22-Aug-12
+Flagship National Bank,Bradenton,FL,35044,First Federal Bank of Florida,23-Oct-09,22-Aug-12
+Hillcrest Bank Florida,Naples,FL,58336,Stonegate Bank,23-Oct-09,22-Aug-12
+American United Bank,Lawrenceville,GA,57794,Ameris Bank,23-Oct-09,5-Sep-12
+Partners Bank,Naples,FL,57959,Stonegate Bank,23-Oct-09,15-Jan-13
+San Joaquin Bank,Bakersfield,CA,23266,Citizens Business Bank,16-Oct-09,22-Aug-12
+Southern Colorado National Bank,Pueblo,CO,57263,Legacy Bank,2-Oct-09,5-Sep-12
+Jennings State Bank,Spring Grove,MN,11416,Central Bank,2-Oct-09,21-Aug-12
+Warren Bank,Warren,MI,34824,The Huntington National Bank,2-Oct-09,21-Aug-12
+Georgian Bank,Atlanta,GA,57151,"First Citizens Bank and Trust Company, Inc.",25-Sep-09,21-Aug-12
+"Irwin Union Bank, F.S.B.",Louisville,KY,57068,"First Financial Bank, N.A.",18-Sep-09,5-Sep-12
+Irwin Union Bank and Trust Company,Columbus,IN,10100,"First Financial Bank, N.A.",18-Sep-09,21-Aug-12
+Venture Bank,Lacey,WA,22868,First-Citizens Bank & Trust Company,11-Sep-09,21-Aug-12
+Brickwell Community Bank,Woodbury,MN,57736,CorTrust Bank N.A.,11-Sep-09,15-Jan-13
+"Corus Bank, N.A.",Chicago,IL,13693,"MB Financial Bank, N.A.",11-Sep-09,21-Aug-12
+First State Bank,Flagstaff,AZ,34875,Sunwest Bank,4-Sep-09,15-Jan-13
+Platinum Community Bank,Rolling Meadows,IL,35030,No Acquirer,4-Sep-09,21-Aug-12
+Vantus Bank,Sioux City,IA,27732,Great Southern Bank,4-Sep-09,21-Aug-12
+InBank,Oak Forest,IL,20203,"MB Financial Bank, N.A.",4-Sep-09,21-Aug-12
+First Bank of Kansas City,Kansas City,MO,25231,Great American Bank,4-Sep-09,21-Aug-12
+Affinity Bank,Ventura,CA,27197,Pacific Western Bank,28-Aug-09,21-Aug-12
+Mainstreet Bank,Forest Lake,MN,1909,Central Bank,28-Aug-09,21-Aug-12
+Bradford Bank,Baltimore,MD,28312,Manufacturers and Traders Trust Company (M&T Bank),28-Aug-09,15-Jan-13
+Guaranty Bank,Austin,TX,32618,BBVA Compass,21-Aug-09,21-Aug-12
+CapitalSouth Bank,Birmingham,AL,22130,IBERIABANK,21-Aug-09,15-Jan-13
+First Coweta Bank,Newnan,GA,57702,United Bank,21-Aug-09,15-Jan-13
+ebank,Atlanta,GA,34682,"Stearns Bank, N.A.",21-Aug-09,21-Aug-12
+Community Bank of Nevada,Las Vegas,NV,34043,No Acquirer,14-Aug-09,21-Aug-12
+Community Bank of Arizona,Phoenix,AZ,57645,MidFirst Bank,14-Aug-09,21-Aug-12
+"Union Bank, National Association",Gilbert,AZ,34485,MidFirst Bank,14-Aug-09,21-Aug-12
+Colonial Bank,Montgomery,AL,9609,"Branch Banking & Trust Company, (BB&T)",14-Aug-09,5-Sep-12
+Dwelling House Savings and Loan Association,Pittsburgh,PA,31559,"PNC Bank, N.A.",14-Aug-09,15-Jan-13
+Community First Bank,Prineville,OR,23268,Home Federal Bank,7-Aug-09,15-Jan-13
+Community National Bank of Sarasota County,Venice,FL,27183,"Stearns Bank, N.A.",7-Aug-09,20-Aug-12
+First State Bank,Sarasota,FL,27364,"Stearns Bank, N.A.",7-Aug-09,20-Aug-12
+Mutual Bank,Harvey,IL,18659,United Central Bank,31-Jul-09,20-Aug-12
+First BankAmericano,Elizabeth,NJ,34270,Crown Bank,31-Jul-09,20-Aug-12
+Peoples Community Bank,West Chester,OH,32288,"First Financial Bank, N.A.",31-Jul-09,20-Aug-12
+Integrity Bank,Jupiter,FL,57604,Stonegate Bank,31-Jul-09,20-Aug-12
+First State Bank of Altus,Altus,OK,9873,Herring Bank,31-Jul-09,20-Aug-12
+Security Bank of Jones County,Gray,GA,8486,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Security Bank of Houston County,Perry,GA,27048,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Security Bank of Bibb County,Macon,GA,27367,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Security Bank of North Metro,Woodstock,GA,57105,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Security Bank of North Fulton,Alpharetta,GA,57430,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Security Bank of Gwinnett County,Suwanee,GA,57346,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Waterford Village Bank,Williamsville,NY,58065,"Evans Bank, N.A.",24-Jul-09,20-Aug-12
+Temecula Valley Bank,Temecula,CA,34341,First-Citizens Bank & Trust Company,17-Jul-09,20-Aug-12
+Vineyard Bank,Rancho Cucamonga,CA,23556,California Bank & Trust,17-Jul-09,20-Aug-12
+BankFirst,Sioux Falls,SD,34103,"Alerus Financial, N.A.",17-Jul-09,20-Aug-12
+First Piedmont Bank,Winder,GA,34594,First American Bank and Trust Company,17-Jul-09,15-Jan-13
+Bank of Wyoming,Thermopolis,WY,22754,Central Bank & Trust,10-Jul-09,20-Aug-12
+Founders Bank,Worth,IL,18390,The PrivateBank and Trust Company,2-Jul-09,20-Aug-12
+Millennium State Bank of Texas,Dallas,TX,57667,State Bank of Texas,2-Jul-09,26-Oct-12
+First National Bank of Danville,Danville,IL,3644,"First Financial Bank, N.A.",2-Jul-09,20-Aug-12
+Elizabeth State Bank,Elizabeth,IL,9262,Galena State Bank and Trust Company,2-Jul-09,20-Aug-12
+Rock River Bank,Oregon,IL,15302,The Harvard State Bank,2-Jul-09,20-Aug-12
+First State Bank of Winchester,Winchester,IL,11710,The First National Bank of Beardstown,2-Jul-09,20-Aug-12
+John Warner Bank,Clinton,IL,12093,State Bank of Lincoln,2-Jul-09,20-Aug-12
+Mirae Bank,Los Angeles,CA,57332,Wilshire State Bank,26-Jun-09,20-Aug-12
+MetroPacific Bank,Irvine,CA,57893,Sunwest Bank,26-Jun-09,20-Aug-12
+Horizon Bank,Pine City,MN,9744,"Stearns Bank, N.A.",26-Jun-09,20-Aug-12
+Neighborhood Community Bank,Newnan,GA,35285,CharterBank,26-Jun-09,20-Aug-12
+Community Bank of West Georgia,Villa Rica,GA,57436,No Acquirer,26-Jun-09,17-Aug-12
+First National Bank of Anthony,Anthony,KS,4614,Bank of Kansas,19-Jun-09,17-Aug-12
+Cooperative Bank,Wilmington,NC,27837,First Bank,19-Jun-09,17-Aug-12
+Southern Community Bank,Fayetteville,GA,35251,United Community Bank,19-Jun-09,17-Aug-12
+Bank of Lincolnwood,Lincolnwood,IL,17309,Republic Bank of Chicago,5-Jun-09,17-Aug-12
+Citizens National Bank,Macomb,IL,5757,Morton Community Bank,22-May-09,4-Sep-12
+Strategic Capital Bank,Champaign,IL,35175,Midland States Bank,22-May-09,4-Sep-12
+"BankUnited, FSB",Coral Gables,FL,32247,BankUnited,21-May-09,17-Aug-12
+Westsound Bank,Bremerton,WA,34843,Kitsap Bank,8-May-09,4-Sep-12
+America West Bank,Layton,UT,35461,Cache Valley Bank,1-May-09,17-Aug-12
+Citizens Community Bank,Ridgewood,NJ,57563,North Jersey Community Bank,1-May-09,4-Sep-12
+"Silverton Bank, NA",Atlanta,GA,26535,No Acquirer,1-May-09,17-Aug-12
+First Bank of Idaho,Ketchum,ID,34396,"U.S. Bank, N.A.",24-Apr-09,17-Aug-12
+First Bank of Beverly Hills,Calabasas,CA,32069,No Acquirer,24-Apr-09,4-Sep-12
+Michigan Heritage Bank,Farmington Hills,MI,34369,Level One Bank,24-Apr-09,17-Aug-12
+American Southern Bank,Kennesaw,GA,57943,Bank of North Georgia,24-Apr-09,17-Aug-12
+Great Basin Bank of Nevada,Elko,NV,33824,Nevada State Bank,17-Apr-09,4-Sep-12
+American Sterling Bank,Sugar Creek,MO,8266,Metcalf Bank,17-Apr-09,31-Aug-12
+New Frontier Bank,Greeley,CO,34881,No Acquirer,10-Apr-09,4-Sep-12
+Cape Fear Bank,Wilmington,NC,34639,First Federal Savings and Loan Association,10-Apr-09,17-Aug-12
+Omni National Bank,Atlanta,GA,22238,No Acquirer,27-Mar-09,17-Aug-12
+"TeamBank, NA",Paola,KS,4754,Great Southern Bank,20-Mar-09,17-Aug-12
+Colorado National Bank,Colorado Springs,CO,18896,Herring Bank,20-Mar-09,17-Aug-12
+FirstCity Bank,Stockbridge,GA,18243,No Acquirer,20-Mar-09,17-Aug-12
+Freedom Bank of Georgia,Commerce,GA,57558,Northeast Georgia Bank,6-Mar-09,17-Aug-12
+Security Savings Bank,Henderson,NV,34820,Bank of Nevada,27-Feb-09,7-Sep-12
+Heritage Community Bank,Glenwood,IL,20078,"MB Financial Bank, N.A.",27-Feb-09,17-Aug-12
+Silver Falls Bank,Silverton,OR,35399,Citizens Bank,20-Feb-09,17-Aug-12
+Pinnacle Bank of Oregon,Beaverton,OR,57342,Washington Trust Bank of Spokane,13-Feb-09,17-Aug-12
+Corn Belt Bank & Trust Co.,Pittsfield,IL,16500,The Carlinville National Bank,13-Feb-09,17-Aug-12
+Riverside Bank of the Gulf Coast,Cape Coral,FL,34563,TIB Bank,13-Feb-09,17-Aug-12
+Sherman County Bank,Loup City,NE,5431,Heritage Bank,13-Feb-09,17-Aug-12
+County Bank,Merced,CA,22574,Westamerica Bank,6-Feb-09,4-Sep-12
+Alliance Bank,Culver City,CA,23124,California Bank & Trust,6-Feb-09,16-Aug-12
+FirstBank Financial Services,McDonough,GA,57017,Regions Bank,6-Feb-09,16-Aug-12
+Ocala National Bank,Ocala,FL,26538,"CenterState Bank of Florida, N.A.",30-Jan-09,4-Sep-12
+Suburban FSB,Crofton,MD,30763,Bank of Essex,30-Jan-09,16-Aug-12
+MagnetBank,Salt Lake City,UT,58001,No Acquirer,30-Jan-09,16-Aug-12
+1st Centennial Bank,Redlands,CA,33025,First California Bank,23-Jan-09,16-Aug-12
+Bank of Clark County,Vancouver,WA,34959,Umpqua Bank,16-Jan-09,16-Aug-12
+National Bank of Commerce,Berkeley,IL,19733,Republic Bank of Chicago,16-Jan-09,16-Aug-12
+Sanderson State Bank,Sanderson,TX,11568,The Pecos County State Bank,12-Dec-08,4-Sep-12
+Haven Trust Bank,Duluth,GA,35379,"Branch Banking & Trust Company, (BB&T)",12-Dec-08,16-Aug-12
+First Georgia Community Bank,Jackson,GA,34301,United Bank,5-Dec-08,16-Aug-12
+PFF Bank & Trust ,Pomona,CA,28344,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13
+Downey Savings & Loan,Newport Beach,CA,30968,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13
+Community Bank,Loganville,GA,16490,Bank of Essex,21-Nov-08,4-Sep-12
+Security Pacific Bank,Los Angeles,CA,23595,Pacific Western Bank,7-Nov-08,28-Aug-12
+"Franklin Bank, SSB",Houston,TX,26870,Prosperity Bank,7-Nov-08,16-Aug-12
+Freedom Bank,Bradenton,FL,57930,Fifth Third Bank,31-Oct-08,16-Aug-12
+Alpha Bank & Trust,Alpharetta,GA,58241,"Stearns Bank, N.A.",24-Oct-08,16-Aug-12
+Meridian Bank,Eldred,IL,13789,National Bank,10-Oct-08,31-May-12
+Main Street Bank,Northville,MI,57654,Monroe Bank & Trust,10-Oct-08,16-Aug-12
+Washington Mutual Bank,Henderson,NV,32633,JP Morgan Chase Bank,25-Sep-08,16-Aug-12
+Ameribank,Northfork,WV,6782,The Citizens Savings Bank,19-Sep-08,16-Aug-12
+Silver State Bank,Henderson,NV,34194,Nevada State Bank,5-Sep-08,16-Aug-12
+Integrity Bank,Alpharetta,GA,35469,Regions Bank,29-Aug-08,16-Aug-12
+Columbian Bank & Trust,Topeka,KS,22728,Citizens Bank & Trust,22-Aug-08,16-Aug-12
+First Priority Bank,Bradenton,FL,57523,SunTrust Bank,1-Aug-08,16-Aug-12
+"First Heritage Bank, NA",Newport Beach,CA,57961,Mutual of Omaha Bank,25-Jul-08,28-Aug-12
+First National Bank of Nevada,Reno,NV,27011,Mutual of Omaha Bank,25-Jul-08,28-Aug-12
+IndyMac Bank,Pasadena,CA,29730,"OneWest Bank, FSB",11-Jul-08,28-Aug-12
+"First Integrity Bank, NA",Staples,MN,12736,First International Bank and Trust,30-May-08,28-Aug-12
+"ANB Financial, NA",Bentonville,AR,33901,Pulaski Bank and Trust Company,9-May-08,28-Aug-12
+Hume Bank,Hume,MO,1971,Security Bank,7-Mar-08,28-Aug-12
+Douglass National Bank,Kansas City,MO,24660,Liberty Bank and Trust Company,25-Jan-08,26-Oct-12
+Miami Valley Bank,Lakeview,OH,16848,The Citizens Banking Company,4-Oct-07,28-Aug-12
+NetBank,Alpharetta,GA,32575,ING DIRECT,28-Sep-07,28-Aug-12
+Metropolitan Savings Bank,Pittsburgh,PA,35353,Allegheny Valley Bank of Pittsburgh,2-Feb-07,27-Oct-10
+Bank of Ephraim,Ephraim,UT,1249,Far West Bank,25-Jun-04,9-Apr-08
+Reliance Bank,White Plains,NY,26778,Union State Bank,19-Mar-04,9-Apr-08
+Guaranty National Bank of Tallahassee,Tallahassee,FL,26838,Hancock Bank of Florida,12-Mar-04,5-Jun-12
+Dollar Savings Bank,Newark,NJ,31330,No Acquirer,14-Feb-04,9-Apr-08
+Pulaski Savings Bank,Philadelphia,PA,27203,Earthstar Bank,14-Nov-03,22-Jul-05
+First National Bank of Blanchardville,Blanchardville,WI,11639,The Park Bank,9-May-03,5-Jun-12
+Southern Pacific Bank,Torrance,CA,27094,Beal Bank,7-Feb-03,20-Oct-08
+Farmers Bank of Cheneyville,Cheneyville,LA,16445,Sabine State Bank & Trust,17-Dec-02,20-Oct-04
+Bank of Alamo,Alamo,TN,9961,No Acquirer,8-Nov-02,18-Mar-05
+AmTrade International Bank,Atlanta,GA,33784,No Acquirer,30-Sep-02,11-Sep-06
+Universal Federal Savings Bank,Chicago,IL,29355,Chicago Community Bank,27-Jun-02,9-Apr-08
+Connecticut Bank of Commerce,Stamford,CT,19183,Hudson United Bank,26-Jun-02,14-Feb-12
+New Century Bank,Shelby Township,MI,34979,No Acquirer,28-Mar-02,18-Mar-05
+Net 1st National Bank,Boca Raton,FL,26652,Bank Leumi USA,1-Mar-02,9-Apr-08
+"NextBank, NA",Phoenix,AZ,22314,No Acquirer,7-Feb-02,27-Aug-10
+Oakwood Deposit Bank Co.,Oakwood,OH,8966,The State Bank & Trust Company,1-Feb-02,25-Oct-12
+Bank of Sierra Blanca,Sierra Blanca,TX,22002,The Security State Bank of Pecos,18-Jan-02,6-Nov-03
+"Hamilton Bank, NA",Miami,FL,24382,Israel Discount Bank of New York,11-Jan-02,5-Jun-12
+Sinclair National Bank,Gravette,AR,34248,Delta Trust & Bank,7-Sep-01,10-Feb-04
+"Superior Bank, FSB",Hinsdale,IL,32646,"Superior Federal, FSB",27-Jul-01,5-Jun-12
+Malta National Bank,Malta,OH,6629,North Valley Bank,3-May-01,18-Nov-02
+First Alliance Bank & Trust Co.,Manchester,NH,34264,Southern New Hampshire Bank & Trust,2-Feb-01,18-Feb-03
+National State Bank of Metropolis,Metropolis,IL,3815,Banterra Bank of Marion,14-Dec-00,17-Mar-05
+Bank of Honolulu,Honolulu,HI,21029,Bank of the Orient,13-Oct-00,17-Mar-05
diff --git a/pandas/io/tests/data/failed_banklist.html b/pandas/io/tests/data/banklist.html
similarity index 97%
rename from pandas/io/tests/data/failed_banklist.html
rename to pandas/io/tests/data/banklist.html
index ea2a5c27996bf..8e15f37ccffdb 100644
--- a/pandas/io/tests/data/failed_banklist.html
+++ b/pandas/io/tests/data/banklist.html
@@ -455,8 +455,25 @@ <h1>Each depositor insured to at least $250,000 per insured bank</h1>
</tr>
</thead>
<tbody>
+
<tr>
-
+ <td><a href="douglascb.html">Douglas County Bank</a></td>
+ <td headers="city">Douglasville</td>
+ <td headers="state">GA</td>
+ <td headers="CERT #">21649</td>
+ <td headers="AI">Hamilton State Bank</td>
+ <td headers="Closing Date">April 26, 2013</td>
+ <td headers="Updated">April 30, 2013</td>
+</tr>
+ <tr>
+ <td><a href="parkway.html">Parkway Bank</a></td>
+ <td headers="city">Lenoir</td>
+ <td headers="state">NC</td>
+ <td headers="CERT #">57158</td>
+ <td headers="AI">CertusBank, National Association</td>
+ <td headers="Closing Date">April 26, 2013</td>
+ <td headers="Updated">April 30, 2013</td>
+</tr>
<tr>
<td><a href="chipola.html">Chipola Community Bank</a></td>
<td headers="city">Marianna</td>
@@ -5230,7 +5247,7 @@ <h1>Each depositor insured to at least $250,000 per insured bank</h1>
<!-- Instruction: change "mm/dd/yyyy" to the date the document was created or last modfied -->
<font face="arial, helvetica, sans-serif" size="1" color="#000066">Last Updated
- 04/23/2013</font></td>
+ 04/30/2013</font></td>
<td align="right"><font face="arial, helvetica, sans-serif" size="1" color="#000066">
<!-- Instruction: change the link text and href value of "Insert_Content_Email_Address@fdic.gov" to the fdic.gov e-mail address of the document's point of contact -->
diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py
index d0468026caef3..6e2f6ec00d8ac 100644
--- a/pandas/io/tests/test_html.py
+++ b/pandas/io/tests/test_html.py
@@ -2,33 +2,48 @@
import re
from cStringIO import StringIO
from unittest import TestCase
+import collections
+import numbers
+from urllib2 import urlopen
+from contextlib import closing
+import warnings
import nose
import numpy as np
+from numpy.random import rand
from numpy.testing.decorators import slow
-from pandas.io.html import read_html, import_module
-from pandas import DataFrame, MultiIndex
-from pandas.util.testing import assert_frame_equal, network
+from pandas.io.html import read_html, import_module, _parse, _LxmlFrameParser
+from pandas.io.html import _BeautifulSoupHtml5LibFrameParser
+from pandas.io.html import _BeautifulSoupLxmlFrameParser, _remove_whitespace
+from pandas import DataFrame, MultiIndex, read_csv, Timestamp
+from pandas.util.testing import assert_frame_equal, network, get_data_path
+from pandas.util.testing import makeCustomDataframe as mkdf
-def _skip_if_no_parser():
+def _have_module(module_name):
try:
- import_module('lxml')
+ import_module(module_name)
+ return True
except ImportError:
- try:
- import_module('bs4')
- except ImportError:
- raise nose.SkipTest
+ return False
+
+def _skip_if_no(module_name):
+ if not _have_module(module_name):
+ raise nose.SkipTest
-DATA_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')
+
+def _skip_if_none(module_names):
+ if isinstance(module_names, basestring):
+ _skip_if_no(module_names)
+ else:
+ if not any(_have_module(module_name) for module_name in module_names):
+ raise nose.SkipTest
-def _run_read_html(*args, **kwargs):
- _skip_if_no_parser()
- return read_html(*args, **kwargs)
+DATA_PATH = get_data_path()
def isframe(x):
@@ -47,14 +62,36 @@ def assert_framelist_equal(list1, list2):
assert not frame_i.empty, 'frames are both empty'
+def _run_read_html(parser, io, match='.+', flavor='bs4', header=None,
+ index_col=None, skiprows=None, infer_types=False,
+ attrs=None):
+ if isinstance(skiprows, numbers.Integral) and skiprows < 0:
+ raise AssertionError('cannot skip rows starting from the end of the '
+ 'data (you passed a negative value)')
+ return _parse(parser, io, match, flavor, header, index_col, skiprows,
+ infer_types, attrs)
+
+
class TestLxmlReadHtml(TestCase):
+ def test_to_html_compat(self):
+ df = mkdf(4, 3, data_gen_f=lambda *args: rand(), c_idx_names=False,
+ r_idx_names=False).applymap('{0:.3f}'.format)
+ out = df.to_html()
+ res = self.run_read_html(out, attrs={'class': 'dataframe'},
+ index_col=0)[0]
+ print df.dtypes
+ print res.dtypes
+ assert_frame_equal(res, df)
+
def setUp(self):
self.spam_data = os.path.join(DATA_PATH, 'spam.html')
- self.banklist_data = os.path.join(DATA_PATH, 'failed_banklist.html')
+ self.banklist_data = os.path.join(DATA_PATH, 'banklist.html')
def run_read_html(self, *args, **kwargs):
kwargs['flavor'] = 'lxml'
- return _run_read_html(*args, **kwargs)
+ _skip_if_no('lxml')
+ parser = _LxmlFrameParser
+ return _run_read_html(parser, *args, **kwargs)
@network
def test_banklist_url(self):
@@ -85,13 +122,31 @@ def test_banklist(self):
@slow
def test_banklist_header(self):
+ def try_remove_ws(x):
+ try:
+ return _remove_whitespace(x)
+ except AttributeError:
+ return x
+
df = self.run_read_html(self.banklist_data, 'Metcalf',
- attrs={'id': 'table'}, header=0, skiprows=1)[0]
- self.assertFalse(df.empty)
- cols = ['Bank Name', 'City', 'State', 'CERT #',
- 'Acquiring Institution', 'Closing Date', 'Updated Date']
- self.assertListEqual(df.columns.values.tolist(), cols)
- self.assertEqual(df.shape[0], 499)
+ attrs={'id': 'table'}, infer_types=False)[0]
+ ground_truth = read_csv(os.path.join(DATA_PATH, 'banklist.csv'),
+ converters={'Closing Date': Timestamp,
+ 'Updated Date': Timestamp})
+ self.assertNotEqual(df.shape, ground_truth.shape)
+ self.assertRaises(AssertionError, assert_frame_equal, df,
+ ground_truth.applymap(try_remove_ws))
+
+ @slow
+ def test_gold_canyon(self):
+ gc = 'Gold Canyon'
+ with open(self.banklist_data, 'r') as f:
+ raw_text = f.read()
+
+ self.assertIn(gc, raw_text)
+ df = self.run_read_html(self.banklist_data, 'Gold Canyon',
+ attrs={'id': 'table'}, infer_types=False)[0]
+ self.assertNotIn(gc, df.to_string())
def test_spam(self):
df1 = self.run_read_html(self.spam_data, '.*Water.*',
@@ -99,8 +154,10 @@ def test_spam(self):
df2 = self.run_read_html(self.spam_data, 'Unit', infer_types=False)
assert_framelist_equal(df1, df2)
+ print df1[0]
- self.assertEqual(df1[0].ix[0, 0], 'Nutrient')
+ self.assertEqual(df1[0].ix[0, 0], 'Proximates')
+ self.assertEqual(df1[0].columns[0], 'Nutrient')
def test_spam_no_match(self):
dfs = self.run_read_html(self.spam_data)
@@ -113,8 +170,9 @@ def test_banklist_no_match(self):
self.assertIsInstance(df, DataFrame)
def test_spam_header(self):
- df = self.run_read_html(self.spam_data, '.*Water.*', header=0)[0]
- self.assertEqual(df.columns[0], 'Nutrient')
+ df = self.run_read_html(self.spam_data, '.*Water.*', header=0)
+ df = self.run_read_html(self.spam_data, '.*Water.*', header=1)[0]
+ self.assertEqual(df.columns[0], 'Water')
self.assertFalse(df.empty)
def test_skiprows_int(self):
@@ -179,26 +237,20 @@ def test_index(self):
df2 = self.run_read_html(self.spam_data, 'Unit', index_col=0)
assert_framelist_equal(df1, df2)
- def test_header(self):
- df1 = self.run_read_html(self.spam_data, '.*Water.*', header=0)
- df2 = self.run_read_html(self.spam_data, 'Unit', header=0)
- assert_framelist_equal(df1, df2)
- self.assertEqual(df1[0].columns[0], 'Nutrient')
-
def test_header_and_index(self):
- df1 = self.run_read_html(self.spam_data, '.*Water.*', header=0,
+ df1 = self.run_read_html(self.spam_data, '.*Water.*', header=1,
index_col=0)
- df2 = self.run_read_html(self.spam_data, 'Unit', header=0, index_col=0)
+ df2 = self.run_read_html(self.spam_data, 'Unit', header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_infer_types(self):
- df1 = self.run_read_html(self.spam_data, '.*Water.*', header=0,
- index_col=0, infer_types=False)
- df2 = self.run_read_html(self.spam_data, 'Unit', header=0, index_col=0,
+ df1 = self.run_read_html(self.spam_data, '.*Water.*', index_col=0,
+ infer_types=False)
+ df2 = self.run_read_html(self.spam_data, 'Unit', index_col=0,
infer_types=False)
assert_framelist_equal(df1, df2)
- df2 = self.run_read_html(self.spam_data, 'Unit', header=0, index_col=0,
+ df2 = self.run_read_html(self.spam_data, 'Unit', index_col=0,
infer_types=True)
self.assertRaises(AssertionError, assert_framelist_equal, df1, df2)
@@ -304,21 +356,137 @@ def test_negative_skiprows_banklist(self):
@slow
def test_multiple_matches(self):
- url = self.banklist_data
- dfs = self.run_read_html(url, match=r'Florida')
- self.assertIsInstance(dfs, list)
+ url = 'http://code.google.com/p/pythonxy/wiki/StandardPlugins'
+ dfs = self.run_read_html(url, match='Python',
+ attrs={'class': 'wikitable'})
self.assertGreater(len(dfs), 1)
- for df in dfs:
- self.assertIsInstance(df, DataFrame)
+
+ @network
+ def test_pythonxy_plugins_table(self):
+ url = 'http://code.google.com/p/pythonxy/wiki/StandardPlugins'
+ dfs = self.run_read_html(url, match='Python',
+ attrs={'class': 'wikitable'})
+ zz = [df.iloc[0, 0] for df in dfs]
+ self.assertListEqual(sorted(zz), sorted(['Python', 'SciTE']))
def test_invalid_flavor():
url = 'google.com'
- nose.tools.assert_raises(AssertionError, _run_read_html, url, 'google',
+ nose.tools.assert_raises(AssertionError, read_html, url, 'google',
flavor='not a* valid**++ flaver')
-class TestBs4ReadHtml(TestLxmlReadHtml):
+@slow
+class TestBs4LxmlParser(TestLxmlReadHtml):
+ def test(self):
+ pass
+
def run_read_html(self, *args, **kwargs):
kwargs['flavor'] = 'bs4'
- return _run_read_html(*args, **kwargs)
+ _skip_if_no('lxml')
+ parser = _BeautifulSoupLxmlFrameParser
+ return _run_read_html(parser, *args, **kwargs)
+
+
+@slow
+class TestBs4Html5LibParser(TestBs4LxmlParser):
+ def test(self):
+ pass
+
+ def run_read_html(self, *args, **kwargs):
+ kwargs['flavor'] = 'bs4'
+ _skip_if_no('html5lib')
+ parser = _BeautifulSoupHtml5LibFrameParser
+ return _run_read_html(parser, *args, **kwargs)
+
+ @slow
+ def test_banklist_header(self):
+ def try_remove_ws(x):
+ try:
+ return _remove_whitespace(x)
+ except AttributeError:
+ return x
+
+ df = self.run_read_html(self.banklist_data, 'Metcalf',
+ attrs={'id': 'table'}, infer_types=True)[0]
+ ground_truth = read_csv(os.path.join(DATA_PATH, 'banklist.csv'),
+ converters={'Updated Date': Timestamp,
+ 'Closing Date': Timestamp})
+ # these will not
+ self.assertTupleEqual(df.shape, ground_truth.shape)
+ old = ['First Vietnamese American Bank In Vietnamese',
+ 'Westernbank Puerto Rico En Espanol',
+ 'R-G Premier Bank of Puerto Rico En Espanol',
+ 'Eurobank En Espanol', 'Sanderson State Bank En Espanol',
+ 'Washington Mutual Bank (Including its subsidiary Washington '
+ 'Mutual Bank FSB)',
+ 'Silver State Bank En Espanol',
+ 'AmTrade International BankEn Espanol',
+ 'Hamilton Bank, NA En Espanol',
+ 'The Citizens Savings BankPioneer Community Bank, Inc.']
+ new = ['First Vietnamese American Bank', 'Westernbank Puerto Rico',
+ 'R-G Premier Bank of Puerto Rico', 'Eurobank',
+ 'Sanderson State Bank', 'Washington Mutual Bank',
+ 'Silver State Bank', 'AmTrade International Bank',
+ 'Hamilton Bank, NA', 'The Citizens Savings Bank']
+ dfnew = df.applymap(try_remove_ws).replace(old, new)
+ gtnew = ground_truth.applymap(try_remove_ws)
+ assert_frame_equal(dfnew, gtnew)
+
+ @slow
+ def test_gold_canyon(self):
+ gc = 'Gold Canyon'
+ with open(self.banklist_data, 'r') as f:
+ raw_text = f.read()
+
+ self.assertIn(gc, raw_text)
+ df = self.run_read_html(self.banklist_data, 'Gold Canyon',
+ attrs={'id': 'table'}, infer_types=False)[0]
+ self.assertIn(gc, df.to_string())
+
+
+def get_elements_from_url(url, flavor, element='table'):
+ _skip_if_no('bs4')
+ _skip_if_no(flavor)
+ from bs4 import BeautifulSoup, SoupStrainer
+ strainer = SoupStrainer(element)
+ with closing(urlopen(url)) as f:
+ soup = BeautifulSoup(f, features=flavor, parse_only=strainer)
+ return soup.find_all(element)
+
+
+@slow
+def test_bs4_finds_tables():
+ url = ('http://ndb.nal.usda.gov/ndb/foods/show/1732?fg=&man=&'
+ 'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
+ flavors = 'lxml', 'html5lib'
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore')
+
+ for flavor in flavors:
+ assert get_elements_from_url(url, flavor, 'table')
+
+
+def get_lxml_elements(url, element):
+
+ _skip_if_no('lxml')
+ from lxml.html import parse
+ doc = parse(url)
+ return doc.xpath('.//{0}'.format(element))
+
+
+@slow
+def test_lxml_finds_tables():
+ url = ('http://ndb.nal.usda.gov/ndb/foods/show/1732?fg=&man=&'
+ 'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
+ assert get_lxml_elements(url, 'table')
+
+
+@slow
+def test_lxml_finds_tbody():
+ url = ('http://ndb.nal.usda.gov/ndb/foods/show/1732?fg=&man=&'
+ 'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
+ assert get_lxml_elements(url, 'tbody')
+
+
+
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 86387989a7a87..f38fe61d453c2 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -126,16 +126,18 @@ def assert_almost_equal(a, b, check_less_precise = False):
return assert_dict_equal(a, b)
if isinstance(a, basestring):
- assert a == b, (a, b)
+ assert a == b, "{0} != {1}".format(a, b)
return True
if isiterable(a):
np.testing.assert_(isiterable(b))
- assert(len(a) == len(b))
+ na, nb = len(a), len(b)
+ assert na == nb, "{0} != {1}".format(na, nb)
+
if np.array_equal(a, b):
return True
else:
- for i in xrange(len(a)):
+ for i in xrange(na):
assert_almost_equal(a[i], b[i], check_less_precise)
return True
@@ -169,7 +171,7 @@ def assert_almost_equal(a, b, check_less_precise = False):
np.testing.assert_almost_equal(
1, a / b, decimal=decimal, err_msg=err_msg(a, b), verbose=False)
else:
- assert(a == b)
+ assert a == b, "%s != %s" % (a, b)
def is_sorted(seq):
| Some updates and bug fixes. See release notes for more details.
- ~~`vbench` stuff~~ sort of pointless right now since we don't really have control over the speed of the parsing library
- ~~Figure out why `lxml` chooses to ignore things~~ reported a bug w/ example to lxml people
- ~~Figure out why `bs4`'s `thead.find_all(['th', 'td'])` parses differently than `lxml`'s `thead.xpath('.//thead//th|.//thead//td')` even when `lxml` is the `bs4` backend.~~ same as above
| https://api.github.com/repos/pandas-dev/pandas/pulls/3616 | 2013-05-15T22:17:07Z | 2013-05-20T11:43:11Z | 2013-05-20T11:43:11Z | 2014-06-18T01:06:50Z |
BUG: (GH3611) Fix read_csv to correctly encode identical na_values | diff --git a/RELEASE.rst b/RELEASE.rst
index 1f5bd2591470b..503ae0e6bb30e 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -118,6 +118,8 @@ pandas 0.11.1
- Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
``np.nan`` or ``np.inf`` as appropriate (GH3590_)
- Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_)
+ - Fix ``read_csv`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
+ was failing (GH3611_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -166,6 +168,7 @@ pandas 0.11.1
.. _GH3610: https://github.com/pydata/pandas/issues/3610
.. _GH3596: https://github.com/pydata/pandas/issues/3596
.. _GH3435: https://github.com/pydata/pandas/issues/3435
+.. _GH3611: https://github.com/pydata/pandas/issues/3611
pandas 0.11.0
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 60b6d6c81fdd3..f4eeb36e5e8d0 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1712,12 +1712,14 @@ def _clean_na_values(na_values, keep_default_na=True):
else:
if not com.is_list_like(na_values):
na_values = [na_values]
- na_values = set(list(na_values))
+ na_values = set(_stringify_na_values(na_values))
if keep_default_na:
na_values = na_values | _NA_VALUES
return na_values
+def _stringify_na_values(na_values):
+ return [ str(x) for x in na_values ]
def _clean_index_names(columns, index_col):
if not _is_index_col(index_col):
@@ -1768,7 +1770,7 @@ def _get_empty_meta(columns, index_col, index_names):
def _get_na_values(col, na_values):
if isinstance(na_values, dict):
if col in na_values:
- return set(list(na_values[col]))
+ return set(_stringify_na_values(list(na_values[col])))
else:
return _NA_VALUES
else:
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 2e4689d7aa620..4a9004b7068ba 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -498,6 +498,17 @@ def test_quoting(self):
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assert_(len(df) == 3)
+ def test_non_string_na_values(self):
+ # GH3611, na_values that are not a string are an issue
+ with ensure_clean('__non_string_na_values__.csv') as path:
+ df = DataFrame({'A' : [-999, 2, 3], 'B' : [1.2, -999, 4.5]})
+ df.to_csv(path, sep=' ', index=False)
+ result1 = read_csv(path, sep= ' ', header=0, na_values=['-999.0','-999'])
+ result2 = read_csv(path, sep= ' ', header=0, na_values=[-999,-999.0])
+ result3 = read_csv(path, sep= ' ', header=0, na_values=[-999.0,-999])
+ tm.assert_frame_equal(result1,result2)
+ tm.assert_frame_equal(result2,result3)
+
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
| eg. na_values=[-999.0,-999] was failing
closes #3611
| https://api.github.com/repos/pandas-dev/pandas/pulls/3615 | 2013-05-15T22:16:46Z | 2013-05-16T01:01:21Z | 2013-05-16T01:01:20Z | 2014-06-27T14:18:58Z |
ENH add date to DatetimeIndex | diff --git a/RELEASE.rst b/RELEASE.rst
index 1f5bd2591470b..e44c2d0d37e08 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -49,6 +49,7 @@ pandas 0.11.1
- support datelike columns with a timezone as data_columns (GH2852_)
- table writing performance improvements.
- Add modulo operator to Series, DataFrame
+ - Add ``date`` method to DatetimeIndex
**API Changes**
@@ -275,7 +276,7 @@ pandas 0.11.0
on rhs (GH3216_)
- Treat boolean values as integers (values 1 and 0) for numeric
operations. (GH2641_)
- - Add ``time()`` method to DatetimeIndex (GH3180_)
+ - Add ``time`` method to DatetimeIndex (GH3180_)
- Return NA when using Series.str[...] for values that are not long enough
(GH3223_)
- Display cursor coordinate information in time-series plots (GH1670_)
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 46e2488fb70e6..a918e9eb18e8b 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1319,11 +1319,18 @@ def freqstr(self):
@property
def time(self):
"""
- Returns array of datetime.time. The time of the day
+ Returns numpy array of datetime.time. The time part of the Timestamps.
"""
# can't call self.map() which tries to treat func as ufunc
# and causes recursion warnings on python 2.6
- return _algos.arrmap_object(self.asobject, lambda x:x.time())
+ return _algos.arrmap_object(self.asobject, lambda x: x.time())
+
+ @property
+ def date(self):
+ """
+ Returns numpy array of datetime.date. The date part of the Timestamps.
+ """
+ return _algos.arrmap_object(self.asobject, lambda x: x.date())
def normalize(self):
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 9b20ac1e3f055..beee5caa871c5 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -1847,6 +1847,12 @@ def test_time(self):
expected = [t.time() for t in rng]
self.assert_((result == expected).all())
+ def test_date(self):
+ rng = pd.date_range('1/1/2000', freq='12H', periods=10)
+ result = pd.Index(rng).date
+ expected = [t.date() for t in rng]
+ self.assert_((result == expected).all())
+
class TestLegacySupport(unittest.TestCase):
_multiprocess_can_split_ = True
| Allows the date to be pulled out from a DatetimeIndex easily/efficiently.
Similar to #3180, which was for the time-part to be extracted.
_From SO question: http://stackoverflow.com/questions/16563552/pandas-fancy-indexing-a-dataframe_.
~~I haven't added this to release notes, as maybe too late for 11.1?~~
| https://api.github.com/repos/pandas-dev/pandas/pulls/3614 | 2013-05-15T21:00:01Z | 2013-05-17T16:00:23Z | 2013-05-17T16:00:22Z | 2014-06-26T11:26:58Z |
BUG: Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_) | diff --git a/RELEASE.rst b/RELEASE.rst
index d81a0e405ddd9..006da5f8e76af 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -114,6 +114,7 @@ pandas 0.11.1
in a frame (GH3594_)
- Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
``np.nan`` or ``np.inf`` as appropriate (GH3590_)
+ - Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -159,6 +160,7 @@ pandas 0.11.1
.. _GH3556: https://github.com/pydata/pandas/issues/3556
.. _GH3594: https://github.com/pydata/pandas/issues/3594
.. _GH3590: https://github.com/pydata/pandas/issues/3590
+.. _GH3610: https://github.com/pydata/pandas/issues/3610
.. _GH3435: https://github.com/pydata/pandas/issues/3435
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 7762803b029e9..093c61ba5af5c 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1713,7 +1713,7 @@ def aggregate(self, arg, *args, **kwargs):
result.insert(0, name, values)
result.index = np.arange(len(result))
- return result
+ return result.convert_objects()
def _aggregate_multiple_funcs(self, arg):
from pandas.tools.merge import concat
@@ -2054,7 +2054,7 @@ def _wrap_aggregated_output(self, output, names=None):
if self.axis == 1:
result = result.T
- return result
+ return result.convert_objects()
def _wrap_agged_blocks(self, blocks):
obj = self._obj_with_exclusions
@@ -2094,7 +2094,7 @@ def _wrap_agged_blocks(self, blocks):
if self.axis == 1:
result = result.T
- return result
+ return result.convert_objects()
from pandas.tools.plotting import boxplot_frame_groupby
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 23077655d5144..c1c4217cb6f62 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -1842,6 +1842,14 @@ def test_apply_with_mixed_dtype(self):
result = df.apply(lambda x: x, axis=1)
assert_series_equal(df.get_dtype_counts(), result.get_dtype_counts())
+
+ # GH 3610 incorrect dtype conversion with as_index=False
+ df = DataFrame({"c1" : [1,2,6,6,8]})
+ df["c2"] = df.c1/2.0
+ result1 = df.groupby("c2").mean().reset_index().c2
+ result2 = df.groupby("c2", as_index=False).mean().c2
+ assert_series_equal(result1,result2)
+
def test_groupby_list_infer_array_like(self):
result = self.df.groupby(list(self.df['A'])).mean()
expected = self.df.groupby(self.df['A']).mean()
| closes #3610
| https://api.github.com/repos/pandas-dev/pandas/pulls/3613 | 2013-05-15T18:57:28Z | 2013-05-15T19:27:59Z | 2013-05-15T19:27:59Z | 2014-06-12T13:38:01Z |
Categorical cleanup | diff --git a/RELEASE.rst b/RELEASE.rst
index 31627cec01d1e..5b4cd67f4a246 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -48,6 +48,7 @@ pandas 0.11.1
to append an index with a different name than the existing
- support datelike columns with a timezone as data_columns (GH2852_)
- table writing performance improvements.
+ - Simplified the API and added a describe method to Categorical
**API Changes**
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index f0ca08d22d7dc..bc2ff9bbe1013 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -606,8 +606,8 @@ versions of pandas, but users were generally discarding the NA group anyway
Grouping with ordered factors
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Categorical variables represented as instance of pandas's ``Factor`` class can
-be used as group keys. If so, the order of the levels will be preserved:
+Categorical variables represented as instance of pandas's ``Categorical`` class
+can be used as group keys. If so, the order of the levels will be preserved:
.. ipython:: python
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index a093a81a6516d..916bb2deb417e 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -5,6 +5,7 @@
from pandas.core.algorithms import factorize
from pandas.core.index import Index
import pandas.core.common as com
+from pandas.core.frame import DataFrame
def _cat_compare_op(op):
@@ -32,23 +33,68 @@ class Categorical(object):
Parameters
----------
labels : ndarray of integers
- levels : Index-like (unique)
-
- data : array-like
+ If levels is given, the integer at label `i` is the index of the level
+ for that label. I.e., the level at labels[i] is levels[labels[i]].
+ Otherwise, if levels is None, these are just the labels and the levels
+ are assumed to be the unique labels. See from_array.
+ levels : Index-like (unique), optional
+ The unique levels for each label. If not given, the levels are assumed
+ to be the unique values of labels.
+ name : str, optional
+ Name for the Categorical variable. If levels is None, will attempt
+ to infer from labels.
Returns
-------
**Attributes**
* labels : ndarray
* levels : ndarray
+
+ Examples
+ --------
+ >>> from pandas import Categorical
+ >>> Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3])
+ Categorical:
+ array([1, 2, 3, 1, 2, 3])
+ Levels (3): Int64Index([1, 2, 3])
+
+ >>> Categorical([0,1,2,0,1,2], ['a', 'b', 'c'])
+ Categorical:
+ array(['a', 'b', 'c', 'a', 'b', 'c'], dtype=object)
+ Levels (3): Index(['a', 'b', 'c'], dtype=object)
+
+ >>> Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
+ Categorical:
+ array(['a', 'b', 'c', 'a', 'b', 'c'], dtype=object)
+ Levels (3): Index(['a', 'b', 'c'], dtype=object)
"""
- def __init__(self, labels, levels, name=None):
+ def __init__(self, labels, levels=None, name=None):
+ if levels is None:
+ if name is None:
+ name = getattr(labels, 'name', None)
+ if isinstance(labels, Index) and hasattr(labels, 'factorize'):
+ labels, levels = labels.factorize()
+ else:
+ try:
+ labels, levels = factorize(labels, sort=True)
+ except TypeError:
+ labels, levels = factorize(labels, sort=False)
+
self.labels = labels
self.levels = levels
self.name = name
@classmethod
def from_array(cls, data):
+ """
+ Make a Categorical type from a single array-like object.
+
+ Parameters
+ ----------
+ data : array-like
+ Can be an Index or array-like. The levels are assumed to be
+ the unique values of `data`.
+ """
if isinstance(data, Index) and hasattr(data, 'factorize'):
labels, levels = data.factorize()
else:
@@ -131,4 +177,28 @@ def equals(self, other):
return (self.levels.equals(other.levels) and
np.array_equal(self.labels, other.labels))
-Factor = Categorical
+ def describe(self):
+ """
+ Returns a dataframe with frequency and counts by level.
+ """
+ #Hack?
+ grouped = DataFrame(self.labels).groupby(0)
+ counts = grouped.count().values.squeeze()
+ freqs = counts/float(counts.sum())
+ return DataFrame.from_dict(dict(
+ counts=counts,
+ freqs=freqs,
+ levels=self.levels)).set_index('levels')
+
+
+class Factor(Categorical):
+ def __init__(self, labels, levels=None, name=None):
+ from warnings import warn
+ warn("Factor is deprecated. Use Categorical instead", FutureWarning)
+ super(Factor, self).__init__(labels, levels, name)
+
+ @classmethod
+ def from_array(cls, data):
+ from warnings import warn
+ warn("Factor is deprecated. Use Categorical instead", FutureWarning)
+ return super(Factor, cls).from_array(data)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5b2dc6dd96efb..6190208432926 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1659,8 +1659,8 @@ def convert_objects(self, convert_dates=True, convert_numeric=False, copy=True):
-------
converted : DataFrame
"""
- return self._constructor(self._data.convert(convert_dates=convert_dates,
- convert_numeric=convert_numeric,
+ return self._constructor(self._data.convert(convert_dates=convert_dates,
+ convert_numeric=convert_numeric,
copy=copy))
#----------------------------------------------------------------------
@@ -3330,7 +3330,7 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
a reference to the filled object, which is self if inplace=True
limit : int, default None
Maximum size gap to forward or backward fill
- downcast : dict, default is None, a dict of item->dtype of what to
+ downcast : dict, default is None, a dict of item->dtype of what to
downcast if possible
See also
@@ -3380,7 +3380,7 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
result[k].fillna(v, inplace=True)
return result
else:
- new_data = self._data.fillna(value, inplace=inplace,
+ new_data = self._data.fillna(value, inplace=inplace,
downcast=downcast)
if inplace:
@@ -3791,8 +3791,8 @@ def combine(self, other, func, fill_value=None, overwrite=True):
result[col] = arr
# convert_objects just in case
- return self._constructor(result,
- index=new_index,
+ return self._constructor(result,
+ index=new_index,
columns=new_columns).convert_objects(
convert_dates=True,
copy=False)
@@ -3825,7 +3825,7 @@ def combiner(x, y, needs_i8_conversion=False):
y_values = y_values.view('i8')
else:
mask = isnull(x_values)
-
+
return expressions.where(mask, y_values, x_values, raise_on_error=True)
return self.combine(other, combiner, overwrite=False)
@@ -5406,11 +5406,11 @@ def group_agg(values, bounds, f):
def factor_agg(factor, vec, func):
"""
- Aggregate array based on Factor
+ Aggregate array based on Categorical
Parameters
----------
- factor : Factor
+ factor : Categorical
length n
vec : sequence
length n
@@ -5419,7 +5419,11 @@ def factor_agg(factor, vec, func):
Returns
-------
- ndarray corresponding to Factor levels
+ ndarray corresponding to factor levels
+
+ See Also
+ --------
+ pandas.Categorical
"""
indexer = np.argsort(factor.labels)
unique_labels = np.arange(len(factor.levels))
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 44b62991cf7a3..a19011d094499 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -10,7 +10,7 @@
_try_sort, _default_index,
_infer_dtype_from_scalar,
notnull)
-from pandas.core.categorical import Factor
+from pandas.core.categorical import Categorical
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_get_combined_index)
from pandas.core.indexing import _maybe_droplevels, _is_list_like
@@ -82,8 +82,8 @@ def panel_index(time, panels, names=['time', 'panel']):
(1962, 'C')], dtype=object)
"""
time, panels = _ensure_like_indices(time, panels)
- time_factor = Factor.from_array(time)
- panel_factor = Factor.from_array(panels)
+ time_factor = Categorical.from_array(time)
+ panel_factor = Categorical.from_array(panels)
labels = [time_factor.labels, panel_factor.labels]
levels = [time_factor.levels, panel_factor.levels]
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index 8595e2a91906d..9f67094cfd28a 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -749,9 +749,9 @@ def make_axis_dummies(frame, axis='minor', transform=None):
axis : {'major', 'minor'}, default 'minor'
transform : function, default None
Function to apply to axis labels first. For example, to
- get "day of week" dummies in a time series regression
+ get "day of week" dummies in a time series regression
you might call::
-
+
make_axis_dummies(panel, axis='major',
transform=lambda d: d.weekday())
Returns
@@ -810,6 +810,6 @@ def block2d_to_blocknd(values, items, shape, labels, ref_items=None):
def factor_indexer(shape, labels):
- """ given a tuple of shape and a list of Factor lables, return the expanded label indexer """
+ """ given a tuple of shape and a list of Categorical labels, return the expanded label indexer """
mult = np.array(shape)[::-1].cumprod()[::-1]
return com._ensure_platform_int(np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T)
diff --git a/pandas/tests/test_factor.py b/pandas/tests/test_factor.py
index de2fcaa94b59d..48db7afa29aaa 100644
--- a/pandas/tests/test_factor.py
+++ b/pandas/tests/test_factor.py
@@ -9,6 +9,7 @@
from pandas.core.api import value_counts
from pandas.core.categorical import Categorical
from pandas.core.index import Index, Int64Index, MultiIndex
+from pandas.core.frame import DataFrame
from pandas.util.testing import assert_almost_equal
import pandas.core.common as com
@@ -111,6 +112,29 @@ def test_na_flags_int_levels(self):
self.assert_(np.array_equal(com.isnull(cat), labels == -1))
+ def test_levels_none(self):
+ factor = Categorical(['a', 'b', 'b', 'a',
+ 'a', 'c', 'c', 'c'])
+ self.assert_(factor.equals(self.factor))
+
+ def test_describe(self):
+ # string type
+ desc = self.factor.describe()
+ expected = DataFrame.from_dict(dict(counts=[3, 2, 3],
+ freqs=[3/8., 2/8., 3/8.],
+ levels=['a', 'b', 'c'])
+ ).set_index('levels')
+ tm.assert_frame_equal(desc, expected)
+
+ # check an integer one
+ desc = Categorical([1,2,3,1,2,3,3,2,1,1,1]).describe()
+ expected = DataFrame.from_dict(dict(counts=[5, 3, 3],
+ freqs=[5/11., 3/11., 3/11.],
+ levels=[1,2,3]
+ )
+ ).set_index('levels')
+ tm.assert_frame_equal(desc, expected)
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 7f05a045e36af..a9428d472c42e 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -5,7 +5,7 @@
import itertools
import numpy as np
-from pandas.core.categorical import Factor
+from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame, _merge_doc
from pandas.core.generic import NDFrame
from pandas.core.groupby import get_group_index
@@ -1190,7 +1190,7 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None):
names = [None] * len(zipped)
if levels is None:
- levels = [Factor.from_array(zp).levels for zp in zipped]
+ levels = [Categorical.from_array(zp).levels for zp in zipped]
else:
levels = [_ensure_index(x) for x in levels]
else:
@@ -1228,7 +1228,7 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None):
levels.extend(concat_index.levels)
label_list.extend(concat_index.labels)
else:
- factor = Factor.from_array(concat_index)
+ factor = Categorical.from_array(concat_index)
levels.append(factor.levels)
label_list.append(factor.labels)
| Make categorical a little more user-friendly and add some documentation.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3603 | 2013-05-14T19:41:21Z | 2013-05-19T21:12:04Z | 2013-05-19T21:12:04Z | 2014-06-27T14:18:49Z |
BUG: Fix integer modulo and division to make integer and float dtypes work similarly for invalid values | diff --git a/RELEASE.rst b/RELEASE.rst
index 31627cec01d1e..0b6ed0b4d2853 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -48,6 +48,7 @@ pandas 0.11.1
to append an index with a different name than the existing
- support datelike columns with a timezone as data_columns (GH2852_)
- table writing performance improvements.
+ - Add modulo operator to Series, DataFrame
**API Changes**
@@ -110,6 +111,8 @@ pandas 0.11.1
is a ``list`` or ``tuple``.
- Fixed bug where a time-series was being selected in preference to an actual column name
in a frame (GH3594_)
+ - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
+ ``np.nan`` or ``np.inf`` as appropriate (GH3590_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -153,6 +156,7 @@ pandas 0.11.1
.. _GH3593: https://github.com/pydata/pandas/issues/3593
.. _GH3556: https://github.com/pydata/pandas/issues/3556
.. _GH3594: https://github.com/pydata/pandas/issues/3594
+.. _GH3590: https://github.com/pydata/pandas/issues/3590
.. _GH3435: https://github.com/pydata/pandas/issues/3435
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 74818f9542cae..3719d9eb09dee 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -9,6 +9,17 @@ enhancements along with a large number of bug fixes.
API changes
~~~~~~~~~~~
+ - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
+ ``np.nan`` or ``np.inf`` as appropriate (GH3590_). This correct a numpy bug that treats ``integer``
+ and ``float`` dtypes differently.
+
+ .. ipython:: python
+
+ p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })
+ p % 0
+ p % p
+ p / p
+ p / 0
Enhancements
~~~~~~~~~~~~
@@ -33,4 +44,5 @@ on GitHub for a complete list.
.. _GH3477: https://github.com/pydata/pandas/issues/3477
.. _GH3492: https://github.com/pydata/pandas/issues/3492
.. _GH3499: https://github.com/pydata/pandas/issues/3499
+.. _GH3590: https://github.com/pydata/pandas/issues/3590
.. _GH3435: https://github.com/pydata/pandas/issues/3435
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 2da2db052cb93..6bb4b36862956 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -793,13 +793,16 @@ def changeit():
# try to directly set by expanding our array to full
# length of the boolean
- om = other[mask]
- om_at = om.astype(result.dtype)
- if (om == om_at).all():
- new_other = result.values.copy()
- new_other[mask] = om_at
- result[:] = new_other
- return result, False
+ try:
+ om = other[mask]
+ om_at = om.astype(result.dtype)
+ if (om == om_at).all():
+ new_other = result.values.copy()
+ new_other[mask] = om_at
+ result[:] = new_other
+ return result, False
+ except:
+ pass
# we are forced to change the dtype of the result as the input isn't compatible
r, fill_value = _maybe_upcast(result, fill_value=other, dtype=dtype, copy=True)
@@ -948,6 +951,27 @@ def _lcd_dtypes(a_dtype, b_dtype):
return np.float64
return np.object
+def _fill_zeros(result, y, fill):
+ """ if we have an integer value (or array in y)
+ and we have 0's, fill them with the fill,
+ return the result """
+
+ if fill is not None:
+ if not isinstance(y, np.ndarray):
+ dtype, value = _infer_dtype_from_scalar(y)
+ y = pa.empty(result.shape,dtype=dtype)
+ y.fill(value)
+
+ if is_integer_dtype(y):
+
+ mask = y.ravel() == 0
+ if mask.any():
+ shape = result.shape
+ result, changed = _maybe_upcast_putmask(result.ravel(),mask,fill)
+ result = result.reshape(shape)
+
+ return result
+
def _interp_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, mask, limit=None):
view = arr.view(wrap_dtype)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5b2dc6dd96efb..c1f2f38dabd8b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -189,10 +189,12 @@ class DataConflictError(Exception):
# Factory helper methods
-def _arith_method(op, name, str_rep = None, default_axis='columns'):
+def _arith_method(op, name, str_rep = None, default_axis='columns', fill_zeros=None):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y, raise_on_error=True)
+ result = com._fill_zeros(result,y,fill_zeros)
+
except TypeError:
xrav = x.ravel()
result = np.empty(x.size, dtype=x.dtype)
@@ -841,20 +843,23 @@ def __contains__(self, key):
__sub__ = _arith_method(operator.sub, '__sub__', '-', default_axis=None)
__mul__ = _arith_method(operator.mul, '__mul__', '*', default_axis=None)
__truediv__ = _arith_method(operator.truediv, '__truediv__', '/',
- default_axis=None)
+ default_axis=None, fill_zeros=np.inf)
__floordiv__ = _arith_method(operator.floordiv, '__floordiv__',
- default_axis=None)
+ default_axis=None, fill_zeros=np.inf)
__pow__ = _arith_method(operator.pow, '__pow__', '**', default_axis=None)
+ __mod__ = _arith_method(operator.mod, '__mod__', '*', default_axis=None, fill_zeros=np.nan)
+
__radd__ = _arith_method(_radd_compat, '__radd__', default_axis=None)
__rmul__ = _arith_method(operator.mul, '__rmul__', default_axis=None)
__rsub__ = _arith_method(lambda x, y: y - x, '__rsub__', default_axis=None)
__rtruediv__ = _arith_method(lambda x, y: y / x, '__rtruediv__',
- default_axis=None)
+ default_axis=None, fill_zeros=np.inf)
__rfloordiv__ = _arith_method(lambda x, y: y // x, '__rfloordiv__',
- default_axis=None)
+ default_axis=None, fill_zeros=np.inf)
__rpow__ = _arith_method(lambda x, y: y ** x, '__rpow__',
default_axis=None)
+ __rmod__ = _arith_method(operator.mod, '__rmod__', default_axis=None, fill_zeros=np.nan)
# boolean operators
__and__ = _arith_method(operator.and_, '__and__', '&')
@@ -863,9 +868,10 @@ def __contains__(self, key):
# Python 2 division methods
if not py3compat.PY3:
- __div__ = _arith_method(operator.div, '__div__', '/', default_axis=None)
+ __div__ = _arith_method(operator.div, '__div__', '/',
+ default_axis=None, fill_zeros=np.inf)
__rdiv__ = _arith_method(lambda x, y: y / x, '__rdiv__',
- default_axis=None)
+ default_axis=None, fill_zeros=np.inf)
def __neg__(self):
arr = operator.neg(self.values)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index a2816d93d6f1e..e807cf3f1dfd4 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -55,14 +55,17 @@
# Wrapper function for Series arithmetic methods
-def _arith_method(op, name):
+def _arith_method(op, name, fill_zeros=None):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
try:
+
result = op(x, y)
+ result = com._fill_zeros(result,y,fill_zeros)
+
except TypeError:
result = pa.empty(len(x), dtype=x.dtype)
if isinstance(y, pa.Array):
@@ -1258,16 +1261,18 @@ def iteritems(self):
__add__ = _arith_method(operator.add, '__add__')
__sub__ = _arith_method(operator.sub, '__sub__')
__mul__ = _arith_method(operator.mul, '__mul__')
- __truediv__ = _arith_method(operator.truediv, '__truediv__')
- __floordiv__ = _arith_method(operator.floordiv, '__floordiv__')
+ __truediv__ = _arith_method(operator.truediv, '__truediv__', fill_zeros=np.inf)
+ __floordiv__ = _arith_method(operator.floordiv, '__floordiv__', fill_zeros=np.inf)
__pow__ = _arith_method(operator.pow, '__pow__')
+ __mod__ = _arith_method(operator.mod, '__mod__', fill_zeros=np.nan)
__radd__ = _arith_method(_radd_compat, '__add__')
__rmul__ = _arith_method(operator.mul, '__mul__')
__rsub__ = _arith_method(lambda x, y: y - x, '__sub__')
- __rtruediv__ = _arith_method(lambda x, y: y / x, '__truediv__')
- __rfloordiv__ = _arith_method(lambda x, y: y // x, '__floordiv__')
+ __rtruediv__ = _arith_method(lambda x, y: y / x, '__truediv__', fill_zeros=np.inf)
+ __rfloordiv__ = _arith_method(lambda x, y: y // x, '__floordiv__', fill_zeros=np.inf)
__rpow__ = _arith_method(lambda x, y: y ** x, '__pow__')
+ __rmod__ = _arith_method(operator.mod, '__mod__', fill_zeros=np.nan)
# comparisons
__gt__ = _comp_method(operator.gt, '__gt__')
@@ -1301,8 +1306,8 @@ def __invert__(self):
# Python 2 division operators
if not py3compat.PY3:
- __div__ = _arith_method(operator.div, '__div__')
- __rdiv__ = _arith_method(lambda x, y: y / x, '__div__')
+ __div__ = _arith_method(operator.div, '__div__', fill_zeros=np.inf)
+ __rdiv__ = _arith_method(lambda x, y: y / x, '__div__', fill_zeros=np.inf)
__idiv__ = __div__
#----------------------------------------------------------------------
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index ce89dda63597f..f77503bd1487d 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4011,6 +4011,50 @@ def test_operators_none_as_na(self):
result = op(df.fillna(7), df)
assert_frame_equal(result, expected)
+ def test_modulo(self):
+
+ # GH3590, modulo as ints
+ p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })
+
+ ### this is technically wrong as the integer portion is coerced to float ###
+ expected = DataFrame({ 'first' : Series([0,0,0,0],dtype='float64'), 'second' : Series([np.nan,np.nan,np.nan,0]) })
+ result = p % p
+ assert_frame_equal(result,expected)
+
+ # numpy has a slightly different (wrong) treatement
+ result2 = DataFrame(p.values % p.values,index=p.index,columns=p.columns,dtype='float64')
+ result2.iloc[0:3,1] = np.nan
+ assert_frame_equal(result2,expected)
+
+ result = p % 0
+ expected = DataFrame(np.nan,index=p.index,columns=p.columns)
+ assert_frame_equal(result,expected)
+
+ # numpy has a slightly different (wrong) treatement
+ result2 = DataFrame(p.values.astype('float64') % 0,index=p.index,columns=p.columns)
+ assert_frame_equal(result2,expected)
+
+ def test_div(self):
+
+ # integer div, but deal with the 0's
+ p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })
+ result = p / p
+
+ ### this is technically wrong as the integer portion is coerced to float ###
+ expected = DataFrame({ 'first' : Series([1,1,1,1],dtype='float64'), 'second' : Series([np.inf,np.inf,np.inf,1]) })
+ assert_frame_equal(result,expected)
+
+ result2 = DataFrame(p.values.astype('float64')/p.values,index=p.index,columns=p.columns).fillna(np.inf)
+ assert_frame_equal(result2,expected)
+
+ result = p / 0
+ expected = DataFrame(np.inf,index=p.index,columns=p.columns)
+ assert_frame_equal(result,expected)
+
+ # numpy has a slightly different (wrong) treatement
+ result2 = DataFrame(p.values.astype('float64')/0,index=p.index,columns=p.columns).fillna(np.inf)
+ assert_frame_equal(result2,expected)
+
def test_logical_operators(self):
import operator
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index d98cfe3e385cb..11ede8d759b38 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1766,6 +1766,49 @@ def test_neg(self):
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
+ def test_modulo(self):
+
+ # GH3590, modulo as ints
+ p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })
+ result = p['first'] % p['second']
+ expected = Series(p['first'].values % p['second'].values,dtype='float64')
+ expected.iloc[0:3] = np.nan
+ assert_series_equal(result,expected)
+
+ result = p['first'] % 0
+ expected = Series(np.nan,index=p.index)
+ assert_series_equal(result,expected)
+
+ p = p.astype('float64')
+ result = p['first'] % p['second']
+ expected = Series(p['first'].values % p['second'].values)
+ assert_series_equal(result,expected)
+
+ def test_div(self):
+
+ # integer div, but deal with the 0's
+ p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })
+ result = p['first'] / p['second']
+ expected = Series(p['first'].values / p['second'].values,dtype='float64')
+ expected.iloc[0:3] = np.inf
+ assert_series_equal(result,expected)
+
+ result = p['first'] / 0
+ expected = Series(np.inf,index=p.index)
+ assert_series_equal(result,expected)
+
+ p = p.astype('float64')
+ result = p['first'] / p['second']
+ expected = Series(p['first'].values / p['second'].values)
+ assert_series_equal(result,expected)
+
+ p = DataFrame({ 'first' : [3,4,5,8], 'second' : [1,1,1,1] })
+ result = p['first'] / p['second']
+ if py3compat.PY3:
+ assert_series_equal(result,p['first'].astype('float64'))
+ else:
+ assert_series_equal(result,p['first'])
+
def test_operators(self):
def _check_op(series, other, op, pos_only=False):
| closes #3590
This is a numpy oddity that treats them differently.
```
In [131]: p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })
In [132]: p % 0
Out[132]:
first second
0 NaN NaN
1 NaN NaN
2 NaN NaN
3 NaN NaN
In [133]: p % p
Out[133]:
first second
0 0 NaN
1 0 NaN
2 0 NaN
3 0 0
In [134]: p / p
Out[134]:
first second
0 1 inf
1 1 inf
2 1 inf
3 1 1.000000
In [135]: p / 0
Out[135]:
first second
0 inf inf
1 inf inf
2 inf inf
3 inf inf
```
Numpy does this (on integers), floats are as like above
```
In [3]: x
Out[3]:
array([[3, 0],
[4, 0],
[5, 0],
[8, 3]])
In [4]: x % 0
Out[4]:
array([[0, 0],
[0, 0],
[0, 0],
[0, 0]])
In [5]: x % x
Out[5]:
array([[0, 0],
[0, 0],
[0, 0],
[0, 0]])
In [6]: x / x
Out[6]:
array([[1, 0],
[1, 0],
[1, 0],
[1, 1]])
In [7]: x / 0
Out[7]:
array([[0, 0],
[0, 0],
[0, 0],
[0, 0]])
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3600 | 2013-05-14T14:28:08Z | 2013-05-14T21:48:10Z | 2013-05-14T21:48:10Z | 2014-07-16T08:09:04Z |
BUG: Add squeeze keyword to groupby to allow reduction in returned type | diff --git a/RELEASE.rst b/RELEASE.rst
index 006da5f8e76af..1f5bd2591470b 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -65,6 +65,9 @@ pandas 0.11.1
``timedelta64[ns]`` to ``object/int`` (GH3425_)
- Do not allow datetimelike/timedeltalike creation except with valid types
(e.g. cannot pass ``datetime64[ms]``) (GH3423_)
+ - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
+ DataFrame -> Series if groups are unique. Regression from 0.10.1,
+ partial revert on (GH2893_) with (GH3596_)
**Bug Fixes**
@@ -161,6 +164,7 @@ pandas 0.11.1
.. _GH3594: https://github.com/pydata/pandas/issues/3594
.. _GH3590: https://github.com/pydata/pandas/issues/3590
.. _GH3610: https://github.com/pydata/pandas/issues/3610
+.. _GH3596: https://github.com/pydata/pandas/issues/3596
.. _GH3435: https://github.com/pydata/pandas/issues/3435
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 3719d9eb09dee..c89118298a675 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -21,6 +21,26 @@ API changes
p / p
p / 0
+ - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
+ DataFrame -> Series if groups are unique. This is a Regression from 0.10.1.
+ We are reverting back to the prior behavior. This means groupby will return the
+ same shaped objects whether the groups are unique or not. revert on (GH2893_)
+ with (GH3596_).
+
+ .. ipython:: python
+
+ df2 = DataFrame([{"val1": 1, "val2" : 20}, {"val1":1, "val2": 19},
+ {"val1":1, "val2": 27}, {"val1":1, "val2": 12}])
+ def func(dataf):
+ return dataf["val2"] - dataf["val2"].mean()
+
+ # squeezing the result frame to a series (because we have unique groups)
+ df2.groupby("val1", squeeze=True).apply(func)
+
+ # no squeezing (the default, and behavior in 0.10.1)
+ df2.groupby("val1").apply(func)
+
+
Enhancements
~~~~~~~~~~~~
- ``pd.read_html()`` can now parse HTML string, files or urls and return dataframes
@@ -44,5 +64,7 @@ on GitHub for a complete list.
.. _GH3477: https://github.com/pydata/pandas/issues/3477
.. _GH3492: https://github.com/pydata/pandas/issues/3492
.. _GH3499: https://github.com/pydata/pandas/issues/3499
+.. _GH2893: https://github.com/pydata/pandas/issues/2893
+.. _GH3596: https://github.com/pydata/pandas/issues/3596
.. _GH3590: https://github.com/pydata/pandas/issues/3590
.. _GH3435: https://github.com/pydata/pandas/issues/3435
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ed90aab715cfd..4a80e2f65fd71 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -107,7 +107,7 @@ def get(self, key, default=None):
return default
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
- group_keys=True):
+ group_keys=True, squeeze=False):
"""
Group series using mapper (dict or key function, apply given function
to group, return result as series) or by a series of columns
@@ -131,6 +131,9 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
Sort group keys. Get better performance by turning this off
group_keys : boolean, default True
When calling apply, add group keys to index to identify pieces
+ squeeze : boolean, default False
+ reduce the dimensionaility of the return type if possible, otherwise
+ return a consistent type
Examples
--------
@@ -150,7 +153,8 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
from pandas.core.groupby import groupby
axis = self._get_axis_number(axis)
return groupby(self, by, axis=axis, level=level, as_index=as_index,
- sort=sort, group_keys=group_keys)
+ sort=sort, group_keys=group_keys,
+ squeeze=squeeze)
def asfreq(self, freq, method=None, how=None, normalize=False):
"""
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 093c61ba5af5c..122355581956d 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -169,7 +169,7 @@ class GroupBy(object):
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
- sort=True, group_keys=True):
+ sort=True, group_keys=True, squeeze=False):
self._selection = selection
if isinstance(obj, NDFrame):
@@ -189,6 +189,7 @@ def __init__(self, obj, keys=None, axis=0, level=None,
self.keys = keys
self.sort = sort
self.group_keys = group_keys
+ self.squeeze = squeeze
if grouper is None:
grouper, exclusions = _get_grouper(obj, keys, axis=axis,
@@ -1841,15 +1842,22 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
all_indexed_same = _all_indexes_same([x.index for x in values])
singular_series = len(values) == 1 and applied_index.nlevels == 1
- # assign the name to this series
- if singular_series:
- values[0].name = keys[0]
+ # GH3596
+ # provide a reduction (Frame -> Series) if groups are unique
+ if self.squeeze:
- # GH2893
- # we have series in the values array, we want to produce a series:
- # if any of the sub-series are not indexed the same
- # OR we don't have a multi-index and we have only a single values
- if singular_series or not all_indexed_same:
+ # assign the name to this series
+ if singular_series:
+ values[0].name = keys[0]
+
+ # GH2893
+ # we have series in the values array, we want to produce a series:
+ # if any of the sub-series are not indexed the same
+ # OR we don't have a multi-index and we have only a single values
+ return self._concat_objects(keys, values,
+ not_indexed_same=not_indexed_same)
+
+ if not all_indexed_same:
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index c1c4217cb6f62..c56fca49cce48 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -263,14 +263,14 @@ def test_groupby_nonobject_dtype(self):
def test_groupby_return_type(self):
- # GH2893
+ # GH2893, return a reduced type
df1 = DataFrame([{"val1": 1, "val2" : 20}, {"val1":1, "val2": 19},
{"val1":2, "val2": 27}, {"val1":2, "val2": 12}])
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
- result = df1.groupby("val1").apply(func)
+ result = df1.groupby("val1", squeeze=True).apply(func)
self.assert_(isinstance(result,Series))
df2 = DataFrame([{"val1": 1, "val2" : 20}, {"val1":1, "val2": 19},
@@ -278,9 +278,14 @@ def func(dataf):
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
- result = df2.groupby("val1").apply(func)
+ result = df2.groupby("val1", squeeze=True).apply(func)
self.assert_(isinstance(result,Series))
+ # GH3596, return a consistent type (regression in 0.11 from 0.10.1)
+ df = DataFrame([[1,1],[1,1]],columns=['X','Y'])
+ result = df.groupby('X',squeeze=False).count()
+ self.assert_(isinstance(result,DataFrame))
+
def test_agg_regression1(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.mean)
| This will allow a reduction in the returned type from DataFrame -> Series
if the groups are unique.
This is a fix for a regression from 0.10.1.
Allows functionaility in #2893, by specifying `squeeze=True`
in groupby call. #3596 functionaility is back as the default
This returns a Series because we are passing `squeeze=True`
```
In [9]: df2 = DataFrame([{"val1": 1, "val2" : 20},
{"val1":1, "val2": 19},{"val1":1, "val2": 27}, {"val1":1, "val2": 12}])
In [10]: df2
Out[10]:
val1 val2
0 1 20
1 1 19
2 1 27
3 1 12
In [11]: def func(dataf):
....: return dataf["val2"] - dataf["val2"].mean()
....:
In [12]: df2.groupby("val1", squeeze=True).apply(func)
Out[12]:
0 0.5
1 -0.5
2 7.5
3 -7.5
Name: 1, dtype: float64
```
Traditionally returns a DataFrame (even though have unique groups)
Implicity (`squeeze=False`)
```
In [13]: df = DataFrame([[1,1],[1,1]],columns=['X','Y'])
In [14]: df
Out[14]:
X Y
0 1 1
1 1 1
In [15]: df.groupby('X').count()
Out[15]:
X Y
X
1 2 2
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3599 | 2013-05-14T11:54:01Z | 2013-05-15T20:19:14Z | 2013-05-15T20:19:14Z | 2014-07-06T08:10:53Z |
BUG: Fixed bug where a time-series was being selected in preference to an actual column name | diff --git a/RELEASE.rst b/RELEASE.rst
index 70060ee1b3497..31627cec01d1e 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -108,6 +108,8 @@ pandas 0.11.1
- Fixed bug in reset_index with ``NaN`` in a multi-index (GH3586_)
- ``fillna`` methods now raise a ``TypeError`` when the ``value`` parameter
is a ``list`` or ``tuple``.
+ - Fixed bug where a time-series was being selected in preference to an actual column name
+ in a frame (GH3594_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -150,6 +152,7 @@ pandas 0.11.1
.. _GH3579: https://github.com/pydata/pandas/issues/3579
.. _GH3593: https://github.com/pydata/pandas/issues/3593
.. _GH3556: https://github.com/pydata/pandas/issues/3556
+.. _GH3594: https://github.com/pydata/pandas/issues/3594
.. _GH3435: https://github.com/pydata/pandas/issues/3435
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 8b6acd8c7c53e..bc8b7a3646a33 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -883,6 +883,10 @@ def _convert_to_index_sliceable(obj, key):
elif isinstance(key, basestring):
+ # we are an actual column
+ if key in obj._data.items:
+ return None
+
# we need a timelike key here
if idx.is_all_dates:
try:
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 5ff832431c917..2e4689d7aa620 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -1730,6 +1730,33 @@ def test_fwf(self):
self.assertRaises(ValueError, read_fwf, StringIO(data3),
colspecs=colspecs, widths=[6, 10, 10, 7])
+ def test_fwf_regression(self):
+ # GH 3594
+ #### turns out 'T060' is parsable as a datetime slice!
+
+ tzlist = [1,10,20,30,60,80,100]
+ ntz = len(tzlist)
+ tcolspecs = [16]+[8]*ntz
+ tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
+ data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
+ 2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
+ 2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
+ 2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
+ 2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
+"""
+
+ df = read_fwf(StringIO(data),
+ index_col=0,
+ header=None,
+ names=tcolnames,
+ widths=tcolspecs,
+ parse_dates=True,
+ date_parser=lambda s: datetime.strptime(s,'%Y%j%H%M%S'))
+
+ for c in df.columns:
+ res = df.loc[:,c]
+ self.assert_(len(res))
+
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index e893f83f6d640..9694cc005d178 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -43,6 +43,10 @@ def shape(self):
def axes(self):
return [self.sp_frame.columns, self.sp_frame.index]
+ @property
+ def items(self):
+ return self.sp_frame.columns
+
@property
def blocks(self):
""" return our series in the column order """
| close #3594
| https://api.github.com/repos/pandas-dev/pandas/pulls/3597 | 2013-05-14T00:36:12Z | 2013-05-14T10:51:08Z | 2013-05-14T10:51:08Z | 2014-07-04T18:54:47Z |
BUG: (GH3593) fixed a bug in the incorrect conversion of datetime64[ns] in combine_first | diff --git a/RELEASE.rst b/RELEASE.rst
index 4085d350f3766..862d458f34e22 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -104,6 +104,7 @@ pandas 0.11.1
- ``combine_first`` not returning the same dtype in cases where it can (GH3552_)
- Fixed bug with ``Panel.transpose`` argument aliases (GH3556_)
- Fixed platform bug in ``PeriodIndex.take`` (GH3579_)
+ - Fixed bud in incorrect conversion of datetime64[ns] in ``combine_first`` (GH3593_)
- Fixed bug in reset_index with ``NaN`` in a multi-index (GH3586_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
@@ -145,6 +146,7 @@ pandas 0.11.1
.. _GH3586: https://github.com/pydata/pandas/issues/3586
.. _GH3493: https://github.com/pydata/pandas/issues/3493
.. _GH3579: https://github.com/pydata/pandas/issues/3579
+.. _GH3593: https://github.com/pydata/pandas/issues/3593
.. _GH3556: https://github.com/pydata/pandas/issues/3556
diff --git a/pandas/core/common.py b/pandas/core/common.py
index f71627be1296d..2da2db052cb93 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -921,6 +921,33 @@ def _possibly_downcast_to_dtype(result, dtype):
return result
+def _lcd_dtypes(a_dtype, b_dtype):
+ """ return the lcd dtype to hold these types """
+
+ if is_datetime64_dtype(a_dtype) or is_datetime64_dtype(b_dtype):
+ return _NS_DTYPE
+ elif is_timedelta64_dtype(a_dtype) or is_timedelta64_dtype(b_dtype):
+ return _TD_DTYPE
+ elif is_complex_dtype(a_dtype):
+ if is_complex_dtype(b_dtype):
+ return a_dtype
+ return np.float64
+ elif is_integer_dtype(a_dtype):
+ if is_integer_dtype(b_dtype):
+ if a_dtype.itemsize == b_dtype.itemsize:
+ return a_dtype
+ return np.int64
+ return np.float64
+ elif is_float_dtype(a_dtype):
+ if is_float_dtype(b_dtype):
+ if a_dtype.itemsize == b_dtype.itemsize:
+ return a_dtype
+ else:
+ return np.float64
+ elif is_integer(b_dtype):
+ return np.float64
+ return np.object
+
def _interp_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, mask, limit=None):
view = arr.view(wrap_dtype)
@@ -1524,6 +1551,13 @@ def is_float_dtype(arr_or_dtype):
tipo = arr_or_dtype.dtype.type
return issubclass(tipo, np.floating)
+def is_complex_dtype(arr_or_dtype):
+ if isinstance(arr_or_dtype, np.dtype):
+ tipo = arr_or_dtype.type
+ else:
+ tipo = arr_or_dtype.dtype.type
+ return issubclass(tipo, np.complexfloating)
+
def is_list_like(arg):
return hasattr(arg, '__iter__') and not isinstance(arg, basestring) or hasattr(arg,'len')
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3df95b27f8736..1b01c92f03a32 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3738,8 +3738,11 @@ def combine(self, other, func, fill_value=None, overwrite=True):
result = {}
for col in new_columns:
- series = this[col].values
- otherSeries = other[col].values
+ series = this[col]
+ otherSeries = other[col]
+
+ this_dtype = series.dtype
+ other_dtype = otherSeries.dtype
this_mask = isnull(series)
other_mask = isnull(otherSeries)
@@ -3756,18 +3759,40 @@ def combine(self, other, func, fill_value=None, overwrite=True):
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
- arr = func(series, otherSeries)
+ # if we have different dtypes, possibily promote
+ new_dtype = this_dtype
+ if this_dtype != other_dtype:
+ new_dtype = com._lcd_dtypes(this_dtype,other_dtype)
+ series = series.astype(new_dtype)
+ otherSeries = otherSeries.astype(new_dtype)
+
+ # see if we need to be represented as i8 (datetimelike)
+ # try to keep us at this dtype
+ needs_i8_conversion = com.needs_i8_conversion(new_dtype)
+ if needs_i8_conversion:
+ this_dtype = new_dtype
+ arr = func(series, otherSeries, True)
+ else:
+ arr = func(series, otherSeries)
if do_fill:
arr = com.ensure_float(arr)
arr[this_mask & other_mask] = NA
+ # try to downcast back to the original dtype
+ if needs_i8_conversion:
+ arr = com._possibly_cast_to_datetime(arr, this_dtype)
+ else:
+ arr = com._possibly_downcast_to_dtype(arr, this_dtype)
+
result[col] = arr
# convert_objects just in case
return self._constructor(result,
index=new_index,
- columns=new_columns).convert_objects(copy=False)
+ columns=new_columns).convert_objects(
+ convert_dates=True,
+ copy=False)
def combine_first(self, other):
"""
@@ -3788,8 +3813,18 @@ def combine_first(self, other):
-------
combined : DataFrame
"""
- def combiner(x, y):
- return expressions.where(isnull(x), y, x, raise_on_error=True)
+ def combiner(x, y, needs_i8_conversion=False):
+ x_values = x.values if hasattr(x,'values') else x
+ y_values = y.values if hasattr(y,'values') else y
+ if needs_i8_conversion:
+ mask = isnull(x)
+ x_values = x_values.view('i8')
+ y_values = y_values.view('i8')
+ else:
+ mask = isnull(x_values)
+
+ return expressions.where(mask, y_values, x_values, raise_on_error=True)
+
return self.combine(other, combiner, overwrite=False)
def update(self, other, join='left', overwrite=True, filter_func=None,
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index b6459b0e461b4..d058d20427ad7 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -258,14 +258,15 @@ def downcast(self, dtypes = None):
return blocks
- def astype(self, dtype, copy = True, raise_on_error = True):
+ def astype(self, dtype, copy = True, raise_on_error = True, values = None):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
try:
- newb = make_block(com._astype_nansafe(self.values, dtype, copy = copy),
- self.items, self.ref_items, fastpath=True)
+ if values is None:
+ values = com._astype_nansafe(self.values, dtype, copy = copy)
+ newb = make_block(values, self.items, self.ref_items, fastpath=True)
except:
if raise_on_error is True:
raise
@@ -708,6 +709,15 @@ def is_bool(self):
""" we can be a bool if we have only bool values but are of type object """
return lib.is_bool_array(self.values.ravel())
+ def astype(self, dtype, copy=True, raise_on_error=True, values=None):
+ """ allow astypes to datetime64[ns],timedelta64[ns] with coercion """
+ dtype = np.dtype(dtype)
+ if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
+ values = com._possibly_convert_datetime(self.values,dtype)
+ else:
+ values = None
+ return super(ObjectBlock, self).astype(dtype=dtype,copy=copy,raise_on_error=raise_on_error,values=values)
+
def convert(self, convert_dates = True, convert_numeric = True, copy = True):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index cebf2f4ef9d1f..8a3f353aa7c4a 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -17,7 +17,8 @@
from pandas.core.common import (isnull, notnull, _is_bool_indexer,
_default_index, _maybe_promote, _maybe_upcast,
_asarray_tuplesafe, is_integer_dtype,
- _infer_dtype_from_scalar, is_list_like)
+ _infer_dtype_from_scalar, is_list_like,
+ _NS_DTYPE, _TD_DTYPE)
from pandas.core.index import (Index, MultiIndex, InvalidIndexError,
_ensure_index, _handle_legacy_indexes)
from pandas.core.indexing import _SeriesIndexer, _check_bool_indexer, _check_slice_bounds
@@ -929,9 +930,13 @@ def astype(self, dtype):
"""
See numpy.ndarray.astype
"""
- casted = com._astype_nansafe(self.values, dtype)
- return self._constructor(casted, index=self.index, name=self.name,
- dtype=casted.dtype)
+ dtype = np.dtype(dtype)
+ if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
+ values = com._possibly_cast_to_datetime(self.values,dtype)
+ else:
+ values = com._astype_nansafe(self.values, dtype)
+ return self._constructor(values, index=self.index, name=self.name,
+ dtype=values.dtype)
def convert_objects(self, convert_dates=True, convert_numeric=True, copy=True):
"""
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 7e7813e048bd1..ce24c72f75882 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -7907,6 +7907,25 @@ def test_combine_first_mixed_bug(self):
expected = Series([True,True,False])
assert_series_equal(result,expected)
+ # GH 3593, converting datetime64[ns] incorrecly
+ df0 = DataFrame({"a":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})
+ df1 = DataFrame({"a":[None, None, None]})
+ df2 = df1.combine_first(df0)
+ assert_frame_equal(df2,df0)
+
+ df2 = df0.combine_first(df1)
+ assert_frame_equal(df2,df0)
+
+ df0 = DataFrame({"a":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})
+ df1 = DataFrame({"a":[datetime(2000, 1, 2), None, None]})
+ df2 = df1.combine_first(df0)
+ result = df0.copy()
+ result.iloc[0,:] = df1.iloc[0,:]
+ assert_frame_equal(df2,result)
+
+ df2 = df0.combine_first(df1)
+ assert_frame_equal(df2,df0)
+
def test_update(self):
df = DataFrame([[1.5, nan, 3.],
[1.5, nan, 3.],
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 6fbce9df753d8..94d29e9233fb6 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1856,7 +1856,7 @@ def test_operators_timedelta64(self):
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
- xp = Series(1e9 * 3600 * 24, rs.index).astype('timedelta64[ns]')
+ xp = Series(1e9 * 3600 * 24, rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assert_(rs.dtype=='timedelta64[ns]')
| closes #3593
| https://api.github.com/repos/pandas-dev/pandas/pulls/3595 | 2013-05-13T22:07:42Z | 2013-05-13T22:49:40Z | 2013-05-13T22:49:40Z | 2014-06-19T14:13:13Z |
BUG: fix take platorm issue with PeriodIndex (GH3579) | diff --git a/RELEASE.rst b/RELEASE.rst
index eaff573a7510a..4085d350f3766 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -103,6 +103,7 @@ pandas 0.11.1
- Fix ``.diff`` on datelike and timedelta operations (GH3100_)
- ``combine_first`` not returning the same dtype in cases where it can (GH3552_)
- Fixed bug with ``Panel.transpose`` argument aliases (GH3556_)
+ - Fixed platform bug in ``PeriodIndex.take`` (GH3579_)
- Fixed bug in reset_index with ``NaN`` in a multi-index (GH3586_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
@@ -143,6 +144,7 @@ pandas 0.11.1
.. _GH3562: https://github.com/pydata/pandas/issues/3562
.. _GH3586: https://github.com/pydata/pandas/issues/3586
.. _GH3493: https://github.com/pydata/pandas/issues/3493
+.. _GH3579: https://github.com/pydata/pandas/issues/3579
.. _GH3556: https://github.com/pydata/pandas/issues/3556
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 051d8c43a48a8..23077655d5144 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -318,6 +318,15 @@ def test_agg_period_index(self):
rs = df.groupby(level=0).sum()
self.assert_(isinstance(rs.index, PeriodIndex))
+ # GH 3579
+ index = period_range(start='1999-01', periods=5, freq='M')
+ s1 = Series(np.random.rand(len(index)), index=index)
+ s2 = Series(np.random.rand(len(index)), index=index)
+ series = [('s1', s1), ('s2',s2)]
+ df = DataFrame.from_items(series)
+ grouped = df.groupby(df.index.month)
+ list(grouped)
+
def test_agg_must_agg(self):
grouped = self.df.groupby('A')['C']
self.assertRaises(Exception, grouped.agg, lambda x: x.describe())
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index abb7486de9351..34c640392bda9 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -1125,6 +1125,7 @@ def take(self, indices, axis=None):
"""
Analogous to ndarray.take
"""
+ indices = com._ensure_platform_int(indices)
taken = self.values.take(indices, axis=axis)
taken = taken.view(PeriodIndex)
taken.freq = self.freq
| closes #3579
| https://api.github.com/repos/pandas-dev/pandas/pulls/3591 | 2013-05-13T18:02:38Z | 2013-05-13T18:55:40Z | 2013-05-13T18:55:40Z | 2014-06-24T14:21:10Z |
BUG: Fixed bug in reset_index with ``NaN`` in a multi-index (GH3586_) | diff --git a/RELEASE.rst b/RELEASE.rst
index efd6b87e59c62..eaff573a7510a 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -103,6 +103,7 @@ pandas 0.11.1
- Fix ``.diff`` on datelike and timedelta operations (GH3100_)
- ``combine_first`` not returning the same dtype in cases where it can (GH3552_)
- Fixed bug with ``Panel.transpose`` argument aliases (GH3556_)
+ - Fixed bug in reset_index with ``NaN`` in a multi-index (GH3586_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -140,6 +141,7 @@ pandas 0.11.1
.. _GH3492: https://github.com/pydata/pandas/issues/3492
.. _GH3552: https://github.com/pydata/pandas/issues/3552
.. _GH3562: https://github.com/pydata/pandas/issues/3562
+.. _GH3586: https://github.com/pydata/pandas/issues/3586
.. _GH3493: https://github.com/pydata/pandas/issues/3493
.. _GH3556: https://github.com/pydata/pandas/issues/3556
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ad1429fcea1ca..725d10c2270d3 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2808,9 +2808,18 @@ def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
else:
new_obj = self.copy()
- def _maybe_cast(values):
+ def _maybe_cast(values, labels=None):
+
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
+
+ # if we have the labels, extract the values with a mask
+ if labels is not None:
+ mask = labels == -1
+ values = values.take(labels)
+ if mask.any():
+ values, changed = com._maybe_upcast_putmask(values,mask,np.nan)
+
return values
new_index = np.arange(len(new_obj))
@@ -2843,9 +2852,9 @@ def _maybe_cast(values):
col_name = tuple(name_lst)
# to ndarray and maybe infer different dtype
- level_values = _maybe_cast(lev.values)
+ level_values = _maybe_cast(lev.values, lab)
if level is None or i in level:
- new_obj.insert(0, col_name, level_values.take(lab))
+ new_obj.insert(0, col_name, level_values)
elif not drop:
name = self.index.name
@@ -2865,8 +2874,8 @@ def _maybe_cast(values):
self.index.tz is not None):
values = self.index.asobject
else:
- values = self.index.values
- new_obj.insert(0, name, _maybe_cast(values))
+ values = _maybe_cast(self.index.values)
+ new_obj.insert(0, name, values)
new_obj.index = new_index
if not inplace:
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index f70c781847cc7..01651f2674a90 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -799,6 +799,25 @@ def test_indexing_mixed_frame_bug(self):
self.assert_(df.iloc[0,2] == '-----')
#if I look at df, then element [0,2] equals '_'. If instead I type df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I get '_'.
+
+
+ def test_set_index_nan(self):
+
+ # GH 3586
+ df = DataFrame({'PRuid': {17: 'nonQC', 18: 'nonQC', 19: 'nonQC', 20: '10', 21: '11', 22: '12', 23: '13',
+ 24: '24', 25: '35', 26: '46', 27: '47', 28: '48', 29: '59', 30: '10'},
+ 'QC': {17: 0.0, 18: 0.0, 19: 0.0, 20: nan, 21: nan, 22: nan, 23: nan, 24: 1.0, 25: nan,
+ 26: nan, 27: nan, 28: nan, 29: nan, 30: nan},
+ 'data': {17: 7.9544899999999998, 18: 8.0142609999999994, 19: 7.8591520000000008, 20: 0.86140349999999999,
+ 21: 0.87853110000000001, 22: 0.8427041999999999, 23: 0.78587700000000005, 24: 0.73062459999999996,
+ 25: 0.81668560000000001, 26: 0.81927080000000008, 27: 0.80705009999999999, 28: 0.81440240000000008,
+ 29: 0.80140849999999997, 30: 0.81307740000000006},
+ 'year': {17: 2006, 18: 2007, 19: 2008, 20: 1985, 21: 1985, 22: 1985, 23: 1985,
+ 24: 1985, 25: 1985, 26: 1985, 27: 1985, 28: 1985, 29: 1985, 30: 1986}}).reset_index()
+
+ result = df.set_index(['year','PRuid','QC']).reset_index().reindex(columns=df.columns)
+ assert_frame_equal(result,df)
+
if __name__ == '__main__':
import nose
| closes #3586
| https://api.github.com/repos/pandas-dev/pandas/pulls/3587 | 2013-05-13T13:29:29Z | 2013-05-13T17:35:20Z | 2013-05-13T17:35:20Z | 2014-06-29T12:28:29Z |
raise on fillna passed a list or tuple | diff --git a/RELEASE.rst b/RELEASE.rst
index 4085d350f3766..042b827617327 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -105,6 +105,8 @@ pandas 0.11.1
- Fixed bug with ``Panel.transpose`` argument aliases (GH3556_)
- Fixed platform bug in ``PeriodIndex.take`` (GH3579_)
- Fixed bug in reset_index with ``NaN`` in a multi-index (GH3586_)
+ - ``fillna`` methods now raise a ``TypeError`` when the ``value`` parameter
+ is a ``list`` or ``tuple``.
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -146,6 +148,7 @@ pandas 0.11.1
.. _GH3493: https://github.com/pydata/pandas/issues/3493
.. _GH3579: https://github.com/pydata/pandas/issues/3579
.. _GH3556: https://github.com/pydata/pandas/issues/3556
+.. _GH3435: https://github.com/pydata/pandas/issues/3435
pandas 0.11.0
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 76565df8f593c..74818f9542cae 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -21,6 +21,8 @@ Enhancements
an index with a different frequency than the existing, or attempting
to append an index with a different name than the existing
- support datelike columns with a timezone as data_columns (GH2852_)
+ - ``fillna`` methods now raise a ``TypeError`` if the ``value`` parameter is
+ a list or tuple.
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
@@ -31,3 +33,4 @@ on GitHub for a complete list.
.. _GH3477: https://github.com/pydata/pandas/issues/3477
.. _GH3492: https://github.com/pydata/pandas/issues/3492
.. _GH3499: https://github.com/pydata/pandas/issues/3499
+.. _GH3435: https://github.com/pydata/pandas/issues/3435
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3df95b27f8736..777749c6b35dc 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3319,7 +3319,7 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
value : scalar or dict
Value to use to fill holes (e.g. 0), alternately a dict of values
specifying which value to use for each column (columns not in the
- dict will not be filled)
+ dict will not be filled). This value cannot be a list.
axis : {0, 1}, default 0
0: fill column-by-column
1: fill row-by-row
@@ -3341,6 +3341,9 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
-------
filled : DataFrame
"""
+ if isinstance(value, (list, tuple)):
+ raise TypeError('"value" parameter must be a scalar or dict, but '
+ 'you passed a "{0}"'.format(type(value).__name__))
self._consolidate_inplace()
axis = self._get_axis_number(axis)
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 869bb31acad6b..44b62991cf7a3 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -1007,6 +1007,9 @@ def fillna(self, value=None, method=None):
--------
DataFrame.reindex, DataFrame.asfreq
"""
+ if isinstance(value, (list, tuple)):
+ raise TypeError('"value" parameter must be a scalar or dict, but '
+ 'you passed a "{0}"'.format(type(value).__name__))
if value is None:
if method is None:
raise ValueError('must specify a fill method or value')
diff --git a/pandas/core/series.py b/pandas/core/series.py
index cebf2f4ef9d1f..d9aacf1b0b080 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2729,6 +2729,9 @@ def fillna(self, value=None, method=None, inplace=False,
-------
filled : Series
"""
+ if isinstance(value, (list, tuple)):
+ raise TypeError('"value" parameter must be a scalar or dict, but '
+ 'you passed a "{0}"'.format(type(value).__name__))
if not self._can_hold_na:
return self.copy() if not inplace else None
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 7e7813e048bd1..ce284b6b72f24 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -6039,6 +6039,12 @@ def test_fillna_invalid_method(self):
except ValueError, inst:
self.assert_('ffil' in str(inst))
+ def test_fillna_invalid_value(self):
+ # list
+ self.assertRaises(TypeError, self.frame.fillna, [1, 2])
+ # tuple
+ self.assertRaises(TypeError, self.frame.fillna, (1, 2))
+
def test_replace_inplace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 081af101b643b..3640025bbf95c 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1099,6 +1099,9 @@ def test_fillna(self):
self.assertRaises(ValueError, self.panel.fillna)
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
+ self.assertRaises(TypeError, self.panel.fillna, [1, 2])
+ self.assertRaises(TypeError, self.panel.fillna, (1, 2))
+
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 6fbce9df753d8..915becec8d7ff 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -3822,6 +3822,11 @@ def test_fillna_int(self):
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
+ def test_fillna_raise(self):
+ s = Series(np.random.randint(-100, 100, 50))
+ self.assertRaises(TypeError, s.fillna, [1, 2])
+ self.assertRaises(TypeError, s.fillna, (1, 2))
+
#------------------------------------------------------------------------------
# TimeSeries-specific
| fixes #3435.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3585 | 2013-05-13T05:40:00Z | 2013-05-13T22:51:06Z | 2013-05-13T22:51:05Z | 2014-06-18T07:35:10Z |
ENH: add regex functionality to DataFrame.replace | diff --git a/doc/source/api.rst b/doc/source/api.rst
index ca95a739ed661..c5b83e4af6999 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -465,6 +465,7 @@ Missing data handling
DataFrame.dropna
DataFrame.fillna
+ DataFrame.replace
Reshaping, sorting, transposing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -492,7 +493,6 @@ Combining / joining / merging
DataFrame.append
DataFrame.join
DataFrame.merge
- DataFrame.replace
DataFrame.update
Time series-related
diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index 133d83513041e..70db8abf3c503 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -334,6 +334,133 @@ missing and interpolate over them:
ser.replace([1, 2, 3], method='pad')
+String/Regular Expression Replacement
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. note::
+
+ Python strings prefixed with the ``r`` character such as ``r'hello world'``
+ are so-called "raw" strings. They have different semantics regarding
+ backslashes than strings without this prefix. Backslashes in raw strings
+ will be interpreted as an escaped backslash, e.g., ``r'\' == '\\'``. You
+ should `read about them
+ <http://docs.python.org/2/reference/lexical_analysis.html#string-literals>`_
+ if this is unclear.
+
+Replace the '.' with ``nan`` (str -> str)
+
+.. ipython:: python
+
+ from numpy.random import rand, randn
+ from numpy import nan
+ from pandas import DataFrame
+ d = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
+ df = DataFrame(d)
+ df.replace('.', nan)
+
+Now do it with a regular expression that removes surrounding whitespace
+(regex -> regex)
+
+.. ipython:: python
+
+ df.replace(r'\s*\.\s*', nan, regex=True)
+
+Replace a few different values (list -> list)
+
+.. ipython:: python
+
+ df.replace(['a', '.'], ['b', nan])
+
+list of regex -> list of regex
+
+.. ipython:: python
+
+ df.replace([r'\.', r'(a)'], ['dot', '\1stuff'], regex=True)
+
+Only search in column ``'b'`` (dict -> dict)
+
+.. ipython:: python
+
+ df.replace({'b': '.'}, {'b': nan})
+
+Same as the previous example, but use a regular expression for
+searching instead (dict of regex -> dict)
+
+.. ipython:: python
+
+ df.replace({'b': r'\s*\.\s*'}, {'b': nan}, regex=True)
+
+You can pass nested dictionaries of regular expressions that use ``regex=True``
+
+.. ipython:: python
+
+ df.replace({'b': {'b': r''}}, regex=True)
+
+or you can pass the nested dictionary like so
+
+.. ipython:: python
+
+ df.replace(regex={'b': {'b': r'\s*\.\s*'}})
+
+You can also use the group of a regular expression match when replacing (dict
+of regex -> dict of regex), this works for lists as well
+
+.. ipython:: python
+
+ df.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True)
+
+You can pass a list of regular expressions, of which those that match
+will be replaced with a scalar (list of regex -> regex)
+
+.. ipython:: python
+
+ df.replace([r'\s*\.\*', r'a|b'], nan, regex=True)
+
+All of the regular expression examples can also be passed with the
+``to_replace`` argument as the ``regex`` argument. In this case the ``value``
+argument must be passed explicity by name or ``regex`` must be a nested
+dictionary. The previous example, in this case, would then be
+
+.. ipython:: python
+
+ df.replace(regex=[r'\s*\.\*', r'a|b'], value=nan)
+
+This can be convenient if you do not want to pass ``regex=True`` every time you
+want to use a regular expression.
+
+.. note::
+
+ Anywhere in the above ``replace`` examples that you see a regular expression
+ a compiled regular expression is valid as well.
+
+Numeric Replacement
+^^^^^^^^^^^^^^^^^^^
+
+Similiar to ``DataFrame.fillna``
+
+.. ipython:: python
+
+ from numpy.random import rand, randn
+ from numpy import nan
+ from pandas import DataFrame
+ from pandas.util.testing import assert_frame_equal
+ df = DataFrame(randn(10, 2))
+ df[rand(df.shape[0]) > 0.5] = 1.5
+ df.replace(1.5, nan)
+
+Replacing more than one value via lists works as well
+
+.. ipython:: python
+
+ df00 = df.values[0, 0]
+ df.replace([1.5, df00], [nan, 'a'])
+ df[1].dtype
+
+You can also operate on the DataFrame in place
+
+.. ipython:: python
+
+ df.replace(1.5, nan, inplace=True)
Missing data casting rules and indexing
---------------------------------------
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 4d983905f9aaa..c16eb64631198 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -55,6 +55,9 @@ Enhancements
- ``fillna`` methods now raise a ``TypeError`` if the ``value`` parameter is
a list or tuple.
- Added module for reading and writing Stata files: pandas.io.stata (GH1512_)
+ - ``DataFrame.replace()`` now allows regular expressions on contained
+ ``Series`` with object dtype. See the examples section in the regular docs
+ and the generated documentation for the method for more details.
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
@@ -70,3 +73,4 @@ on GitHub for a complete list.
.. _GH3590: https://github.com/pydata/pandas/issues/3590
.. _GH3435: https://github.com/pydata/pandas/issues/3435
.. _GH1512: https://github.com/pydata/pandas/issues/1512
+.. _GH2285: https://github.com/pydata/pandas/issues/2285
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 73f789a9425c6..39742557ccc56 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -17,6 +17,7 @@
import operator
import sys
import collections
+import itertools
from numpy import nan as NA
import numpy as np
@@ -32,7 +33,8 @@
_maybe_convert_indices)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
- create_block_manager_from_blocks)
+ create_block_manager_from_blocks,
+ _re_compilable)
from pandas.core.series import Series, _radd_compat
import pandas.core.expressions as expressions
from pandas.compat.scipy import scoreatpercentile as _quantile
@@ -3431,17 +3433,46 @@ def bfill(self, axis=0, inplace=False, limit=None):
return self.fillna(method='bfill', axis=axis, inplace=inplace,
limit=limit)
- def replace(self, to_replace, value=None, method='pad', axis=0,
- inplace=False, limit=None):
- """
- Replace values given in 'to_replace' with 'value' or using 'method'
+ def replace(self, to_replace=None, value=None, method='pad', axis=0,
+ inplace=False, limit=None, regex=False, infer_types=False):
+ """Replace values given in 'to_replace' with 'value' or using 'method'.
Parameters
----------
- value : scalar or dict, default None
+ to_replace : str, regex, list, dict, Series, numeric, or None
+ * str or regex:
+ - str: string exactly matching `to_replace` will be replaced
+ with `value`
+ - regex: regexs matching `to_replace` will be replaced with
+ `value`
+ * list of str, regex, or numeric:
+ - First, if `to_replace` and `value` are both lists, they
+ **must** be the same length.
+ - Second, if ``regex=True`` then all of the strings in **both**
+ lists will be interpreted as regexs otherwise they will match
+ directly. This doesn't matter much for `value` since there
+ are only a few possible substitution regexes you can use.
+ - str and regex rules apply as above.
+ * dict:
+ - Nested dictionaries, e.g., {'a': {'b': nan}}, are read as
+ follows: look in column 'a' for the value 'b' and replace it
+ with nan. You can nest regular expressions as well. Note that
+ column names (the top-level dictionary keys in a nested
+ dictionary) **cannot** be regular expressions.
+ - Keys map to column names and values map to substitution
+ values. You can treat this as a special case of passing two
+ lists except that you are specifying the column to search in.
+ * None:
+ - This means that the ``regex`` argument must be a string,
+ compiled regular expression, or list, dict, ndarray or Series
+ of such elements. If `value` is also ``None`` then this
+ **must** be a nested dictionary or ``Series``.
+ See the examples section for examples of each of these.
+ value : scalar, dict, list, str, regex, default None
Value to use to fill holes (e.g. 0), alternately a dict of values
specifying which value to use for each column (columns not in the
- dict will not be filled)
+ dict will not be filled). Regular expressions, strings and lists or
+ dicts of such objects are also allowed.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad'
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
@@ -3456,23 +3487,91 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
a reference to the filled object, which is self if inplace=True
limit : int, default None
Maximum size gap to forward or backward fill
+ regex : bool or same types as `to_replace`, default False
+ Whether to interpret `to_replace` and/or `value` as regular
+ expressions. If this is ``True`` then `to_replace` *must* be a
+ string. Otherwise, `to_replace` must be ``None`` because this
+ parameter will be interpreted as a regular expression or a list,
+ dict, or array of regular expressions.
+ infer_types : bool, default True
+ If ``True`` attempt to convert object blocks to a better dtype.
See also
--------
- reindex, asfreq
+ reindex, asfreq, fillna, interpolate
Returns
-------
filled : DataFrame
- """
+
+ Raises
+ ------
+ AssertionError
+ * If `regex` is not a ``bool`` and `to_replace` is not ``None``.
+ TypeError
+ * If `to_replace` is a ``dict`` and `value` is not a ``list``,
+ ``dict``, ``ndarray``, or ``Series``
+ * If `to_replace` is ``None`` and `regex` is not compilable into a
+ regular expression or is a list, dict, ndarray, or Series.
+ ValueError
+ * If `to_replace` and `value` are ``list`` s or ``ndarray`` s, but
+ they are not the same length.
+
+ Notes
+ -----
+ * Regex substitution is performed under the hood with ``re.sub``. The
+ rules for substitution for ``re.sub`` are the same.
+ * Regular expressions will only substitute on strings, meaning you
+ cannot provide, for example, a regular expression matching floating
+ point numbers and expect the columns in your frame that have a
+ numeric dtype to be matched. However, if those floating point numbers
+ *are* strings, then you can do this.
+ * This method has *a lot* of options. You are encouraged to experiment
+ and play with this method to gain intuition about how it works.
+ """
+ if not isinstance(regex, bool) and to_replace is not None:
+ raise AssertionError("'to_replace' must be 'None' if 'regex' is "
+ "not a bool")
self._consolidate_inplace()
axis = self._get_axis_number(axis)
+ method = com._clean_fill_method(method)
if value is None:
- return self._interpolate(to_replace, method, axis, inplace, limit)
+ if not isinstance(to_replace, (dict, Series)):
+ if not isinstance(regex, (dict, Series)):
+ raise TypeError('If "to_replace" and "value" are both None'
+ ' then regex must be a mapping')
+ to_replace = regex
+ regex = True
+
+ items = to_replace.items()
+ keys, values = itertools.izip(*items)
+
+ are_mappings = [isinstance(v, (dict, Series)) for v in values]
+
+ if any(are_mappings):
+ if not all(are_mappings):
+ raise TypeError("If a nested mapping is passed, all values"
+ " of the top level mapping must be "
+ "mappings")
+ # passed a nested dict/Series
+ to_rep_dict = {}
+ value_dict = {}
+
+ for k, v in items:
+ to_rep_dict[k] = v.keys()
+ value_dict[k] = v.values()
+
+ to_replace, value = to_rep_dict, value_dict
+ else:
+ to_replace, value = keys, values
+
+ return self.replace(to_replace, value, method=method, axis=axis,
+ inplace=inplace, limit=limit, regex=regex,
+ infer_types=infer_types)
else:
- if len(self.columns) == 0:
+ if not len(self.columns):
return self
new_data = self._data
@@ -3483,17 +3582,20 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
if c in value and c in self:
new_data = new_data.replace(src, value[c],
filter=[ c ],
- inplace=inplace)
+ inplace=inplace,
+ regex=regex)
- elif not isinstance(value, (list, np.ndarray)):
+ elif not isinstance(value, (list, np.ndarray)): # {'A': NA} -> 0
new_data = self._data
for k, src in to_replace.iteritems():
if k in self:
new_data = new_data.replace(src, value,
filter = [ k ],
- inplace=inplace)
+ inplace=inplace,
+ regex=regex)
else:
- raise ValueError('Fill value must be scalar or dict or Series')
+ raise TypeError('Fill value must be scalar, dict, or '
+ 'Series')
elif isinstance(to_replace, (list, np.ndarray)):
# [NA, ''] -> [0, 'missing']
@@ -3504,63 +3606,93 @@ def replace(self, to_replace, value=None, method='pad', axis=0,
(len(to_replace), len(value)))
new_data = self._data.replace_list(to_replace, value,
- inplace=inplace)
+ inplace=inplace,
+ regex=regex)
else: # [NA, ''] -> 0
new_data = self._data.replace(to_replace, value,
- inplace=inplace)
-
+ inplace=inplace, regex=regex)
+ elif to_replace is None:
+ if not (_re_compilable(regex) or
+ isinstance(regex, (list, dict, np.ndarray, Series))):
+ raise TypeError("'regex' must be a string or a compiled "
+ "regular expression or a list or dict of "
+ "strings or regular expressions, you "
+ "passed a {0}".format(type(regex)))
+ return self.replace(regex, value, method=method, axis=axis,
+ inplace=inplace, limit=limit, regex=True,
+ infer_types=infer_types)
else:
# dest iterable dict-like
if isinstance(value, (dict, Series)): # NA -> {'A' : 0, 'B' : -1}
-
new_data = self._data
+
for k, v in value.iteritems():
if k in self:
new_data = new_data.replace(to_replace, v,
filter=[ k ],
- inplace=inplace)
+ inplace=inplace,
+ regex=regex)
elif not isinstance(value, (list, np.ndarray)): # NA -> 0
new_data = self._data.replace(to_replace, value,
- inplace=inplace)
+ inplace=inplace, regex=regex)
else:
- raise ValueError('Invalid to_replace type: %s' %
- type(to_replace)) # pragma: no cover
+ raise TypeError('Invalid "to_replace" type: '
+ '{0}'.format(type(to_replace))) # pragma: no cover
+ if infer_types:
+ new_data = new_data.convert()
if inplace:
self._data = new_data
else:
return self._constructor(new_data)
- def _interpolate(self, to_replace, method, axis, inplace, limit):
+ def interpolate(self, to_replace, method='pad', axis=0, inplace=False,
+ limit=None):
+ """Interpolate values according to different methods.
+
+ Parameters
+ ----------
+ to_replace : dict, Series
+ method : str
+ axis : int
+ inplace : bool
+ limit : int, default None
+
+ Returns
+ -------
+ frame : interpolated
+
+ See Also
+ --------
+ reindex, replace, fillna
+ """
if self._is_mixed_type and axis == 1:
return self.T.replace(to_replace, method=method, limit=limit).T
method = com._clean_fill_method(method)
if isinstance(to_replace, (dict, Series)):
- if axis == 1:
- return self.T.replace(to_replace, method=method,
- limit=limit).T
-
- rs = self if inplace else self.copy()
- for k, v in to_replace.iteritems():
- if k in rs:
- rs[k].replace(v, method=method, limit=limit,
- inplace=True)
- return rs if not inplace else None
-
+ if axis == 0:
+ return self.replace(to_replace, method=method, inplace=inplace,
+ limit=limit, axis=axis)
+ elif axis == 1:
+ obj = self.T
+ if inplace:
+ obj.replace(to_replace, method=method, limit=limit,
+ inplace=inplace, axis=0)
+ return obj.T
+ return obj.replace(to_replace, method=method, limit=limit,
+ inplace=inplace, axis=0).T
+ else:
+ raise ValueError('Invalid value for axis')
else:
-
- new_data = self._data.interpolate(method = method,
- axis = axis,
- limit = limit,
- inplace = inplace,
- missing = to_replace,
- coerce = False)
+ new_data = self._data.interpolate(method=method, axis=axis,
+ limit=limit, inplace=inplace,
+ missing=to_replace, coerce=False)
if inplace:
self._data = new_data
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index d058d20427ad7..849776940512e 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1,5 +1,7 @@
import itertools
+import re
from datetime import datetime
+import collections
from numpy import nan
import numpy as np
@@ -16,6 +18,10 @@
from pandas.util import py3compat
+def _re_compilable(ex):
+ return isinstance(ex, (basestring, re._pattern_type))
+
+
class Block(object):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
@@ -318,9 +324,12 @@ def to_native_types(self, slicer=None, na_rep='', **kwargs):
values[mask] = na_rep
return values.tolist()
- def replace(self, to_replace, value, inplace=False, filter=None):
- """ replace the to_replace value with value, possible to create new blocks here
- this is just a call to putmask """
+ def replace(self, to_replace, value, inplace=False, filter=None,
+ regex=False):
+ """ replace the to_replace value with value, possible to create new
+ blocks here this is just a call to putmask. regex is not used here.
+ It is used in ObjectBlocks. It is here for API
+ compatibility."""
mask = com.mask_missing(self.values, to_replace)
if filter is not None:
for i, item in enumerate(self.items):
@@ -750,6 +759,101 @@ def should_store(self, value):
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_))
+ def replace(self, to_replace, value, inplace=False, filter=None,
+ regex=False):
+ blk = [self]
+ to_rep_is_list = (isinstance(to_replace, collections.Iterable) and not
+ isinstance(to_replace, basestring))
+ value_is_list = (isinstance(value, collections.Iterable) and not
+ isinstance(to_replace, basestring))
+ both_lists = to_rep_is_list and value_is_list
+ either_list = to_rep_is_list or value_is_list
+
+ if not either_list and not regex:
+ blk = super(ObjectBlock, self).replace(to_replace, value,
+ inplace=inplace,
+ filter=filter, regex=regex)
+ elif both_lists and regex:
+ for to_rep, v in itertools.izip(to_replace, value):
+ blk[0], = blk[0]._replace_single(to_rep, v, inplace=inplace,
+ filter=filter, regex=regex)
+ elif to_rep_is_list and regex:
+ for to_rep in to_replace:
+ blk[0], = blk[0]._replace_single(to_rep, value,
+ inplace=inplace,
+ filter=filter, regex=regex)
+ else:
+ blk[0], = blk[0]._replace_single(to_replace, value,
+ inplace=inplace, filter=filter,
+ regex=regex)
+ return blk
+
+ def _replace_single(self, to_replace, value, inplace=False, filter=None,
+ regex=False):
+ # to_replace is regex compilable
+ to_rep_re = _re_compilable(to_replace)
+
+ # regex is regex compilable
+ regex_re = _re_compilable(regex)
+
+ if to_rep_re and regex_re:
+ raise AssertionError('only one of to_replace and regex can be '
+ 'regex compilable')
+
+ if regex_re:
+ to_replace = regex
+
+ regex = regex_re or to_rep_re
+
+ # try to get the pattern attribute (compiled re) or it's a string
+ try:
+ pattern = to_replace.pattern
+ except AttributeError:
+ pattern = to_replace
+
+ # if the pattern is not empty and to_replace is either a string or a
+ # regex
+ if regex and pattern:
+ rx = re.compile(to_replace)
+ else:
+ # if the thing to replace is not a string or compiled regex call
+ # the superclass method -> to_replace is some kind of object
+ return super(ObjectBlock, self).replace(to_replace, value,
+ inplace=inplace,
+ filter=filter, regex=regex)
+
+ new_values = self.values if inplace else self.values.copy()
+
+ # deal with replacing values with objects (strings) that match but
+ # whose replacement is not a string (numeric, nan, object)
+ if isnull(value) or not isinstance(value, basestring):
+ def re_replacer(s):
+ try:
+ return value if rx.search(s) is not None else s
+ except TypeError:
+ return s
+ else:
+ # value is guaranteed to be a string here, s can be either a string
+ # or null if it's null it gets returned
+ def re_replacer(s):
+ try:
+ return rx.sub(value, s)
+ except TypeError:
+ return s
+
+ f = np.vectorize(re_replacer, otypes=[self.dtype])
+
+ try:
+ filt = map(self.items.get_loc, filter)
+ except TypeError:
+ filt = slice(None)
+
+ new_values[filt] = f(new_values[filt])
+
+ return [self if inplace else make_block(new_values, self.items,
+ self.ref_items, fastpath=True)]
+
+
class DatetimeBlock(Block):
_can_hold_na = True
@@ -1136,7 +1240,9 @@ def _verify_integrity(self):
if len(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
- 'block items')
+ 'block items\n# manager items: {0}, # '
+ 'tot_items: {1}'.format(len(self.items),
+ tot_items))
def apply(self, f, *args, **kwargs):
""" iterate over the blocks, collect and create a new block manager
@@ -1203,7 +1309,7 @@ def convert(self, *args, **kwargs):
def replace(self, *args, **kwargs):
return self.apply('replace', *args, **kwargs)
- def replace_list(self, src_lst, dest_lst, inplace=False):
+ def replace_list(self, src_lst, dest_lst, inplace=False, regex=False):
""" do a list replace """
# figure out our mask a-priori to avoid repeated replacements
@@ -1220,16 +1326,20 @@ def comp(s):
# its possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [ blk if inplace else blk.copy() ]
- for i, d in enumerate(dest_lst):
+ for i, (s, d) in enumerate(zip(src_lst, dest_lst)):
new_rb = []
for b in rb:
- # get our mask for this element, sized to this
- # particular block
- m = masks[i][b.ref_locs]
- if m.any():
- new_rb.extend(b.putmask(m, d, inplace=True))
+ if b.dtype == np.object_:
+ new_rb.extend(b.replace(s, d, inplace=inplace,
+ regex=regex))
else:
- new_rb.append(b)
+ # get our mask for this element, sized to this
+ # particular block
+ m = masks[i][b.ref_locs]
+ if m.any():
+ new_rb.extend(b.putmask(m, d, inplace=True))
+ else:
+ new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
@@ -2165,7 +2275,6 @@ def _lcd_dtype(l):
else:
return _lcd_dtype(counts[FloatBlock])
-
def _consolidate(blocks, items):
"""
Merge blocks having same dtype
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index de49eca7dab1c..8e48ef094c419 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4,7 +4,7 @@
from StringIO import StringIO
import cPickle as pickle
import operator
-import os
+import re
import unittest
import nose
@@ -6131,9 +6131,8 @@ def test_replace_inplace(self):
res = tsframe.replace(nan, 0, inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
- tsframe = self.tsframe.copy()
- res = tsframe.replace(nan, method='pad', inplace=True)
- assert_frame_equal(tsframe, self.tsframe.fillna(method='pad'))
+ self.assertRaises(TypeError, self.tsframe.replace, nan, method='pad',
+ inplace=True)
# mixed type
self.mixed_frame['foo'][5:20] = nan
@@ -6144,9 +6143,499 @@ def test_replace_inplace(self):
assert_frame_equal(result, expected)
tsframe = self.tsframe.copy()
- res = tsframe.replace([nan], [0], inplace=True)
+ tsframe.replace([nan], [0], inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
+ def test_regex_replace_scalar(self):
+ obj = {'a': list('ab..'), 'b': list('efgh')}
+ dfobj = DataFrame(obj)
+ mix = {'a': range(4), 'b': list('ab..')}
+ dfmix = DataFrame(mix)
+
+ ### simplest cases
+ ## regex -> value
+ # obj frame
+ res = dfobj.replace(r'\s*\.\s*', nan, regex=True)
+ assert_frame_equal(dfobj, res.fillna('.'))
+
+ # mixed
+ res = dfmix.replace(r'\s*\.\s*', nan, regex=True)
+ assert_frame_equal(dfmix, res.fillna('.'))
+
+ ## regex -> regex
+ # obj frame
+ res = dfobj.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
+ objc = obj.copy()
+ objc['a'] = ['a', 'b', '...', '...']
+ expec = DataFrame(objc)
+ assert_frame_equal(res, expec)
+
+ # with mixed
+ res = dfmix.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
+ mixc = mix.copy()
+ mixc['b'] = ['a', 'b', '...', '...']
+ expec = DataFrame(mixc)
+ assert_frame_equal(res, expec)
+
+ # everything with compiled regexs as well
+ res = dfobj.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
+ assert_frame_equal(dfobj, res.fillna('.'))
+
+ # mixed
+ res = dfmix.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
+ assert_frame_equal(dfmix, res.fillna('.'))
+
+ ## regex -> regex
+ # obj frame
+ res = dfobj.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
+ objc = obj.copy()
+ objc['a'] = ['a', 'b', '...', '...']
+ expec = DataFrame(objc)
+ assert_frame_equal(res, expec)
+
+ # with mixed
+ res = dfmix.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
+ mixc = mix.copy()
+ mixc['b'] = ['a', 'b', '...', '...']
+ expec = DataFrame(mixc)
+ assert_frame_equal(res, expec)
+
+ res = dfmix.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1')
+ mixc = mix.copy()
+ mixc['b'] = ['a', 'b', '...', '...']
+ expec = DataFrame(mixc)
+ assert_frame_equal(res, expec)
+
+ res = dfmix.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1')
+ mixc = mix.copy()
+ mixc['b'] = ['a', 'b', '...', '...']
+ expec = DataFrame(mixc)
+ assert_frame_equal(res, expec)
+
+ def test_regex_replace_scalar_inplace(self):
+ obj = {'a': list('ab..'), 'b': list('efgh')}
+ dfobj = DataFrame(obj)
+ mix = {'a': range(4), 'b': list('ab..')}
+ dfmix = DataFrame(mix)
+
+ ### simplest cases
+ ## regex -> value
+ # obj frame
+ res = dfobj.copy()
+ res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
+ assert_frame_equal(dfobj, res.fillna('.'))
+
+ # mixed
+ res = dfmix.copy()
+ res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
+ assert_frame_equal(dfmix, res.fillna('.'))
+
+ ## regex -> regex
+ # obj frame
+ res = dfobj.copy()
+ res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
+ objc = obj.copy()
+ objc['a'] = ['a', 'b', '...', '...']
+ expec = DataFrame(objc)
+ assert_frame_equal(res, expec)
+
+ # with mixed
+ res = dfmix.copy()
+ res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
+ mixc = mix.copy()
+ mixc['b'] = ['a', 'b', '...', '...']
+ expec = DataFrame(mixc)
+ assert_frame_equal(res, expec)
+
+ # everything with compiled regexs as well
+ res = dfobj.copy()
+ res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
+ assert_frame_equal(dfobj, res.fillna('.'))
+
+ # mixed
+ res = dfmix.copy()
+ res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
+ assert_frame_equal(dfmix, res.fillna('.'))
+
+ ## regex -> regex
+ # obj frame
+ res = dfobj.copy()
+ res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
+ inplace=True)
+ objc = obj.copy()
+ objc['a'] = ['a', 'b', '...', '...']
+ expec = DataFrame(objc)
+ assert_frame_equal(res, expec)
+
+ # with mixed
+ res = dfmix.copy()
+ res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
+ inplace=True)
+ mixc = mix.copy()
+ mixc['b'] = ['a', 'b', '...', '...']
+ expec = DataFrame(mixc)
+ assert_frame_equal(res, expec)
+
+ res = dfobj.copy()
+ res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
+ assert_frame_equal(dfobj, res.fillna('.'))
+
+ # mixed
+ res = dfmix.copy()
+ res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
+ assert_frame_equal(dfmix, res.fillna('.'))
+
+ ## regex -> regex
+ # obj frame
+ res = dfobj.copy()
+ res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
+ objc = obj.copy()
+ objc['a'] = ['a', 'b', '...', '...']
+ expec = DataFrame(objc)
+ assert_frame_equal(res, expec)
+
+ # with mixed
+ res = dfmix.copy()
+ res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
+ mixc = mix.copy()
+ mixc['b'] = ['a', 'b', '...', '...']
+ expec = DataFrame(mixc)
+ assert_frame_equal(res, expec)
+
+ # everything with compiled regexs as well
+ res = dfobj.copy()
+ res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
+ assert_frame_equal(dfobj, res.fillna('.'))
+
+ # mixed
+ res = dfmix.copy()
+ res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
+ assert_frame_equal(dfmix, res.fillna('.'))
+
+ ## regex -> regex
+ # obj frame
+ res = dfobj.copy()
+ res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
+ inplace=True)
+ objc = obj.copy()
+ objc['a'] = ['a', 'b', '...', '...']
+ expec = DataFrame(objc)
+ assert_frame_equal(res, expec)
+
+ # with mixed
+ res = dfmix.copy()
+ res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
+ inplace=True)
+ mixc = mix.copy()
+ mixc['b'] = ['a', 'b', '...', '...']
+ expec = DataFrame(mixc)
+ assert_frame_equal(res, expec)
+
+ def test_regex_replace_list_obj(self):
+ obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
+ dfobj = DataFrame(obj)
+
+ ## lists of regexes and values
+ # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
+ to_replace_res = [r'\s*\.\s*', r'e|f|g']
+ values = [nan, 'crap']
+ res = dfobj.replace(to_replace_res, values, regex=True)
+ expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
+ ['h'], 'c': ['h', 'crap', 'l', 'o']})
+ assert_frame_equal(res, expec)
+
+ # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
+ to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
+ values = [r'\1\1', r'\1_crap']
+ res = dfobj.replace(to_replace_res, values, regex=True)
+ expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
+ 'f_crap',
+ 'g_crap', 'h'],
+ 'c': ['h', 'e_crap', 'l', 'o']})
+
+ assert_frame_equal(res, expec)
+
+ # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
+ # or vN)]
+ to_replace_res = [r'\s*(\.)\s*', r'e']
+ values = [r'\1\1', r'crap']
+ res = dfobj.replace(to_replace_res, values, regex=True)
+ expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
+ 'h'],
+ 'c': ['h', 'crap', 'l', 'o']})
+ assert_frame_equal(res, expec)
+
+ to_replace_res = [r'\s*(\.)\s*', r'e']
+ values = [r'\1\1', r'crap']
+ res = dfobj.replace(value=values, regex=to_replace_res)
+ expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
+ 'h'],
+ 'c': ['h', 'crap', 'l', 'o']})
+ assert_frame_equal(res, expec)
+
+ def test_regex_replace_list_obj_inplace(self):
+ ### same as above with inplace=True
+ ## lists of regexes and values
+ obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
+ dfobj = DataFrame(obj)
+
+ ## lists of regexes and values
+ # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
+ to_replace_res = [r'\s*\.\s*', r'e|f|g']
+ values = [nan, 'crap']
+ res = dfobj.copy()
+ res.replace(to_replace_res, values, inplace=True, regex=True)
+ expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
+ ['h'], 'c': ['h', 'crap', 'l', 'o']})
+ assert_frame_equal(res, expec)
+
+ # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
+ to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
+ values = [r'\1\1', r'\1_crap']
+ res = dfobj.copy()
+ res.replace(to_replace_res, values, inplace=True, regex=True)
+ expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
+ 'f_crap',
+ 'g_crap', 'h'],
+ 'c': ['h', 'e_crap', 'l', 'o']})
+
+ assert_frame_equal(res, expec)
+
+ # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
+ # or vN)]
+ to_replace_res = [r'\s*(\.)\s*', r'e']
+ values = [r'\1\1', r'crap']
+ res = dfobj.copy()
+ res.replace(to_replace_res, values, inplace=True, regex=True)
+ expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
+ 'h'],
+ 'c': ['h', 'crap', 'l', 'o']})
+ assert_frame_equal(res, expec)
+
+ to_replace_res = [r'\s*(\.)\s*', r'e']
+ values = [r'\1\1', r'crap']
+ res = dfobj.copy()
+ res.replace(value=values, regex=to_replace_res, inplace=True)
+ expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
+ 'h'],
+ 'c': ['h', 'crap', 'l', 'o']})
+ assert_frame_equal(res, expec)
+
+ def test_regex_replace_list_mixed(self):
+ ## mixed frame to make sure this doesn't break things
+ mix = {'a': range(4), 'b': list('ab..')}
+ dfmix = DataFrame(mix)
+
+ ## lists of regexes and values
+ # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
+ to_replace_res = [r'\s*\.\s*', r'a']
+ values = [nan, 'crap']
+ mix2 = {'a': range(4), 'b': list('ab..'), 'c': list('halo')}
+ dfmix2 = DataFrame(mix2)
+ res = dfmix2.replace(to_replace_res, values, regex=True)
+ expec = DataFrame({'a': mix2['a'], 'b': ['crap', 'b', nan, nan],
+ 'c': ['h', 'crap', 'l', 'o']})
+ assert_frame_equal(res, expec)
+
+ # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
+ to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
+ values = [r'\1\1', r'\1_crap']
+ res = dfmix.replace(to_replace_res, values, regex=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
+ '..']})
+
+ assert_frame_equal(res, expec)
+
+ # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
+ # or vN)]
+ to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
+ values = [r'\1\1', r'crap', r'\1_crap']
+ res = dfmix.replace(to_replace_res, values, regex=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
+ assert_frame_equal(res, expec)
+
+ to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
+ values = [r'\1\1', r'crap', r'\1_crap']
+ res = dfmix.replace(regex=to_replace_res, value=values)
+ expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
+ assert_frame_equal(res, expec)
+
+ def test_regex_replace_list_mixed_inplace(self):
+ mix = {'a': range(4), 'b': list('ab..')}
+ dfmix = DataFrame(mix)
+ # the same inplace
+ ## lists of regexes and values
+ # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
+ to_replace_res = [r'\s*\.\s*', r'a']
+ values = [nan, 'crap']
+ res = dfmix.copy()
+ res.replace(to_replace_res, values, inplace=True, regex=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b', nan, nan]})
+ assert_frame_equal(res, expec)
+
+ # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
+ to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
+ values = [r'\1\1', r'\1_crap']
+ res = dfmix.copy()
+ res.replace(to_replace_res, values, inplace=True, regex=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
+ '..']})
+
+ assert_frame_equal(res, expec)
+
+ # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
+ # or vN)]
+ to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
+ values = [r'\1\1', r'crap', r'\1_crap']
+ res = dfmix.copy()
+ res.replace(to_replace_res, values, inplace=True, regex=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
+ assert_frame_equal(res, expec)
+
+ to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
+ values = [r'\1\1', r'crap', r'\1_crap']
+ res = dfmix.copy()
+ res.replace(regex=to_replace_res, value=values, inplace=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
+ assert_frame_equal(res, expec)
+
+ def test_regex_replace_dict_mixed(self):
+ mix = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
+ dfmix = DataFrame(mix)
+
+ ## dicts
+ # single dict {re1: v1}, search the whole frame
+ # need test for this...
+
+ # list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole
+ # frame
+ res = dfmix.replace({'b': r'\s*\.\s*'}, {'b': nan}, regex=True)
+ res2 = dfmix.copy()
+ res2.replace({'b': r'\s*\.\s*'}, {'b': nan}, inplace=True, regex=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
+ mix['c']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+
+ # list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the
+ # whole frame
+ res = dfmix.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True)
+ res2 = dfmix.copy()
+ res2.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, inplace=True,
+ regex=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
+ mix['c']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+
+ res = dfmix.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'})
+ res2 = dfmix.copy()
+ res2.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'},
+ inplace=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
+ mix['c']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+
+ # scalar -> dict
+ # to_replace regex, {value: value}
+ res = dfmix.replace('a', {'b': nan}, regex=True)
+ res2 = dfmix.copy()
+ res2.replace('a', {'b': nan}, regex=True, inplace=True)
+ expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
+ mix['c']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+
+ res = dfmix.replace('a', {'b': nan}, regex=True)
+ res2 = dfmix.copy()
+ res2.replace(regex='a', value={'b': nan}, inplace=True)
+ expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
+ mix['c']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+
+ def test_regex_replace_dict_nested(self):
+ # nested dicts will not work until this is implemented for Series
+ mix = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
+ dfmix = DataFrame(mix)
+ res = dfmix.replace({'b': {r'\s*\.\s*': nan}}, regex=True)
+ res2 = dfmix.copy()
+ res2.replace({'b': {r'\s*\.\s*': nan}}, inplace=True, regex=True)
+ print res2
+ expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
+ mix['c']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+
+ def test_regex_replace_list_to_scalar(self):
+ mix = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
+ df = DataFrame(mix)
+ res = df.replace([r'\s*\.\s*', 'a|b'], nan, regex=True)
+ res2 = df.copy()
+ res3 = df.copy()
+ res2.replace([r'\s*\.\s*', 'a|b'], nan, regex=True, inplace=True)
+ res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=nan, inplace=True)
+ expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4, object),
+ 'c': [nan, nan, nan, 'd']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+ assert_frame_equal(res3, expec)
+
+ def test_regex_replace_str_to_numeric(self):
+ # what happens when you try to replace a numeric value with a regex?
+ mix = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
+ df = DataFrame(mix)
+ res = df.replace(r'\s*\.\s*', 0, regex=True)
+ res2 = df.copy()
+ res2.replace(r'\s*\.\s*', 0, inplace=True, regex=True)
+ res3 = df.copy()
+ res3.replace(regex=r'\s*\.\s*', value=0, inplace=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', 0, 0], 'c':
+ mix['c']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+ assert_frame_equal(res3, expec)
+
+ def test_regex_replace_regex_list_to_numeric(self):
+ mix = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
+ df = DataFrame(mix)
+ res = df.replace([r'\s*\.\s*', 'b'], 0, regex=True)
+ res2 = df.copy()
+ res2.replace([r'\s*\.\s*', 'b'], 0, regex=True, inplace=True)
+ res3 = df.copy()
+ res3.replace(regex=[r'\s*\.\s*', 'b'], value=0, inplace=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['a', 0, 0, 0], 'c': ['a', 0,
+ nan,
+ 'd']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+ assert_frame_equal(res3, expec)
+
+ def test_regex_replace_series_of_regexes(self):
+ mix = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
+ df = DataFrame(mix)
+ s1 = Series({'b': r'\s*\.\s*'})
+ s2 = Series({'b': nan})
+ res = df.replace(s1, s2, regex=True)
+ res2 = df.copy()
+ res2.replace(s1, s2, inplace=True, regex=True)
+ res3 = df.copy()
+ res3.replace(regex=s1, value=s2, inplace=True)
+ expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
+ mix['c']})
+ assert_frame_equal(res, expec)
+ assert_frame_equal(res2, expec)
+ assert_frame_equal(res3, expec)
+
+ def test_regex_replace_numeric_to_object_conversion(self):
+ mix = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
+ df = DataFrame(mix)
+ res = df.replace(0, 'a')
+ expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']})
+ assert_frame_equal(res, expec)
+ self.assertEqual(res.a.dtype, np.object_)
+
def test_replace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
@@ -6163,7 +6652,7 @@ def test_replace(self):
df = DataFrame(index=['a', 'b'])
assert_frame_equal(df, df.replace(5, 7))
- def test_resplace_series_dict(self):
+ def test_replace_series_dict(self):
# from GH 3064
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
result = df.replace(0, {'zero': 0.5, 'one': 1.0})
@@ -6227,48 +6716,32 @@ def test_replace_mixed(self):
expected.iloc[1,1] = m[1]
assert_frame_equal(result,expected)
- def test_replace_interpolate(self):
- padded = self.tsframe.replace(nan, method='pad')
- assert_frame_equal(padded, self.tsframe.fillna(method='pad'))
+ def test_interpolate(self):
+ pass
+
+ def test_replace_value_is_none(self):
+ self.assertRaises(TypeError, self.tsframe.replace, nan, method='pad')
+ orig_value = self.tsframe.iloc[0, 0]
+ orig2 = self.tsframe.iloc[1, 0]
- result = self.tsframe.replace(to_replace={'A': nan}, method='pad',
+ self.tsframe.iloc[0, 0] = nan
+ self.tsframe.iloc[1, 0] = 1
+
+ result = self.tsframe.replace(to_replace={nan: 0}, method='pad',
axis=1)
expected = self.tsframe.T.replace(
- to_replace={'A': nan}, method='pad').T
+ to_replace={nan: 0}, method='pad').T
assert_frame_equal(result, expected)
- result = self.tsframe.replace(to_replace={'A': nan, 'B': -1e8},
+ result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8},
method='bfill')
tsframe = self.tsframe.copy()
- b = tsframe['B']
- b[b == -1e8] = nan
- tsframe['B'] = b
- expected = tsframe.fillna(method='bfill')
+ tsframe.iloc[0, 0] = 0
+ tsframe.iloc[1, 0] = -1e8
+ expected = tsframe
assert_frame_equal(expected, result)
-
- bfilled = self.tsframe.replace(nan, method='bfill')
- assert_frame_equal(bfilled, self.tsframe.fillna(method='bfill'))
-
- frame = self.tsframe.copy()
- frame[frame == 0] = 1
- frame.ix[-5:, 2] = 0
- result = frame.replace([nan, 0], method='pad')
-
- expected = frame.copy()
- expected[expected == 0] = nan
- expected = expected.fillna(method='pad')
- assert_frame_equal(result, expected)
-
- result = self.mixed_frame.replace(nan, method='pad', axis=1)
- expected = self.mixed_frame.fillna(method='pad', axis=1)
- assert_frame_equal(result, expected)
-
- # no nans
- self.tsframe['A'][:5] = 1e8
- result = self.tsframe.replace(1e8, method='bfill')
- self.tsframe['A'].replace(1e8, nan, inplace=True)
- expected = self.tsframe.fillna(method='bfill')
- assert_frame_equal(result, expected)
+ self.tsframe.iloc[0, 0] = orig_value
+ self.tsframe.iloc[1, 0] = orig2
def test_replace_for_new_dtypes(self):
@@ -6351,7 +6824,7 @@ def test_replace_input_formats(self):
expected[k] = v.replace(to_rep[k], 0)
assert_frame_equal(filled, DataFrame(expected))
- self.assertRaises(ValueError, df.replace, to_rep, [np.nan, 0, ''])
+ self.assertRaises(TypeError, df.replace, to_rep, [np.nan, 0, ''])
# scalar to dict
values = {'A': 0, 'B': -1, 'C': 'missing'}
@@ -6389,8 +6862,8 @@ def test_replace_axis(self):
zero_filled = self.tsframe.replace(nan, 0, axis=1)
assert_frame_equal(zero_filled, self.tsframe.fillna(0, axis=1))
- padded = self.tsframe.replace(nan, method='pad', axis=1)
- assert_frame_equal(padded, self.tsframe.fillna(method='pad', axis=1))
+ self.assertRaises(TypeError, self.tsframe.replace, method='pad',
+ axis=1)
# mixed type
self.mixed_frame['foo'][5:20] = nan
@@ -6400,22 +6873,9 @@ def test_replace_axis(self):
expected = self.mixed_frame.fillna(value=-1e8, axis=1)
assert_frame_equal(result, expected)
- def test_replace_limit(self):
- padded = self.tsframe.replace(nan, method='pad', limit=2)
- assert_frame_equal(padded, self.tsframe.fillna(method='pad',
- limit=2))
- bfilled = self.tsframe.replace(nan, method='bfill', limit=2)
- assert_frame_equal(padded, self.tsframe.fillna(method='bfill',
- limit=2))
-
- padded = self.tsframe.replace(nan, method='pad', axis=1, limit=2)
- assert_frame_equal(padded, self.tsframe.fillna(method='pad',
- axis=1, limit=2))
-
- bfill = self.tsframe.replace(nan, method='bfill', axis=1, limit=2)
- assert_frame_equal(padded, self.tsframe.fillna(method='bfill',
- axis=1, limit=2))
+ def test_replace_limit(self):
+ pass
def test_combine_multiple_frames_dtypes(self):
from pandas import concat
| addresses #2285. cc @jreback and #3582.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3584 | 2013-05-13T03:37:37Z | 2013-05-17T19:24:58Z | 2013-05-17T19:24:58Z | 2014-06-26T07:33:10Z |
DOC: add mention of idx* methods in max/min methods of Series/DataFrame | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index da3bbcb4f0dc2..f3c1a6617a5d5 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -401,6 +401,10 @@ value, ``idxmin`` and ``idxmax`` return the first matching index:
df3
df3['A'].idxmin()
+.. note::
+
+ ``idxmin`` and ``idxmax`` are called ``argmin`` and ``argmax`` in NumPy.
+
.. _basics.discretization:
Value counts (histogramming)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ad1429fcea1ca..86a3e79a2fcec 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4770,6 +4770,18 @@ def mean(self, axis=0, skipna=True, level=None):
extras='')
@Appender(_stat_doc)
def min(self, axis=0, skipna=True, level=None):
+ """
+ Notes
+ -----
+ This method returns the minimum of the values in the DataFrame. If you
+ want the *index* of the minimum, use ``DataFrame.idxmin``. This is the
+ equivalent of the ``numpy.ndarray`` method ``argmin``.
+
+ See Also
+ --------
+ DataFrame.idxmin
+ Series.idxmin
+ """
if level is not None:
return self._agg_by_level('min', axis=axis, level=level,
skipna=skipna)
@@ -4780,6 +4792,18 @@ def min(self, axis=0, skipna=True, level=None):
extras='')
@Appender(_stat_doc)
def max(self, axis=0, skipna=True, level=None):
+ """
+ Notes
+ -----
+ This method returns the maximum of the values in the DataFrame. If you
+ want the *index* of the maximum, use ``DataFrame.idxmax``. This is the
+ equivalent of the ``numpy.ndarray`` method ``argmax``.
+
+ See Also
+ --------
+ DataFrame.idxmax
+ Series.idxmax
+ """
if level is not None:
return self._agg_by_level('max', axis=axis, level=level,
skipna=skipna)
@@ -4939,6 +4963,14 @@ def idxmin(self, axis=0, skipna=True):
Returns
-------
idxmin : Series
+
+ Notes
+ -----
+ This method is the DataFrame version of ``ndarray.argmin``.
+
+ See Also
+ --------
+ Series.idxmin
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
@@ -4962,6 +4994,14 @@ def idxmax(self, axis=0, skipna=True):
Returns
-------
idxmax : Series
+
+ Notes
+ -----
+ This method is the DataFrame version of ``ndarray.argmax``.
+
+ See Also
+ --------
+ Series.idxmax
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 3509e226d46fb..cebf2f4ef9d1f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1516,6 +1516,18 @@ def mad(self, skipna=True, level=None):
na_action=_doc_exclude_na, extras='')
@Appender(_stat_doc)
def min(self, axis=None, out=None, skipna=True, level=None):
+ """
+ Notes
+ -----
+ This method returns the minimum of the values in the Series. If you
+ want the *index* of the minimum, use ``Series.idxmin``. This is the
+ equivalent of the ``numpy.ndarray`` method ``argmin``.
+
+ See Also
+ --------
+ Series.idxmin
+ DataFrame.idxmin
+ """
if level is not None:
return self._agg_by_level('min', level=level, skipna=skipna)
return nanops.nanmin(self.values, skipna=skipna)
@@ -1524,6 +1536,18 @@ def min(self, axis=None, out=None, skipna=True, level=None):
na_action=_doc_exclude_na, extras='')
@Appender(_stat_doc)
def max(self, axis=None, out=None, skipna=True, level=None):
+ """
+ Notes
+ -----
+ This method returns the maximum of the values in the Series. If you
+ want the *index* of the maximum, use ``Series.idxmax``. This is the
+ equivalent of the ``numpy.ndarray`` method ``argmax``.
+
+ See Also
+ --------
+ Series.idxmax
+ DataFrame.idxmax
+ """
if level is not None:
return self._agg_by_level('max', level=level, skipna=skipna)
return nanops.nanmax(self.values, skipna=skipna)
@@ -1592,6 +1616,14 @@ def idxmin(self, axis=None, out=None, skipna=True):
Returns
-------
idxmin : Index of minimum of values
+
+ Notes
+ -----
+ This method is the Series version of ``ndarray.argmin``.
+
+ See Also
+ --------
+ DataFrame.idxmin
"""
i = nanops.nanargmin(self.values, skipna=skipna)
if i == -1:
@@ -1610,6 +1642,14 @@ def idxmax(self, axis=None, out=None, skipna=True):
Returns
-------
idxmax : Index of minimum of values
+
+ Notes
+ -----
+ This method is the Series version of ``ndarray.argmax``.
+
+ See Also
+ --------
+ DataFrame.idxmax
"""
i = nanops.nanargmax(self.values, skipna=skipna)
if i == -1:
| closes #3574.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3580 | 2013-05-11T21:15:29Z | 2013-05-13T17:34:35Z | 2013-05-13T17:34:35Z | 2014-07-16T08:08:44Z |
ENH: allow to_csv to write multi-index columns, read_csv to read with header=list arg | diff --git a/RELEASE.rst b/RELEASE.rst
index acb4f429e81b0..74bafd419af54 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -34,6 +34,15 @@ pandas 0.11.1
courtesy of @cpcloud. (GH3477_)
- Support for reading Amazon S3 files. (GH3504_)
- Added module for reading and writing Stata files: pandas.io.stata (GH1512_)
+ - Added support for writing in ``to_csv`` and reading in ``read_csv``,
+ multi-index columns. The ``header`` option in ``read_csv`` now accepts a
+ list of the rows from which to read the index. Added the option,
+ ``tupleize_cols`` to provide compatiblity for the pre 0.11.1 behavior of
+ writing and reading multi-index columns via a list of tuples. The default in
+ 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
+ multi-index column.
+ Note: The default value will change in 0.12 to make the default *to* write and
+ read multi-index columns in the new format. (GH3571_, GH1651_, GH3141_)
**Improvements to existing features**
@@ -180,6 +189,7 @@ pandas 0.11.1
.. _GH3596: https://github.com/pydata/pandas/issues/3596
.. _GH3617: https://github.com/pydata/pandas/issues/3617
.. _GH3435: https://github.com/pydata/pandas/issues/3435
+<<<<<<< HEAD
.. _GH3611: https://github.com/pydata/pandas/issues/3611
.. _GH3062: https://github.com/pydata/pandas/issues/3062
.. _GH3624: https://github.com/pydata/pandas/issues/3624
@@ -187,6 +197,11 @@ pandas 0.11.1
.. _GH3601: https://github.com/pydata/pandas/issues/3601
.. _GH3631: https://github.com/pydata/pandas/issues/3631
.. _GH1512: https://github.com/pydata/pandas/issues/1512
+=======
+.. _GH3571: https://github.com/pydata/pandas/issues/3571
+.. _GH1651: https://github.com/pydata/pandas/issues/1651
+.. _GH3141: https://github.com/pydata/pandas/issues/3141
+>>>>>>> DOC: updated releasenotes, v0.11.1 whatsnew, io.rst
pandas 0.11.0
diff --git a/doc/source/io.rst b/doc/source/io.rst
index f15f758c42b18..42ea4a2ca5d53 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -57,7 +57,10 @@ They can take a number of arguments:
specified, data types will be inferred.
- ``header``: row number to use as the column names, and the start of the
data. Defaults to 0 if no ``names`` passed, otherwise ``None``. Explicitly
- pass ``header=0`` to be able to replace existing names.
+ pass ``header=0`` to be able to replace existing names. The header can be
+ a list of integers that specify row locations for a multi-index on the columns
+ E.g. [0,1,3]. Interveaning rows that are not specified will be skipped.
+ (E.g. 2 in this example are skipped)
- ``skiprows``: A collection of numbers for rows in the file to skip. Can
also be an integer to skip the first ``n`` rows
- ``index_col``: column number, column name, or list of column numbers/names,
@@ -112,6 +115,10 @@ They can take a number of arguments:
- ``error_bad_lines``: if False then any lines causing an error will be skipped :ref:`bad lines <io.bad_lines>`
- ``usecols``: a subset of columns to return, results in much faster parsing
time and lower memory usage.
+ - ``mangle_dupe_cols``: boolean, default True, then duplicate columns will be specified
+ as 'X.0'...'X.N', rather than 'X'...'X'
+ - ``tupleize_cols``: boolean, default True, if False, convert a list of tuples
+ to a multi-index of columns, otherwise, leave the column index as a list of tuples
.. ipython:: python
:suppress:
@@ -762,6 +769,36 @@ column numbers to turn multiple columns into a ``MultiIndex``:
df
df.ix[1978]
+.. _io.multi_index_columns:
+
+Specifying a multi-index columns
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By specifying list of row locations for the ``header`` argument, you
+can read in a multi-index for the columns. Specifying non-consecutive
+rows will skip the interveaing rows.
+
+.. ipython:: python
+
+ from pandas.util.testing import makeCustomDataframe as mkdf
+ df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
+ df.to_csv('mi.csv',tupleize_cols=False)
+ print open('mi.csv').read()
+ pd.read_csv('mi.csv',header=[0,1,2,3],index_col=[0,1],tupleize_cols=False)
+
+Note: The default behavior in 0.11.1 remains unchanged (``tupleize_cols=True``),
+but starting with 0.12, the default *to* write and read multi-index columns will be in the new
+format (``tupleize_cols=False``)
+
+Note: If an ``index_col`` is not specified (e.g. you don't have an index, or wrote it
+with ``df.to_csv(..., index=False``), then any ``names`` on the columns index will be *lost*.
+
+.. ipython:: python
+ :suppress:
+
+ import os
+ os.remove('mi.csv')
+
.. _io.sniff:
Automatically "sniffing" the delimiter
@@ -845,6 +882,8 @@ function takes a number of arguments. Only the first is required.
- ``sep`` : Field delimiter for the output file (default ",")
- ``encoding``: a string representing the encoding to use if the contents are
non-ascii, for python versions prior to 3
+ - ``tupleize_cols``: boolean, default True, if False, write as a list of tuples,
+ otherwise write in an expanded line format suitable for ``read_csv``
Writing a formatted string
~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -876,6 +915,9 @@ The Series object also has a ``to_string`` method, but with only the ``buf``,
which, if set to ``True``, will additionally output the length of the Series.
+HTML
+----
+
Reading HTML format
~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index aed95188db26e..a724ce96a7381 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -73,6 +73,7 @@ Enhancements
an index with a different frequency than the existing, or attempting
to append an index with a different name than the existing
- support datelike columns with a timezone as data_columns (GH2852_)
+
- ``fillna`` methods now raise a ``TypeError`` if the ``value`` parameter is
a list or tuple.
- Added module for reading and writing Stata files: pandas.io.stata (GH1512_)
@@ -80,6 +81,39 @@ Enhancements
``Series`` with object dtype. See the examples section in the regular docs
:ref:`Replacing via String Expression <missing_data.replace_expression>`
+ - Multi-index column support for reading and writing csvs
+
+ - The ``header`` option in ``read_csv`` now accepts a
+ list of the rows from which to read the index.
+
+ - The option, ``tupleize_cols`` can now be specified in both ``to_csv`` and
+ ``read_csv``, to provide compatiblity for the pre 0.11.1 behavior of
+ writing and reading multi-index columns via a list of tuples. The default in
+ 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
+ multi-index column.
+
+ Note: The default behavior in 0.11.1 remains unchanged, but starting with 0.12,
+ the default *to* write and read multi-index columns will be in the new
+ format. (GH3571_, GH1651_, GH3141_)
+
+ - If an ``index_col`` is not specified (e.g. you don't have an index, or wrote it
+ with ``df.to_csv(..., index=False``), then any ``names`` on the columns index will
+ be *lost*.
+
+ .. ipython:: python
+
+ from pandas.util.testing import makeCustomDataframe as mkdf
+ df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
+ df.to_csv('mi.csv',tupleize_cols=False)
+ print open('mi.csv').read()
+ pd.read_csv('mi.csv',header=[0,1,2,3],index_col=[0,1],tupleize_cols=False)
+
+ .. ipython:: python
+ :suppress:
+
+ import os
+ os.remove('mi.csv')
+
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
on GitHub for a complete list.
@@ -96,3 +130,6 @@ on GitHub for a complete list.
.. _GH1512: https://github.com/pydata/pandas/issues/1512
.. _GH2285: https://github.com/pydata/pandas/issues/2285
.. _GH3631: https://github.com/pydata/pandas/issues/3631
+.. _GH3571: https://github.com/pydata/pandas/issues/3571
+.. _GH1651: https://github.com/pydata/pandas/issues/1651
+.. _GH3141: https://github.com/pydata/pandas/issues/3141
diff --git a/pandas/core/format.py b/pandas/core/format.py
index bea4b59bfaaa4..cd4364edc6662 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -772,9 +772,10 @@ def grouper(x):
class CSVFormatter(object):
def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None,
- cols=None, header=True, index=True, index_label=None,
- mode='w', nanRep=None, encoding=None, quoting=None,
- line_terminator='\n', chunksize=None, engine=None):
+ cols=None, header=True, index=True, index_label=None,
+ mode='w', nanRep=None, encoding=None, quoting=None,
+ line_terminator='\n', chunksize=None, engine=None,
+ tupleize_cols=True):
self.engine = engine # remove for 0.12
@@ -803,6 +804,15 @@ def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None,
msg= "columns.is_unique == False not supported with engine='python'"
raise NotImplementedError(msg)
+ self.tupleize_cols = tupleize_cols
+ self.has_mi_columns = isinstance(obj.columns, MultiIndex
+ ) and not self.tupleize_cols
+
+ # validate mi options
+ if self.has_mi_columns:
+ if cols is not None:
+ raise Exception("cannot specify cols with a multi_index on the columns")
+
if cols is not None:
if isinstance(cols,Index):
cols = cols.to_native_types(na_rep=na_rep,float_format=float_format)
@@ -958,48 +968,82 @@ def _save_header(self):
obj = self.obj
index_label = self.index_label
cols = self.cols
+ has_mi_columns = self.has_mi_columns
header = self.header
+ encoded_labels = []
has_aliases = isinstance(header, (tuple, list, np.ndarray))
- if has_aliases or self.header:
- if self.index:
- # should write something for index label
- if index_label is not False:
- if index_label is None:
- if isinstance(obj.index, MultiIndex):
- index_label = []
- for i, name in enumerate(obj.index.names):
- if name is None:
- name = ''
- index_label.append(name)
+ if not (has_aliases or self.header):
+ return
+
+ if self.index:
+ # should write something for index label
+ if index_label is not False:
+ if index_label is None:
+ if isinstance(obj.index, MultiIndex):
+ index_label = []
+ for i, name in enumerate(obj.index.names):
+ if name is None:
+ name = ''
+ index_label.append(name)
+ else:
+ index_label = obj.index.name
+ if index_label is None:
+ index_label = ['']
else:
- index_label = obj.index.name
- if index_label is None:
- index_label = ['']
- else:
- index_label = [index_label]
- elif not isinstance(index_label, (list, tuple, np.ndarray)):
- # given a string for a DF with Index
- index_label = [index_label]
+ index_label = [index_label]
+ elif not isinstance(index_label, (list, tuple, np.ndarray)):
+ # given a string for a DF with Index
+ index_label = [index_label]
- encoded_labels = list(index_label)
- else:
- encoded_labels = []
+ encoded_labels = list(index_label)
+ else:
+ encoded_labels = []
- if has_aliases:
- if len(header) != len(cols):
- raise ValueError(('Writing %d cols but got %d aliases'
- % (len(cols), len(header))))
- else:
- write_cols = header
+ if has_aliases:
+ if len(header) != len(cols):
+ raise ValueError(('Writing %d cols but got %d aliases'
+ % (len(cols), len(header))))
else:
- write_cols = cols
- encoded_cols = list(write_cols)
-
- writer.writerow(encoded_labels + encoded_cols)
+ write_cols = header
else:
- encoded_cols = list(cols)
- writer.writerow(encoded_cols)
+ write_cols = cols
+
+ if not has_mi_columns:
+ encoded_labels += list(write_cols)
+
+ else:
+
+ if not has_mi_columns:
+ encoded_labels += list(cols)
+
+ # write out the mi
+ if has_mi_columns:
+ columns = obj.columns
+
+ # write out the names for each level, then ALL of the values for each level
+ for i in range(columns.nlevels):
+
+ # we need at least 1 index column to write our col names
+ col_line = []
+ if self.index:
+
+ # name is the first column
+ col_line.append( columns.names[i] )
+
+ if isinstance(index_label,list) and len(index_label)>1:
+ col_line.extend([ '' ] * (len(index_label)-1))
+
+ col_line.extend(columns.get_level_values(i))
+
+ writer.writerow(col_line)
+
+ # add blanks for the columns, so that we
+ # have consistent seps
+ encoded_labels.extend([ '' ] * len(columns))
+
+ # write out the index label line
+ writer.writerow(encoded_labels)
def _save(self):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 39742557ccc56..d91d21db3ec1b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1250,7 +1250,7 @@ def _from_arrays(cls, arrays, columns, index, dtype=None):
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0,
- parse_dates=True, encoding=None):
+ parse_dates=True, encoding=None, tupleize_cols=False):
"""
Read delimited file into DataFrame
@@ -1266,6 +1266,9 @@ def from_csv(cls, path, header=0, sep=',', index_col=0,
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
+ tupleize_cols : boolean, default True
+ write multi_index columns as a list of tuples (if True)
+ or new (expanded format) if False)
Notes
-----
@@ -1280,7 +1283,7 @@ def from_csv(cls, path, header=0, sep=',', index_col=0,
from pandas.io.parsers import read_table
return read_table(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
- encoding=encoding)
+ encoding=encoding,tupleize_cols=False)
@classmethod
def from_dta(dta, path, parse_dates=True, convert_categoricals=True, encoding=None, index_col=None):
@@ -1391,7 +1394,8 @@ def to_panel(self):
def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
cols=None, header=True, index=True, index_label=None,
mode='w', nanRep=None, encoding=None, quoting=None,
- line_terminator='\n', chunksize=None,**kwds):
+ line_terminator='\n', chunksize=None,
+ tupleize_cols=True, **kwds):
"""
Write DataFrame to a comma-separated values (csv) file
@@ -1429,6 +1433,9 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL
chunksize : rows to write at a time
+ tupleize_cols : boolean, default True
+ write multi_index columns as a list of tuples (if True)
+ or new (expanded format) if False)
"""
if nanRep is not None: # pragma: no cover
import warnings
@@ -1445,7 +1452,8 @@ def to_csv(self, path_or_buf, sep=",", na_rep='', float_format=None,
float_format=float_format, cols=cols,
header=header, index=index,
index_label=index_label,mode=mode,
- chunksize=chunksize,engine=kwds.get("engine") )
+ chunksize=chunksize,engine=kwds.get("engine"),
+ tupleize_cols=tupleize_cols)
formatter.save()
def to_excel(self, excel_writer, sheet_name='sheet1', na_rep='',
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 044b25041afd9..61be871e62595 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -52,9 +52,11 @@ class DateConversionError(Exception):
dialect : string or csv.Dialect instance, default None
If None defaults to Excel dialect. Ignored if sep longer than 1 char
See csv.Dialect documentation for more details
-header : int, default 0 if names parameter not specified, otherwise None
+header : int, default 0 if names parameter not specified,
Row to use for the column labels of the parsed DataFrame. Specify None if
- there is no header row.
+ there is no header row. Can be a list of integers that specify row
+ locations for a multi-index on the columns E.g. [0,1,3]. Interveaning
+ rows that are not specified (E.g. 2 in this example are skipped)
skiprows : list-like or integer
Row numbers to skip (0-indexed) or number of rows to skip (int)
at the start of the file
@@ -125,6 +127,11 @@ class DateConversionError(Exception):
usecols : array-like
Return a subset of the columns.
Results in much faster parsing time and lower memory usage.
+mangle_dupe_cols: boolean, default True
+ Duplicate columns will be specified as 'X.0'...'X.N', rather than 'X'...'X'
+tupleize_cols: boolean, default False
+ Leave a list of tuples on columns as is (default is to convert to
+ a Multi Index on the columns)
Returns
-------
@@ -292,6 +299,7 @@ def _read(filepath_or_buffer, kwds):
'squeeze': False,
'compression': None,
'mangle_dupe_cols': True,
+ 'tupleize_cols':True,
}
@@ -378,7 +386,8 @@ def parser_f(filepath_or_buffer,
verbose=False,
encoding=None,
squeeze=False,
- mangle_dupe_cols=True
+ mangle_dupe_cols=True,
+ tupleize_cols=True,
):
# Alias sep -> delimiter.
@@ -436,7 +445,8 @@ def parser_f(filepath_or_buffer,
error_bad_lines=error_bad_lines,
low_memory=low_memory,
buffer_lines=buffer_lines,
- mangle_dupe_cols=mangle_dupe_cols
+ mangle_dupe_cols=mangle_dupe_cols,
+ tupleize_cols=tupleize_cols,
)
return _read(filepath_or_buffer, kwds)
@@ -677,10 +687,8 @@ def read(self, nrows=None):
if self.options.get('as_recarray'):
return ret
- index, columns, col_dict = ret
-
# May alter columns / col_dict
- # index, columns, col_dict = self._create_index(col_dict, columns)
+ index, columns, col_dict = self._create_index(ret)
df = DataFrame(col_dict, columns=columns, index=index)
@@ -688,8 +696,9 @@ def read(self, nrows=None):
return df[df.columns[0]]
return df
- def _create_index(self, col_dict, columns):
- pass
+ def _create_index(self, ret):
+ index, columns, col_dict = ret
+ return index, columns, col_dict
def get_chunk(self, size=None):
if size is None:
@@ -709,6 +718,7 @@ def __init__(self, kwds):
self.index_col = kwds.pop('index_col', None)
self.index_names = None
+ self.col_names = None
self.parse_dates = kwds.pop('parse_dates', False)
self.date_parser = kwds.pop('date_parser', None)
@@ -718,10 +728,31 @@ def __init__(self, kwds):
self.na_values = kwds.get('na_values')
self.true_values = kwds.get('true_values')
self.false_values = kwds.get('false_values')
+ self.tupleize_cols = kwds.get('tupleize_cols',True)
self._date_conv = _make_date_converter(date_parser=self.date_parser,
dayfirst=self.dayfirst)
+ # validate header options for mi
+ self.header = kwds.get('header')
+ if isinstance(self.header,(list,tuple,np.ndarray)):
+ if kwds.get('as_recarray'):
+ raise Exception("cannot specify as_recarray when "
+ "specifying a multi-index header")
+ if kwds.get('usecols'):
+ raise Exception("cannot specify usecols when "
+ "specifying a multi-index header")
+ if kwds.get('names'):
+ raise Exception("cannot specify names when "
+ "specifying a multi-index header")
+
+ # validate index_col that only contains integers
+ if self.index_col is not None:
+ if not (isinstance(self.index_col,(list,tuple,np.ndarray)) and all(
+ [ com.is_integer(i) for i in self.index_col ]) or com.is_integer(self.index_col)):
+ raise Exception("index_col must only contain row numbers "
+ "when specifying a multi-index header")
+
self._name_processed = False
@property
@@ -743,7 +774,62 @@ def _should_parse_dates(self, i):
else:
return (j in self.parse_dates) or (name in self.parse_dates)
- def _make_index(self, data, alldata, columns):
+
+ def _extract_multi_indexer_columns(self, header, index_names, col_names, passed_names=False):
+ """ extract and return the names, index_names, col_names
+ header is a list-of-lists returned from the parsers """
+ if len(header) < 2:
+ return header[0], index_names, col_names, passed_names
+
+ # the names are the tuples of the header that are not the index cols
+ # 0 is the name of the index, assuming index_col is a list of column
+ # numbers
+ ic = self.index_col
+ if ic is None:
+ ic = []
+
+ if not isinstance(ic, (list,tuple,np.ndarray)):
+ ic = [ ic ]
+ sic = set(ic)
+
+ orig_header = list(header)
+
+ # clean the index_names
+ index_names = header.pop(-1)
+ (index_names, names,
+ index_col) = _clean_index_names(index_names, self.index_col)
+
+ # extract the columns
+ field_count = len(header[0])
+ def extract(r):
+ return tuple([ r[i] for i in range(field_count) if i not in sic ])
+ columns = zip(*[ extract(r) for r in header ])
+ names = ic + columns
+
+ # if we find 'Unnamed' all of a single level, then our header was too long
+ for n in range(len(columns[0])):
+ if all([ 'Unnamed' in c[n] for c in columns ]):
+ raise _parser.CParserError("Passed header=[%s] are too many rows for this "
+ "multi_index of columns" % ','.join([ str(x) for x in self.header ]))
+
+ # clean the column names (if we have an index_col)
+ if len(ic):
+ col_names = [ r[0] if len(r[0]) and 'Unnamed' not in r[0] else None for r in header ]
+ else:
+ col_names = [ None ] * len(header)
+
+ passed_names = True
+
+ return names, index_names, col_names, passed_names
+
+ def _maybe_make_multi_index_columns(self, columns, col_names=None):
+ # possibly create a column mi here
+ if not self.tupleize_cols and len(columns) and not isinstance(
+ columns, MultiIndex) and all([ isinstance(c,tuple) for c in columns]):
+ columns = MultiIndex.from_tuples(columns,names=col_names)
+ return columns
+
+ def _make_index(self, data, alldata, columns, indexnamerow=False):
if not _is_index_col(self.index_col) or len(self.index_col) == 0:
index = None
@@ -760,7 +846,15 @@ def _make_index(self, data, alldata, columns):
index = self._get_complex_date_index(data, columns)
index = self._agg_index(index, try_parse_dates=False)
- return index
+ # add names for the index
+ if indexnamerow:
+ coffset = len(indexnamerow) - len(columns)
+ index.names = indexnamerow[:coffset]
+
+ # maybe create a mi on the columns
+ columns = self._maybe_make_multi_index_columns(columns, self.col_names)
+
+ return index, columns
_implicit_index = False
@@ -942,7 +1036,12 @@ def __init__(self, src, **kwds):
if self._reader.header is None:
self.names = None
else:
- self.names = list(self._reader.header)
+ if len(self._reader.header) > 1:
+ # we have a multi index in the columns
+ self.names, self.index_names, self.col_names, passed_names = self._extract_multi_indexer_columns(
+ self._reader.header, self.index_names, self.col_names, passed_names)
+ else:
+ self.names = list(self._reader.header[0])
if self.names is None:
if self.prefix:
@@ -958,12 +1057,14 @@ def __init__(self, src, **kwds):
if not self._has_complex_date_col:
if (self._reader.leading_cols == 0 and
- _is_index_col(self.index_col)):
+ _is_index_col(self.index_col)):
self._name_processed = True
- (self.index_names, self.names,
- self.index_col) = _clean_index_names(self.names,
- self.index_col)
+ (index_names, self.names,
+ self.index_col) = _clean_index_names(self.names, self.index_col)
+
+ if self.index_names is None:
+ self.index_names = index_names
if self._reader.header is None and not passed_names:
self.index_names = [None] * len(self.index_names)
@@ -1049,7 +1150,10 @@ def read(self, nrows=None):
data = dict((k, v) for k, (i, v) in zip(names, data))
names, data = self._do_date_conversions(names, data)
- index = self._make_index(data, alldata, names)
+ index, names = self._make_index(data, alldata, names)
+
+ # maybe create a mi on the columns
+ names = self._maybe_make_multi_index_columns(names, self.col_names)
return index, names, data
@@ -1061,7 +1165,7 @@ def _filter_usecols(self, names):
return names
def _get_index_names(self):
- names = list(self._reader.header)
+ names = list(self._reader.header[0])
idx_names = None
if self._reader.leading_cols == 0 and self.index_col is not None:
@@ -1169,7 +1273,6 @@ def __init__(self, f, **kwds):
raise Exception("usecols not supported with engine='python'"
" or multicharacter separators (yet).")
- self.header = kwds['header']
self.encoding = kwds['encoding']
self.compression = kwds['compression']
self.skiprows = kwds['skiprows']
@@ -1208,6 +1311,13 @@ def __init__(self, f, **kwds):
self.data = f
self.columns = self._infer_columns()
+ # we are processing a multi index column
+ if len(self.columns) > 1:
+ self.columns, self.index_names, self.col_names, _ = self._extract_multi_indexer_columns(
+ self.columns, self.index_names, self.col_names)
+ else:
+ self.columns = self.columns[0]
+
# get popped off for index
self.orig_names = list(self.columns)
@@ -1215,9 +1325,11 @@ def __init__(self, f, **kwds):
# multiple date column thing turning into a real spaghetti factory
if not self._has_complex_date_col:
- (self.index_names,
+ (index_names,
self.orig_names, _) = self._get_index_name(self.columns)
self._name_processed = True
+ if self.index_names is None:
+ self.index_names = index_names
self._first_chunk = True
def _make_reader(self, f):
@@ -1321,10 +1433,7 @@ def read(self, rows=None):
columns, data = self._do_date_conversions(self.columns, data)
data = self._convert_data(data)
- index = self._make_index(data, alldata, columns)
- if indexnamerow:
- coffset = len(indexnamerow) - len(columns)
- index.names = indexnamerow[:coffset]
+ index, columns = self._make_index(data, alldata, columns, indexnamerow)
return index, columns, data
@@ -1350,36 +1459,58 @@ def _infer_columns(self):
names = self.names
if self.header is not None:
- if len(self.buf) > 0:
- line = self.buf[0]
- else:
- line = self._next_line()
+ header = self.header
- while self.pos <= self.header:
- line = self._next_line()
+ # we have a mi columns, so read and extra line
+ if isinstance(header,(list,tuple,np.ndarray)):
+ have_mi_columns = True
+ header = list(header) + [header[-1]+1]
+ else:
+ have_mi_columns = False
+ header = [ header ]
columns = []
- for i, c in enumerate(line):
- if c == '':
- columns.append('Unnamed: %d' % i)
+ for level, hr in enumerate(header):
+
+ if len(self.buf) > 0:
+ line = self.buf[0]
else:
- columns.append(c)
+ line = self._next_line()
+
+ while self.pos <= hr:
+ line = self._next_line()
- if self.mangle_dupe_cols:
- counts = {}
- for i, col in enumerate(columns):
- cur_count = counts.get(col, 0)
- if cur_count > 0:
- columns[i] = '%s.%d' % (col, cur_count)
- counts[col] = cur_count + 1
+ this_columns = []
+ for i, c in enumerate(line):
+ if c == '':
+ if have_mi_columns:
+ this_columns.append('Unnamed: %d_level_%d' % (i,level))
+ else:
+ this_columns.append('Unnamed: %d' % i)
+ else:
+ this_columns.append(c)
+
+ if not have_mi_columns:
+ if self.mangle_dupe_cols:
+ counts = {}
+ for i, col in enumerate(this_columns):
+ cur_count = counts.get(col, 0)
+ if cur_count > 0:
+ this_columns[i] = '%s.%d' % (col, cur_count)
+ counts[col] = cur_count + 1
+
+ columns.append(this_columns)
self._clear_buffer()
if names is not None:
- if len(names) != len(columns):
+ if len(names) != len(columns[0]):
raise Exception('Number of passed names did not match '
'number of header fields in the file')
- columns = names
+ if len(columns) > 1:
+ raise Exception('Cannot pass names with multi-index columns')
+ columns = [ names ]
+
else:
if len(self.buf) > 0:
line = self.buf[0]
@@ -1389,11 +1520,11 @@ def _infer_columns(self):
ncols = len(line)
if not names:
if self.prefix:
- columns = ['X%d' % i for i in range(ncols)]
+ columns = [ ['X%d' % i for i in range(ncols)] ]
else:
- columns = range(ncols)
+ columns = [ range(ncols) ]
else:
- columns = names
+ columns = [ names ]
return columns
diff --git a/pandas/io/tests/test_cparser.py b/pandas/io/tests/test_cparser.py
index b352b189a74b8..0c5b168ee8de5 100644
--- a/pandas/io/tests/test_cparser.py
+++ b/pandas/io/tests/test_cparser.py
@@ -179,7 +179,7 @@ def test_header_not_enough_lines(self):
reader = TextReader(StringIO(data), delimiter=',', header=2,
as_recarray=True)
header = reader.header
- expected = ['a', 'b', 'c']
+ expected = [['a', 'b', 'c']]
self.assertEquals(header, expected)
recs = reader.read()
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 38a31c042d120..be47f28749848 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -20,6 +20,7 @@
TextFileReader, TextParser)
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
+ makeCustomDataframe as mkdf,
network,
ensure_clean)
import pandas.util.testing as tm
@@ -994,6 +995,49 @@ def test_header_not_first_line(self):
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
+ def test_header_multi_index(self):
+ expected = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
+
+ data = """\
+C0,,C_l0_g0,C_l0_g1,C_l0_g2
+
+C1,,C_l1_g0,C_l1_g1,C_l1_g2
+C2,,C_l2_g0,C_l2_g1,C_l2_g2
+C3,,C_l3_g0,C_l3_g1,C_l3_g2
+R0,R1,,,
+R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
+R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
+R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
+R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
+R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
+"""
+
+ # basic test with both engines
+ for engine in ['c','python']:
+ df = read_csv(StringIO(data), header=[0,2,3,4],index_col=[0,1], tupleize_cols=False,
+ engine=engine)
+ tm.assert_frame_equal(df, expected)
+
+ # skipping lines in the header
+ df = read_csv(StringIO(data), header=[0,2,3,4],index_col=[0,1], tupleize_cols=False)
+ tm.assert_frame_equal(df, expected)
+
+ #### invalid options ####
+
+ # no as_recarray
+ self.assertRaises(Exception, read_csv, StringIO(data), header=[0,1,2,3],
+ index_col=[0,1], as_recarray=True, tupleize_cols=False)
+
+ # names
+ self.assertRaises(Exception, read_csv, StringIO(data), header=[0,1,2,3],
+ index_col=[0,1], names=['foo','bar'], tupleize_cols=False)
+ # usecols
+ self.assertRaises(Exception, read_csv, StringIO(data), header=[0,1,2,3],
+ index_col=[0,1], usecols=['foo','bar'], tupleize_cols=False)
+ # non-numeric index_col
+ self.assertRaises(Exception, read_csv, StringIO(data), header=[0,1,2,3],
+ index_col=['foo','bar'], tupleize_cols=False)
+
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
diff --git a/pandas/src/parser.pyx b/pandas/src/parser.pyx
index 694a769641b0d..ee92e2e60960c 100644
--- a/pandas/src/parser.pyx
+++ b/pandas/src/parser.pyx
@@ -143,6 +143,8 @@ cdef extern from "parser/tokenizer.h":
char thousands
int header # Boolean: 1: has header, 0: no header
+ int header_start # header row start
+ int header_end # header row end
void *skipset
int skip_footer
@@ -230,7 +232,7 @@ cdef class TextReader:
cdef:
parser_t *parser
object file_handle
- bint factorize, na_filter, verbose, has_usecols
+ bint factorize, na_filter, verbose, has_usecols, has_mi_columns
int parser_start
list clocks
char *c_encoding
@@ -242,7 +244,7 @@ cdef class TextReader:
object na_values, true_values, false_values
object memory_map
object as_recarray
- object header, names
+ object header, orig_header, names, header_start, header_end
object low_memory
object skiprows
object compact_ints, use_unsigned
@@ -250,12 +252,15 @@ cdef class TextReader:
object encoding
object compression
object mangle_dupe_cols
+ object tupleize_cols
set noconvert, usecols
def __cinit__(self, source,
delimiter=b',',
header=0,
+ header_start=0,
+ header_end=0,
names=None,
memory_map=False,
@@ -300,12 +305,14 @@ cdef class TextReader:
skiprows=None,
skip_footer=0,
verbose=False,
- mangle_dupe_cols=True):
+ mangle_dupe_cols=True,
+ tupleize_cols=True):
self.parser = parser_new()
self.parser.chunksize = tokenize_chunksize
self.mangle_dupe_cols=mangle_dupe_cols
+ self.tupleize_cols=tupleize_cols
# For timekeeping
self.clocks = []
@@ -433,13 +440,34 @@ cdef class TextReader:
self.leading_cols = 0
# TODO: no header vs. header is not the first row
+ self.has_mi_columns = 0
+ self.orig_header = header
if header is None:
# sentinel value
+ self.parser.header_start = -1
+ self.parser.header_end = -1
self.parser.header = -1
self.parser_start = 0
+ self.header = []
else:
- self.parser.header = header
- self.parser_start = header + 1
+ if isinstance(header, list) and len(header):
+ # need to artifically skip the final line
+ # which is still a header line
+ header = list(header)
+ header.append(header[-1]+1)
+
+ self.parser.header_start = header[0]
+ self.parser.header_end = header[-1]
+ self.parser.header = header[0]
+ self.parser_start = header[-1] + 1
+ self.has_mi_columns = 1
+ self.header = header
+ else:
+ self.parser.header_start = header
+ self.parser.header_end = header
+ self.parser.header = header
+ self.parser_start = header + 1
+ self.header = [ header ]
self.names = names
self.header, self.table_width = self._get_header()
@@ -534,8 +562,10 @@ cdef class TextReader:
' got %s type' % type(source))
cdef _get_header(self):
+ # header is now a list of lists, so field_count should use header[0]
+
cdef:
- size_t i, start, data_line, field_count, passed_count
+ size_t i, start, data_line, field_count, passed_count, hr
char *word
object name
int status
@@ -544,49 +574,59 @@ cdef class TextReader:
header = []
- if self.parser.header >= 0:
- # Header is in the file
+ if self.parser.header_start >= 0:
- if self.parser.lines < self.parser.header + 1:
- self._tokenize_rows(self.parser.header + 2)
+ # Header is in the file
+ for level, hr in enumerate(self.header):
- # e.g., if header=3 and file only has 2 lines
- if self.parser.lines < self.parser.header + 1:
- raise CParserError('Passed header=%d but only %d lines in file'
- % (self.parser.header, self.parser.lines))
+ this_header = []
- field_count = self.parser.line_fields[self.parser.header]
- start = self.parser.line_start[self.parser.header]
+ if self.parser.lines < hr + 1:
+ self._tokenize_rows(hr + 2)
- # TODO: Py3 vs. Py2
- counts = {}
- for i in range(field_count):
- word = self.parser.words[start + i]
+ # e.g., if header=3 and file only has 2 lines
+ if self.parser.lines < hr + 1:
+ msg = self.orig_header
+ if isinstance(msg,list):
+ msg = "[%s], len of %d," % (','.join([ str(m) for m in msg ]),len(msg))
+ raise CParserError('Passed header=%s but only %d lines in file'
+ % (msg, self.parser.lines))
- if self.c_encoding == NULL and not PY3:
- name = PyBytes_FromString(word)
- else:
- if self.c_encoding == NULL or self.c_encoding == b'utf-8':
- name = PyUnicode_FromString(word)
- else:
- name = PyUnicode_Decode(word, strlen(word),
- self.c_encoding, errors)
+ field_count = self.parser.line_fields[hr]
+ start = self.parser.line_start[hr]
- if name == '':
- name = 'Unnamed: %d' % i
+ # TODO: Py3 vs. Py2
+ counts = {}
+ for i in range(field_count):
+ word = self.parser.words[start + i]
+ if self.c_encoding == NULL and not PY3:
+ name = PyBytes_FromString(word)
+ else:
+ if self.c_encoding == NULL or self.c_encoding == b'utf-8':
+ name = PyUnicode_FromString(word)
+ else:
+ name = PyUnicode_Decode(word, strlen(word),
+ self.c_encoding, errors)
+
+ if name == '':
+ if self.has_mi_columns:
+ name = 'Unnamed: %d_level_%d' % (i,level)
+ else:
+ name = 'Unnamed: %d' % i
+
+ count = counts.get(name, 0)
+ if count > 0 and self.mangle_dupe_cols and not self.has_mi_columns:
+ this_header.append('%s.%d' % (name, count))
+ else:
+ this_header.append(name)
+ counts[name] = count + 1
- count = counts.get(name, 0)
- if count > 0 and self.mangle_dupe_cols:
- header.append('%s.%d' % (name, count))
- else:
- header.append(name)
- counts[name] = count + 1
-
- data_line = self.parser.header + 1
+ data_line = hr + 1
+ header.append(this_header)
if self.names is not None:
- header = self.names
+ header = [ self.names ]
elif self.names is not None:
# Enforce this unless usecols
@@ -597,11 +637,11 @@ cdef class TextReader:
if self.parser.lines < 1:
self._tokenize_rows(1)
- header = self.names
+ header = [ self.names ]
data_line = 0
if self.parser.lines < 1:
- field_count = len(header)
+ field_count = len(header[0])
else:
field_count = self.parser.line_fields[data_line]
else:
@@ -613,7 +653,7 @@ cdef class TextReader:
# Corner case, not enough lines in the file
if self.parser.lines < data_line + 1:
- field_count = len(header)
+ field_count = len(header[0])
else: # not self.has_usecols:
field_count = self.parser.line_fields[data_line]
@@ -622,7 +662,7 @@ cdef class TextReader:
if self.names is not None:
field_count = max(field_count, len(self.names))
- passed_count = len(header)
+ passed_count = len(header[0])
# if passed_count > field_count:
# raise CParserError('Column names have %d fields, '
@@ -1038,10 +1078,10 @@ cdef class TextReader:
if self.header is not None:
j = i - self.leading_cols
# hack for #2442
- if j == len(self.header):
+ if j == len(self.header[0]):
return j
else:
- return self.header[j]
+ return self.header[0][j]
else:
return None
@@ -1762,6 +1802,9 @@ def _to_structured_array(dict columns, object names):
if names is None:
names = ['%d' % i for i in range(len(columns))]
+ else:
+ # single line header
+ names = names[0]
dt = np.dtype([(str(name), columns[i].dtype)
for i, name in enumerate(names)])
diff --git a/pandas/src/parser/tokenizer.c b/pandas/src/parser/tokenizer.c
index 09cddd07e1c1d..81fda37acbb71 100644
--- a/pandas/src/parser/tokenizer.c
+++ b/pandas/src/parser/tokenizer.c
@@ -463,7 +463,7 @@ static int end_line(parser_t *self) {
/* printf("Line: %d, Fields: %d, Ex-fields: %d\n", self->lines, fields, ex_fields); */
- if (!(self->lines <= self->header + 1)
+ if (!(self->lines <= self->header_end + 1)
&& (self->expected_fields < 0 && fields > ex_fields)) {
// increment file line count
self->file_lines++;
@@ -498,7 +498,7 @@ static int end_line(parser_t *self) {
}
else {
/* missing trailing delimiters */
- if ((self->lines >= self->header + 1) && fields < ex_fields) {
+ if ((self->lines >= self->header_end + 1) && fields < ex_fields) {
/* Might overrun the buffer when closing fields */
if (make_stream_space(self, ex_fields - fields) < 0) {
diff --git a/pandas/src/parser/tokenizer.h b/pandas/src/parser/tokenizer.h
index 566e89ae5f9a7..5ba1b99a29d39 100644
--- a/pandas/src/parser/tokenizer.h
+++ b/pandas/src/parser/tokenizer.h
@@ -195,6 +195,8 @@ typedef struct parser_t {
char thousands;
int header; // Boolean: 1: has header, 0: no header
+ int header_start; // header row start
+ int header_end; // header row end
void *skipset;
int skip_footer;
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 8e48ef094c419..fa2e8131b6916 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4755,9 +4755,15 @@ def test_to_csv_moar(self):
def _do_test(df,path,r_dtype=None,c_dtype=None,rnlvl=None,cnlvl=None,
dupe_col=False):
- with ensure_clean(path) as path:
- df.to_csv(path,encoding='utf8',chunksize=chunksize)
- recons = DataFrame.from_csv(path,parse_dates=False)
+ if cnlvl:
+ header = range(cnlvl)
+ with ensure_clean(path) as path:
+ df.to_csv(path,encoding='utf8',chunksize=chunksize,tupleize_cols=False)
+ recons = DataFrame.from_csv(path,header=range(cnlvl),tupleize_cols=False,parse_dates=False)
+ else:
+ with ensure_clean(path) as path:
+ df.to_csv(path,encoding='utf8',chunksize=chunksize)
+ recons = DataFrame.from_csv(path,header=0,parse_dates=False)
def _to_uni(x):
if not isinstance(x,unicode):
@@ -4773,16 +4779,6 @@ def _to_uni(x):
recons.index = ix
recons = recons.iloc[:,rnlvl-1:]
- if cnlvl:
- def stuple_to_tuple(x):
- import re
- x = x.split(",")
- x = map(lambda x: re.sub("[\'\"\s\(\)]","",x),x)
- return x
-
- cols=MultiIndex.from_tuples(map(stuple_to_tuple,recons.columns))
- recons.columns = cols
-
type_map = dict(i='i',f='f',s='O',u='O',dt='O',p='O')
if r_dtype:
if r_dtype == 'u': # unicode
@@ -4827,7 +4823,6 @@ def stuple_to_tuple(x):
assert_frame_equal(df, recons,check_names=False,check_less_precise=True)
-
N = 100
chunksize=1000
@@ -4962,6 +4957,7 @@ def test_to_csv_multiindex(self):
frame.index = new_index
with ensure_clean(pname) as path:
+
frame.to_csv(path, header=False)
frame.to_csv(path, cols=['A', 'B'])
@@ -4973,7 +4969,7 @@ def test_to_csv_multiindex(self):
self.assertEqual(frame.index.names, df.index.names)
self.frame.index = old_index # needed if setUP becomes a classmethod
- # try multiindex with dates
+ # try multiindex with dates
tsframe = self.tsframe
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
@@ -4994,6 +4990,102 @@ def test_to_csv_multiindex(self):
assert_almost_equal(recons.values, self.tsframe.values)
self.tsframe.index = old_index # needed if setUP becomes classmethod
+ with ensure_clean(pname) as path:
+ # GH3571, GH1651, GH3141
+
+ def _make_frame(names=None):
+ if names is True:
+ names = ['first','second']
+ return DataFrame(np.random.randint(0,10,size=(3,3)),
+ columns=MultiIndex.from_tuples([('bah', 'foo'),
+ ('bah', 'bar'),
+ ('ban', 'baz')],
+ names=names))
+
+ # column & index are multi-index
+ df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
+ df.to_csv(path,tupleize_cols=False)
+ result = read_csv(path,header=[0,1,2,3],index_col=[0,1],tupleize_cols=False)
+ assert_frame_equal(df,result)
+
+ # column is mi
+ df = mkdf(5,3,r_idx_nlevels=1,c_idx_nlevels=4)
+ df.to_csv(path,tupleize_cols=False)
+ result = read_csv(path,header=[0,1,2,3],index_col=0,tupleize_cols=False)
+ assert_frame_equal(df,result)
+
+ # dup column names?
+ df = mkdf(5,3,r_idx_nlevels=3,c_idx_nlevels=4)
+ df.to_csv(path,tupleize_cols=False)
+ result = read_csv(path,header=[0,1,2,3],index_col=[0,1],tupleize_cols=False)
+ result.columns = ['R2','A','B','C']
+ new_result = result.reset_index().set_index(['R0','R1','R2'])
+ new_result.columns = df.columns
+ assert_frame_equal(df,new_result)
+
+ # writing with no index
+ df = _make_frame()
+ df.to_csv(path,tupleize_cols=False,index=False)
+ result = read_csv(path,header=[0,1],tupleize_cols=False)
+ assert_frame_equal(df,result)
+
+ # we lose the names here
+ df = _make_frame(True)
+ df.to_csv(path,tupleize_cols=False,index=False)
+ result = read_csv(path,header=[0,1],tupleize_cols=False)
+ self.assert_(all([ x is None for x in result.columns.names ]))
+ result.columns.names = df.columns.names
+ assert_frame_equal(df,result)
+
+ # tupleize_cols=True and index=False
+ df = _make_frame(True)
+ df.to_csv(path,tupleize_cols=True,index=False)
+ result = read_csv(path,header=0,tupleize_cols=True,index_col=None)
+ result.columns = df.columns
+ assert_frame_equal(df,result)
+
+ # whatsnew example
+ df = _make_frame()
+ df.to_csv(path,tupleize_cols=False)
+ result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)
+ assert_frame_equal(df,result)
+
+ df = _make_frame(True)
+ df.to_csv(path,tupleize_cols=False)
+ result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)
+ assert_frame_equal(df,result)
+
+ # column & index are multi-index (compatibility)
+ df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
+ df.to_csv(path,tupleize_cols=True)
+ result = read_csv(path,header=0,index_col=[0,1],tupleize_cols=True)
+ result.columns = df.columns
+ assert_frame_equal(df,result)
+
+ # invalid options
+ df = _make_frame(True)
+ df.to_csv(path,tupleize_cols=False)
+
+ # catch invalid headers
+ try:
+ read_csv(path,tupleize_cols=False,header=range(3),index_col=0)
+ except (Exception), detail:
+ if not str(detail).startswith('Passed header=[0,1,2] are too many rows for this multi_index of columns'):
+ raise AssertionError("failure in read_csv header=range(3)")
+
+ try:
+ read_csv(path,tupleize_cols=False,header=range(7),index_col=0)
+ except (Exception), detail:
+ if not str(detail).startswith('Passed header=[0,1,2,3,4,5,6], len of 7, but only 6 lines in file'):
+ raise AssertionError("failure in read_csv header=range(7)")
+
+ for i in [3,4,5,6,7]:
+ self.assertRaises(Exception, read_csv, path, tupleize_cols=False, header=range(i), index_col=0)
+ self.assertRaises(Exception, read_csv, path, tupleize_cols=False, header=[0,2], index_col=0)
+
+ # write with cols
+ self.assertRaises(Exception, df.to_csv, path,tupleize_cols=False,cols=['foo','bar'])
+
with ensure_clean(pname) as path:
# empty
tsframe[:0].to_csv(path)
| In theory should close:
#3571, #1651, #3141
Works, but a couple of issues/caveats:
- ~~index_col needs to be specified as an integer list (can be fixed)~~
- header is a list of rows to read that contain the multi-index,
a row that is skipped (e.g. [0,1,3,5], will just be skipped, like a comment)
- the writing format might be a bit odd: the col names go in the first column,
other index_cols are blanks (they are separated, just == '')
The names of an multi-index on the index are after the columns and before the data,
and are a full row (but blank after the row names).
- ~~I am not sure if we should allow `df.to_csv('path',index=False)` when have a multi-index columns, could just ban it I guess (mainly as it screws up the write format, and then where do you put the names?)~~
- ~~The `cols` argument needs testing and prob is broken when using multi-index on the columns (it really should be specified as a tuple I think, but that is work, so maybe just ban it when using multi-index columns)~~
- needs more testing
```
In [14]: df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
In [15]: df.to_csv('test.csv')
In [16]: !cat 'test.csv'
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
In [17]: res = read_csv('test.csv',header=[0,1,2,3],index_col=[0,1])
In [18]: res.index
Out[18]:
MultiIndex
[(u'R_l0_g0', u'R_l1_g0'), (u'R_l0_g1', u'R_l1_g1'), (u'R_l0_g2', u'R_l1_g2'), (u'R_l0_g3', u'R_l1_g3'), (u'R_l0_g4', u'R_l1_g4')]
In [19]: res.columns
Out[19]:
MultiIndex
[(u'C_l0_g0', u'C_l1_g0', u'C_l2_g0', u'C_l3_g0'), (u'C_l0_g1', u'C_l1_g1', u'C_l2_g1', u'C_l3_g1'), (u'C_l0_g2', u'C_l1_g2', u'C_l2_g2', u'C_l3_g2')]
In [20]: res
Out[20]:
C0 C_l0_g0 C_l0_g1 C_l0_g2
C1 C_l1_g0 C_l1_g1 C_l1_g2
C2 C_l2_g0 C_l2_g1 C_l2_g2
C3 C_l3_g0 C_l3_g1 C_l3_g2
R0 R1
R_l0_g0 R_l1_g0 R0C0 R0C1 R0C2
R_l0_g1 R_l1_g1 R1C0 R1C1 R1C2
R_l0_g2 R_l1_g2 R2C0 R2C1 R2C2
R_l0_g3 R_l1_g3 R3C0 R3C1 R3C2
R_l0_g4 R_l1_g4 R4C0 R4C1 R4C2
In [21]: res.index.names
Out[21]: ['R0', 'R1']
In [22]: res.columns.names
Out[22]: ['C0', 'C1', 'C2', 'C3']
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3575 | 2013-05-11T04:08:31Z | 2013-05-19T14:22:04Z | 2013-05-19T14:22:04Z | 2014-06-12T12:04:57Z |
ENH: plot only numeric data and raise an exception *before* plotting if there is no numeric data | diff --git a/RELEASE.rst b/RELEASE.rst
index b4f3fa1999c8a..efc0f912060b7 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -67,6 +67,10 @@ pandas 0.11.1
to specify custom column names of the returned DataFrame (GH3649_),
thanks @hoechenberger
- ``read_html`` no longer performs hard date conversion
+ - Plotting functions now raise a ``TypeError`` before trying to plot anything
+ if the associated objects have have a dtype of ``object`` (GH1818_,
+ GH3572_). This happens before any drawing takes place which elimnates any
+ spurious plots from showing up.
**API Changes**
@@ -89,6 +93,9 @@ pandas 0.11.1
- Raise on ``iloc`` when boolean indexing with a label based indexer mask
e.g. a boolean Series, even with integer labels, will raise. Since ``iloc``
is purely positional based, the labels on the Series are not alignable (GH3631_)
+ - The ``raise_on_error`` option to plotting methods is obviated by GH3572_,
+ so it is removed. Plots now always raise when data cannot be plotted or the
+ object being plotted has a dtype of ``object``.
**Bug Fixes**
@@ -227,6 +234,8 @@ pandas 0.11.1
.. _GH3659: https://github.com/pydata/pandas/issues/3659
.. _GH3649: https://github.com/pydata/pandas/issues/3649
.. _Gh3616: https://github.com/pydata/pandas/issues/3616
+.. _GH1818: https://github.com/pydata/pandas/issues/1818
+.. _GH3572: https://github.com/pydata/pandas/issues/3572
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 6ff3afeb69581..9209c3938023e 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -61,6 +61,13 @@ API changes
``df.iloc[mask]`` will raise a ``ValueError``
+ - The ``raise_on_error`` argument to plotting functions is removed. Instead,
+ plotting functions raise a ``TypeError`` when the ``dtype`` of the object
+ is ``object`` to remind you to avoid ``object`` arrays whenever possible
+ and thus you should cast to an appropriate numeric dtype if you need to
+ plot something.
+
+
Enhancements
~~~~~~~~~~~~
@@ -118,7 +125,7 @@ Enhancements
The last element yielded by the iterator will be a ``Series`` containing
the last element of the longest string in the ``Series`` with all other
- elements being ``NaN``. Here since ``'slow`` is the longest string
+ elements being ``NaN``. Here since ``'slow'`` is the longest string
and there are no other strings with the same length ``'w'`` is the only
non-null string in the yielded ``Series``.
@@ -158,6 +165,11 @@ Enhancements
- ``pd.melt()`` now accepts the optional parameters ``var_name`` and ``value_name``
to specify custom column names of the returned DataFrame.
+ - Plotting functions now raise a ``TypeError`` before trying to plot anything
+ if the associated objects have have a ``dtype`` of ``object`` (GH1818_,
+ GH3572_). This happens before any drawing takes place which elimnates any
+ spurious plots from showing up.
+
Bug Fixes
~~~~~~~~~
@@ -227,3 +239,5 @@ on GitHub for a complete list.
.. _GH3605: https://github.com/pydata/pandas/issues/3605
.. _GH3606: https://github.com/pydata/pandas/issues/3606
.. _GH3656: https://github.com/pydata/pandas/issues/3656
+.. _GH1818: https://github.com/pydata/pandas/issues/1818
+.. _GH3572: https://github.com/pydata/pandas/issues/3572
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 197b26014a760..5a1411ccf577e 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -187,6 +187,27 @@ def test_bootstrap_plot(self):
from pandas.tools.plotting import bootstrap_plot
_check_plot_works(bootstrap_plot, self.ts, size=10)
+ @slow
+ def test_all_invalid_plot_data(self):
+ s = Series(list('abcd'))
+ kinds = 'line', 'bar', 'barh', 'kde', 'density'
+
+ for kind in kinds:
+ self.assertRaises(TypeError, s.plot, kind=kind)
+
+ @slow
+ def test_partially_invalid_plot_data(self):
+ s = Series(['a', 'b', 1.0, 2])
+ kinds = 'line', 'bar', 'barh', 'kde', 'density'
+
+ for kind in kinds:
+ self.assertRaises(TypeError, s.plot, kind=kind)
+
+ @slow
+ def test_invalid_kind(self):
+ s = Series([1, 2])
+ self.assertRaises(ValueError, s.plot, kind='aasdf')
+
class TestDataFramePlots(unittest.TestCase):
@@ -249,11 +270,9 @@ def test_nonnumeric_exclude(self):
plt.close('all')
df = DataFrame({'A': ["x", "y", "z"], 'B': [1,2,3]})
- ax = df.plot(raise_on_error=False) # it works
+ ax = df.plot()
self.assert_(len(ax.get_lines()) == 1) #B was plotted
- self.assertRaises(Exception, df.plot)
-
@slow
def test_label(self):
import matplotlib.pyplot as plt
@@ -688,6 +707,26 @@ def test_unordered_ts(self):
ydata = ax.lines[0].get_ydata()
self.assert_(np.all(ydata == np.array([1.0, 2.0, 3.0])))
+ @slow
+ def test_all_invalid_plot_data(self):
+ kinds = 'line', 'bar', 'barh', 'kde', 'density'
+ df = DataFrame(list('abcd'))
+ for kind in kinds:
+ self.assertRaises(TypeError, df.plot, kind=kind)
+
+ @slow
+ def test_partially_invalid_plot_data(self):
+ kinds = 'line', 'bar', 'barh', 'kde', 'density'
+ df = DataFrame(np.random.randn(10, 2), dtype=object)
+ df[np.random.rand(df.shape[0]) > 0.5] = 'a'
+ for kind in kinds:
+ self.assertRaises(TypeError, df.plot, kind=kind)
+
+ @slow
+ def test_invalid_kind(self):
+ df = DataFrame(np.random.randn(10, 2))
+ self.assertRaises(ValueError, df.plot, kind='aasdf')
+
class TestDataFrameGroupByPlots(unittest.TestCase):
@classmethod
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 751f5fcdb82b2..583aecdbf9290 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -1,6 +1,5 @@
# being a bit too dynamic
# pylint: disable=E1101
-from itertools import izip
import datetime
import warnings
import re
@@ -701,10 +700,8 @@ class MPLPlot(object):
"""
_default_rot = 0
- _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
- 'raise_on_error']
- _attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
- 'raise_on_error': True}
+ _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog']
+ _attr_defaults = {'logy': False, 'logx': False, 'loglog': False}
def __init__(self, data, kind=None, by=None, subplots=False, sharex=True,
sharey=False, use_index=True,
@@ -875,7 +872,27 @@ def _get_layout(self):
return (len(self.data.columns), 1)
def _compute_plot_data(self):
- pass
+ try:
+ # might be a frame
+ numeric_data = self.data._get_numeric_data()
+ except AttributeError:
+ # a series, but no object dtypes allowed!
+ if self.data.dtype == np.object_:
+ raise TypeError('invalid dtype for plotting, please cast to a '
+ 'numeric dtype explicitly if you want to plot')
+
+ numeric_data = self.data
+
+ try:
+ is_empty = numeric_data.empty
+ except AttributeError:
+ is_empty = not len(numeric_data)
+
+ # no empty frames or series allowed
+ if is_empty:
+ raise TypeError('No numeric data to plot')
+
+ self.data = numeric_data
def _make_plot(self):
raise NotImplementedError
@@ -1184,27 +1201,17 @@ def _make_plot(self):
else:
args = (ax, x, y, style)
- try:
- newline = plotf(*args, **kwds)[0]
- lines.append(newline)
- leg_label = label
- if self.mark_right and self.on_right(i):
- leg_label += ' (right)'
- labels.append(leg_label)
- ax.grid(self.grid)
-
- if self._is_datetype():
- left, right = _get_xlim(lines)
- ax.set_xlim(left, right)
- except AttributeError as inst: # non-numeric
- msg = ('Unable to plot data %s vs index %s,\n'
- 'error was: %s' % (str(y), str(x), str(inst)))
- if not self.raise_on_error:
- print msg
- else:
- msg = msg + ('\nConsider setting raise_on_error=False'
- 'to suppress')
- raise Exception(msg)
+ newline = plotf(*args, **kwds)[0]
+ lines.append(newline)
+ leg_label = label
+ if self.mark_right and self.on_right(i):
+ leg_label += ' (right)'
+ labels.append(leg_label)
+ ax.grid(self.grid)
+
+ if self._is_datetype():
+ left, right = _get_xlim(lines)
+ ax.set_xlim(left, right)
self._make_legend(lines, labels)
@@ -1223,22 +1230,12 @@ def to_leg_label(label, i):
return label
def _plot(data, col_num, ax, label, style, **kwds):
- try:
- newlines = tsplot(data, plotf, ax=ax, label=label,
- style=style, **kwds)
- ax.grid(self.grid)
- lines.append(newlines[0])
- leg_label = to_leg_label(label, col_num)
- labels.append(leg_label)
- except AttributeError as inst: #non-numeric
- msg = ('Unable to plot %s,\n'
- 'error was: %s' % (str(data), str(inst)))
- if not self.raise_on_error:
- print msg
- else:
- msg = msg + ('\nConsider setting raise_on_error=False'
- 'to suppress')
- raise Exception(msg)
+ newlines = tsplot(data, plotf, ax=ax, label=label,
+ style=style, **kwds)
+ ax.grid(self.grid)
+ lines.append(newlines[0])
+ leg_label = to_leg_label(label, col_num)
+ labels.append(leg_label)
if isinstance(data, Series):
ax = self._get_ax(0) # self.axes[0]
@@ -1610,8 +1607,8 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None,
If not passed, uses gca()
style : string, default matplotlib default
matplotlib line style to use
- grid : matplot grid
- legend: matplot legende
+ grid : matplotlib grid
+ legend: matplotlib legend
logx : boolean, default False
For line plots, use log scaling on x axis
logy : boolean, default False
@@ -1633,6 +1630,8 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None,
klass = BarPlot
elif kind == 'kde':
klass = KdePlot
+ else:
+ raise ValueError('Invalid chart type given %s' % kind)
"""
If no axis is specified, we check whether there are existing figures.
diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py
index 5d7dc880b2868..eae04081e7479 100644
--- a/pandas/tseries/tests/test_plotting.py
+++ b/pandas/tseries/tests/test_plotting.py
@@ -87,18 +87,14 @@ def test_nonnumeric_exclude(self):
idx = date_range('1/1/1987', freq='A', periods=3)
df = DataFrame({'A': ["x", "y", "z"], 'B': [1,2,3]}, idx)
- self.assertRaises(Exception, df.plot)
plt.close('all')
- ax = df.plot(raise_on_error=False) # it works
+ ax = df.plot() # it works
self.assert_(len(ax.get_lines()) == 1) #B was plotted
plt.close('all')
- self.assertRaises(Exception, df.A.plot)
- plt.close('all')
- ax = df['A'].plot(raise_on_error=False) # it works
- self.assert_(len(ax.get_lines()) == 0)
+ self.assertRaises(TypeError, df['A'].plot)
@slow
def test_tsplot(self):
| Raise a `TypeError` alerting the user to the fact that they are trying to plot nonnumeric data, or if there are any numeric data plot those. closes #1818.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3572 | 2013-05-11T00:06:40Z | 2013-05-21T21:01:11Z | 2013-05-21T21:01:11Z | 2014-06-13T08:15:50Z |
DOC: document non-preservation of dtypes across rows with iterrows | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 5739fe0922d6d..da3bbcb4f0dc2 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -835,7 +835,6 @@ containing the data in each row:
...: print '%s\n%s' % (row_index, row)
...:
-
For instance, a contrived way to transpose the dataframe would be:
.. ipython:: python
@@ -847,6 +846,18 @@ For instance, a contrived way to transpose the dataframe would be:
df2_t = DataFrame(dict((idx,values) for idx, values in df2.iterrows()))
print df2_t
+.. note::
+
+ ``iterrows`` does **not** preserve dtypes across the rows (dtypes are
+ preserved across columns for DataFrames). For example,
+
+ .. ipython:: python
+
+ df = DataFrame([[1, 1.0]], columns=['x', 'y'])
+ row = next(df.iterrows())[1]
+ print row['x'].dtype
+ print df['x'].dtype
+
itertuples
~~~~~~~~~~
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 0ffdcb0e036ce..ad1429fcea1ca 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -772,7 +772,25 @@ def iteritems(self):
def iterrows(self):
"""
- Iterate over rows of DataFrame as (index, Series) pairs
+ Iterate over rows of DataFrame as (index, Series) pairs.
+
+ Notes
+ -----
+
+ * ``iterrows`` does **not** preserve dtypes across the rows (dtypes
+ are preserved across columns for DataFrames). For example,
+
+ >>> df = DataFrame([[1, 1.0]], columns=['x', 'y'])
+ >>> row = next(df.iterrows())[1]
+ >>> print row['x'].dtype
+ float64
+ >>> print df['x'].dtype
+ int64
+
+ Returns
+ -------
+ it : generator
+ A generator that iterates over the rows of the frame.
"""
columns = self.columns
for k, v in izip(self.index, self.values):
| iterrows doesn't preserve dtypes across rows, document it, #3566. i learned that you can nest ..ipython:: blocks in ..note:: blocks in sphinx. sweet.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3569 | 2013-05-10T23:09:09Z | 2013-05-11T11:47:28Z | 2013-05-11T11:47:28Z | 2014-07-07T00:24:42Z |
DOC: add bs4/lxml install note to README.rst | diff --git a/README.rst b/README.rst
index ea713006c7189..ee728ce7fc7a4 100644
--- a/README.rst
+++ b/README.rst
@@ -90,6 +90,12 @@ Optional dependencies
* openpyxl version 1.6.1 or higher, for writing .xlsx files
* xlrd >= 0.9.0
* Needed for Excel I/O
+ * `lxml <http://lxml.de>`__, or `Beautiful Soup 4 <http://www.crummy.com/software/BeautifulSoup>`__: for reading HTML tables
+ * The differences between lxml and Beautiful Soup 4 are mostly speed (lxml
+ is faster), however sometimes Beautiful Soup returns what you might
+ intuitively expect. Both backends are implemented, so try them both to
+ see which one you like. They should return very similar results.
+ * Note that lxml requires Cython to build successfully
* `boto <https://pypi.python.org/pypi/boto>`__: necessary for Amazon S3 access.
| Glanced at the GitHub landing page and didn't see this.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3565 | 2013-05-10T15:30:06Z | 2013-05-10T22:32:39Z | 2013-05-10T22:32:39Z | 2014-06-14T20:18:33Z |
BUG: Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562) | diff --git a/RELEASE.rst b/RELEASE.rst
index 8e48395efc9ab..82f88a0c8e592 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -77,6 +77,7 @@ pandas 0.11.1
- ``.loc`` was not raising when passed an integer list (GH3449_)
- Unordered time series selection was misbehaving when using label slicing (GH3448_)
- Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_)
+ - Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_)
- Fix sorting in a frame with a list of columns which contains datetime64[ns] dtypes (GH3461_)
- DataFrames fetched via FRED now handle '.' as a NaN. (GH3469_)
- Fix regression in a DataFrame apply with axis=1, objects were not being converted back
@@ -137,6 +138,7 @@ pandas 0.11.1
.. _GH3495: https://github.com/pydata/pandas/issues/3495
.. _GH3492: https://github.com/pydata/pandas/issues/3492
.. _GH3552: https://github.com/pydata/pandas/issues/3552
+.. _GH3562: https://github.com/pydata/pandas/issues/3562
.. _GH3493: https://github.com/pydata/pandas/issues/3493
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 13e1654963844..b6459b0e461b4 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1960,7 +1960,8 @@ def form_blocks(arrays, names, axes):
items = axes[0]
if len(arrays) < len(items):
- extra_items = items - Index(names)
+ nn = set(names)
+ extra_items = Index([ i for i in items if i not in nn ])
else:
extra_items = []
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 0c9dd21d2f645..7e7813e048bd1 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -3518,6 +3518,16 @@ def test_from_records_misc_brokenness(self):
results = df2_obj.get_dtype_counts()
expected = Series({ 'datetime64[ns]' : 1, 'int64' : 1 })
+ def test_from_records_empty(self):
+ # 3562
+ result = DataFrame.from_records([], columns=['a','b','c'])
+ expected = DataFrame(columns=['a','b','c'])
+ assert_frame_equal(result, expected)
+
+ result = DataFrame.from_records([], columns=['a','b','b'])
+ expected = DataFrame(columns=['a','b','b'])
+ assert_frame_equal(result, expected)
+
def test_to_records_floats(self):
df = DataFrame(np.random.rand(10, 10))
df.to_records()
| closes #3562
| https://api.github.com/repos/pandas-dev/pandas/pulls/3564 | 2013-05-10T15:00:17Z | 2013-05-10T16:01:33Z | 2013-05-10T16:01:33Z | 2014-06-29T09:51:40Z |
BUG: (GH3561) non-unique indexers with a list-like now return in the same order as the passed values | diff --git a/RELEASE.rst b/RELEASE.rst
index 31627cec01d1e..4e6570669656d 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -91,6 +91,7 @@ pandas 0.11.1
(removed warning) (GH2786_), and fix (GH3230_)
- Fix to_csv to handle non-unique columns (GH3495_)
- Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_)
+ and handle missing elements like unique indices (GH3561_)
- Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_)
- Fixed bug in groupby with empty series referencing a variable before assignment. (GH3510_)
- Fixed bug in mixed-frame assignment with aligned series (GH3492_)
@@ -148,6 +149,7 @@ pandas 0.11.1
.. _GH3552: https://github.com/pydata/pandas/issues/3552
.. _GH3562: https://github.com/pydata/pandas/issues/3562
.. _GH3586: https://github.com/pydata/pandas/issues/3586
+.. _GH3561: https://github.com/pydata/pandas/issues/3561
.. _GH3493: https://github.com/pydata/pandas/issues/3493
.. _GH3579: https://github.com/pydata/pandas/issues/3579
.. _GH3593: https://github.com/pydata/pandas/issues/3593
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index d67a2d51cc1b8..55b7e653c3630 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -1368,6 +1368,9 @@ incompatible the new object internals are with the ``Index`` functions):
- ``slice_locs``: returns the "range" to slice between two labels
- ``get_indexer``: Computes the indexing vector for reindexing / data
alignment purposes. See the source / docstrings for more on this
+ - ``get_indexer_non_unique``: Computes the indexing vector for reindexing / data
+ alignment purposes when the index is non-unique. See the source / docstrings
+ for more on this
- ``reindex``: Does any pre-conversion of the input index then calls
``get_indexer``
- ``union``, ``intersection``: computes the union or intersection of two
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 7baae543714ec..3e5a4f5676437 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -859,6 +859,25 @@ def get_indexer(self, target, method=None, limit=None):
return com._ensure_platform_int(indexer)
+ def get_indexer_non_unique(self, target, **kwargs):
+ """ return an indexer suitable for taking from a non unique index
+ return the labels in the same order as the target, and
+ return a missing indexer into the target (missing are marked as -1
+ in the indexer); target must be an iterable """
+ target = _ensure_index(target)
+ pself, ptarget = self._possibly_promote(target)
+ if pself is not self or ptarget is not target:
+ return pself.get_indexer_non_unique(ptarget)
+
+ if self.is_all_dates:
+ self = Index(self.asi8)
+ tgt_values = target.asi8
+ else:
+ tgt_values = target.values
+
+ indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
+ return Index(indexer), missing
+
def _possibly_promote(self, other):
# A hack, but it works
from pandas.tseries.index import DatetimeIndex
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index bc8b7a3646a33..29adce4e02591 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -458,8 +458,23 @@ def _reindex(keys, level=None):
if labels.is_unique:
return _reindex(keyarr, level=level)
else:
- mask = labels.isin(keyarr)
- return self.obj.take(mask.nonzero()[0], axis=axis, convert=False)
+ indexer, missing = labels.get_indexer_non_unique(keyarr)
+ check = indexer != -1
+ result = self.obj.take(indexer[check], axis=axis, convert=False)
+
+ # need to merge the result labels and the missing labels
+ if len(missing):
+ l = np.arange(len(indexer))
+
+ missing_labels = keyarr.take(missing)
+ missing_labels_indexer = l[~check]
+ cur_labels = result._get_axis(axis).values
+ cur_labels_indexer = l[check]
+ new_labels = lib.combine_from_indexers(cur_labels, cur_labels_indexer,
+ missing_labels, missing_labels_indexer)
+ result = result.reindex_axis(new_labels,axis=axis)
+
+ return result
def _convert_to_indexer(self, obj, axis=0):
"""
@@ -569,20 +584,8 @@ def _convert_to_indexer(self, obj, axis=0):
# non-unique (dups)
else:
- indexer = []
- check = np.arange(len(labels))
- lvalues = labels.values
- for x in objarr:
- # ugh
- to_or = lib.map_infer(lvalues, x.__eq__)
- if not to_or.any():
- raise KeyError('%s not in index' % str(x))
-
- # add the indicies (as we want to take)
- indexer.extend(check[to_or])
-
- indexer = Index(indexer)
-
+ indexer, missing = labels.get_indexer_non_unique(objarr)
+ check = indexer
mask = check == -1
if mask.any():
diff --git a/pandas/index.pyx b/pandas/index.pyx
index 2ad5474549ec6..7d33d6083d0eb 100644
--- a/pandas/index.pyx
+++ b/pandas/index.pyx
@@ -267,8 +267,46 @@ cdef class IndexEngine:
self._ensure_mapping_populated()
return self.mapping.lookup(values)
+ def get_indexer_non_unique(self, targets):
+ """ return an indexer suitable for takng from a non unique index
+ return the labels in the same order ast the target
+ and a missing indexer into the targets (which correspond
+ to the -1 indicies in the results """
+ cdef:
+ ndarray values
+ ndarray[int64_t] result, missing
+ object v, val
+ int count = 0, count_missing = 0
+ Py_ssize_t i, j, n, found
+
+ self._ensure_mapping_populated()
+ values = self._get_index_values()
+ n = len(values)
+ n_t = len(targets)
+ result = np.empty(n+n_t, dtype=np.int64)
+ missing = np.empty(n_t, dtype=np.int64)
+
+ for i in range(n_t):
+ val = util.get_value_at(targets, i)
+ found = 0
+
+ for j in range(n):
+ v = util.get_value_at(values, j)
+
+ if v == val:
+ result[count] = j
+ count += 1
+ found = 1
+
+ # value not found
+ if found == 0:
+ result[count] = -1
+ count += 1
+ missing[count_missing] = i
+ count_missing += 1
+ return result[0:count], missing[0:count_missing]
cdef class Int64Engine(IndexEngine):
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index d043691bc061e..30c65d9fcdd9f 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -416,6 +416,25 @@ def dicts_to_array(list dicts, list columns):
return result
+@cython.wraparound(False)
+@cython.boundscheck(False)
+def combine_from_indexers(ndarray a, ndarray[int64_t] a_indexer,
+ ndarray b, ndarray[int64_t] b_indexer):
+ cdef:
+ Py_ssize_t i, n_a, n_b
+ ndarray result
+
+ n_a = len(a)
+ n_b = len(b)
+ result = np.empty(n_a+n_b,dtype=object)
+
+ for i in range(n_a):
+ result[a_indexer[i]] = a[i]
+ for i in range(n_b):
+ result[b_indexer[i]] = b[i]
+
+ return result
+
def fast_zip(list ndarrays):
'''
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index ce89dda63597f..e92cc22dccaf6 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4668,8 +4668,29 @@ def _check_df(df,cols=None):
with ensure_clean() as path:
df.to_csv(path,cols = cols,chunksize=chunksize)
rs_c = pd.read_csv(path,index_col=0)
- rs_c.columns = df.columns
- assert_frame_equal(df,rs_c,check_names=False)
+
+ # we wrote them in a different order
+ # so compare them in that order
+ if cols is not None:
+
+ if df.columns.is_unique:
+ rs_c.columns = cols
+ else:
+ indexer, missing = df.columns.get_indexer_non_unique(cols)
+ rs_c.columns = df.columns.take(indexer)
+
+ for c in cols:
+ obj_df = df[c]
+ obj_rs = rs_c[c]
+ if isinstance(obj_df,Series):
+ assert_series_equal(obj_df,obj_rs)
+ else:
+ assert_frame_equal(obj_df,obj_rs,check_names=False)
+
+ # wrote in the same order
+ else:
+ rs_c.columns = df.columns
+ assert_frame_equal(df,rs_c,check_names=False)
chunksize=5
N = int(chunksize*2.5)
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 01651f2674a90..46fd98fc14ffb 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -784,6 +784,28 @@ def test_dups_fancy_indexing(self):
assert_frame_equal(df,result)
+ # GH 3561, dups not in selected order
+ ind = ['A', 'A', 'B', 'C']
+ df = DataFrame({'test':range(len(ind))}, index=ind)
+ rows = ['C', 'B']
+ res = df.ix[rows]
+ self.assert_(rows == list(res.index))
+
+ res = df.ix[Index(rows)]
+ self.assert_(Index(rows).equals(res.index))
+
+ rows = ['C','B','E']
+ res = df.ix[rows]
+ self.assert_(rows == list(res.index))
+
+ # inconcistent returns for unique/duplicate indices when values are missing
+ df = DataFrame(randn(4,3),index=list('ABCD'))
+ expected = df.ix[['E']]
+
+ dfnu = DataFrame(randn(5,3),index=list('AABCD'))
+ result = dfnu.ix[['E']]
+ assert_frame_equal(result, expected)
+
def test_indexing_mixed_frame_bug(self):
# GH3492
| close #3561
| https://api.github.com/repos/pandas-dev/pandas/pulls/3563 | 2013-05-10T14:42:27Z | 2013-05-14T21:44:58Z | 2013-05-14T21:44:57Z | 2014-07-10T09:20:30Z |
ENH: unicode of PeriodIndex returns valid Python code | diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index d9dfa51bc0bff..abb7486de9351 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -1102,6 +1102,25 @@ def __repr__(self):
output += 'length: %d' % len(self)
return output
+ def __unicode__(self):
+ output = self.__class__.__name__
+ output += u'('
+ prefix = '' if py3compat.PY3 else 'u'
+ mapper = "{0}'{{0}}'".format(prefix)
+ output += '[{0}]'.format(', '.join(map(mapper.format, self)))
+ output += ", freq='{0}'".format(self.freq)
+ output += ')'
+ return output
+
+ def __bytes__(self):
+ encoding = com.get_option('display.encoding')
+ return self.__unicode__().encode(encoding, 'replace')
+
+ def __str__(self):
+ if py3compat.PY3:
+ return self.__unicode__()
+ return self.__bytes__()
+
def take(self, indices, axis=None):
"""
Analogous to ndarray.take
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index 10a5e039b9fc6..95de08909a50a 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -1616,6 +1616,83 @@ def test_ts_repr(self):
ts = Series(np.random.randn(len(index)), index=index)
repr(ts)
+ def test_period_index_unicode(self):
+ pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
+ assert_equal(len(pi), 9)
+ assert_equal(pi, eval(unicode(pi)))
+
+ pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
+ assert_equal(len(pi), 4 * 9)
+ assert_equal(pi, eval(unicode(pi)))
+
+ pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
+ assert_equal(len(pi), 12 * 9)
+ assert_equal(pi, eval(unicode(pi)))
+
+ start = Period('02-Apr-2005', 'B')
+ i1 = PeriodIndex(start=start, periods=20)
+ assert_equal(len(i1), 20)
+ assert_equal(i1.freq, start.freq)
+ assert_equal(i1[0], start)
+ assert_equal(i1, eval(unicode(i1)))
+
+ end_intv = Period('2006-12-31', 'W')
+ i1 = PeriodIndex(end=end_intv, periods=10)
+ assert_equal(len(i1), 10)
+ assert_equal(i1.freq, end_intv.freq)
+ assert_equal(i1[-1], end_intv)
+ assert_equal(i1, eval(unicode(i1)))
+
+ end_intv = Period('2006-12-31', '1w')
+ i2 = PeriodIndex(end=end_intv, periods=10)
+ assert_equal(len(i1), len(i2))
+ self.assert_((i1 == i2).all())
+ assert_equal(i1.freq, i2.freq)
+ assert_equal(i1, eval(unicode(i1)))
+ assert_equal(i2, eval(unicode(i2)))
+
+ end_intv = Period('2006-12-31', ('w', 1))
+ i2 = PeriodIndex(end=end_intv, periods=10)
+ assert_equal(len(i1), len(i2))
+ self.assert_((i1 == i2).all())
+ assert_equal(i1.freq, i2.freq)
+ assert_equal(i1, eval(unicode(i1)))
+ assert_equal(i2, eval(unicode(i2)))
+
+ try:
+ PeriodIndex(start=start, end=end_intv)
+ raise AssertionError('Cannot allow mixed freq for start and end')
+ except ValueError:
+ pass
+
+ end_intv = Period('2005-05-01', 'B')
+ i1 = PeriodIndex(start=start, end=end_intv)
+ assert_equal(i1, eval(unicode(i1)))
+
+ try:
+ PeriodIndex(start=start)
+ raise AssertionError(
+ 'Must specify periods if missing start or end')
+ except ValueError:
+ pass
+
+ # infer freq from first element
+ i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
+ assert_equal(len(i2), 2)
+ assert_equal(i2[0], end_intv)
+ assert_equal(i2, eval(unicode(i2)))
+
+ i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
+ assert_equal(len(i2), 2)
+ assert_equal(i2[0], end_intv)
+ assert_equal(i2, eval(unicode(i2)))
+
+ # Mixed freq should fail
+ vals = [end_intv, Period('2006-12-31', 'w')]
+ self.assertRaises(ValueError, PeriodIndex, vals)
+ vals = np.array(vals)
+ self.assertRaises(ValueError, PeriodIndex, vals)
+
def test_frame_index_to_string(self):
index = PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M')
frame = DataFrame(np.random.randn(3, 4), index=index)
| addresses #3460.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3560 | 2013-05-10T01:22:14Z | 2013-05-10T09:29:37Z | 2013-05-10T09:29:37Z | 2014-06-23T18:20:54Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.