title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
ENH: raise a more a useful exception on empty files | diff --git a/doc/source/release.rst b/doc/source/release.rst
index df09d2f5a50ba..27299844997cb 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -96,6 +96,8 @@ pandas 0.11.1
explicitly checking a website as a proxy for seeing if there is network
connectivity. Plus, new ``optional_args`` decorator factory for decorators.
(:issue:`3910`, :issue:`3914`)
+ - ``read_csv`` will now throw a more informative error message when a file
+ contains no columns, e.g., all newline characters
**API Changes**
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index cc60ce07c2c4d..e7624225853a0 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -240,6 +240,9 @@ I/O Enhancements
import os
os.remove(path)
+ - ``read_csv`` will now throw a more informative error message when a file
+ contains no columns, e.g., all newline characters
+
Other Enhancements
~~~~~~~~~~~~~~~~~~
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 377ef4ff5a44f..3fa8091feeb15 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -949,6 +949,7 @@ def __init__(self, src, **kwds):
# #2442
kwds['allow_leading_cols'] = self.index_col is not False
+
self._reader = _parser.TextReader(src, **kwds)
# XXX
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index f9e956f60dde6..d75dcb6f02bfc 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -2330,6 +2330,19 @@ def test_tokenize_CR_with_quoting(self):
expected = self.read_csv(StringIO(data.replace('\r', '\n')))
tm.assert_frame_equal(result, expected)
+ def test_raise_on_no_columns(self):
+ # single newline
+ data = """
+"""
+ self.assertRaises(ValueError, self.read_csv, StringIO(data))
+
+ # test with more than a single newline
+ data = """
+
+
+"""
+ self.assertRaises(ValueError, self.read_csv, StringIO(data))
+
class TestParseSQL(unittest.TestCase):
diff --git a/pandas/parser.pyx b/pandas/parser.pyx
index eaa588ef4d150..185cf1a752803 100644
--- a/pandas/parser.pyx
+++ b/pandas/parser.pyx
@@ -476,6 +476,9 @@ cdef class TextReader:
self.names = names
self.header, self.table_width = self._get_header()
+ if not self.table_width:
+ raise ValueError("No columns to parse from file")
+
# compute buffer_lines as function of table width
heuristic = 2**20 // self.table_width
self.buffer_lines = 1
| https://api.github.com/repos/pandas-dev/pandas/pulls/3989 | 2013-06-22T03:14:30Z | 2013-06-22T22:32:30Z | 2013-06-22T22:32:30Z | 2014-07-16T08:15:21Z | |
TST: mark google tests as slow | diff --git a/pandas/io/tests/test_google.py b/pandas/io/tests/test_google.py
index 8ceda94f07a52..f3ab089546a1c 100644
--- a/pandas/io/tests/test_google.py
+++ b/pandas/io/tests/test_google.py
@@ -5,11 +5,12 @@
import numpy as np
import pandas as pd
import pandas.io.data as web
+from numpy.testing.decorators import slow
from pandas.util.testing import network, with_connectivity_check
class TestGoogle(unittest.TestCase):
-
+ @slow
@with_connectivity_check("http://www.google.com")
def test_google(self):
# asserts that google is minimally working and that it throws
@@ -27,17 +28,19 @@ def test_google(self):
lambda: web.DataReader("NON EXISTENT TICKER", 'google',
start, end))
-
@network
def test_get_quote(self):
self.assertRaises(NotImplementedError,
lambda: web.get_quote_google(pd.Series(['GOOG', 'AAPL', 'GOOG'])))
+
+ @slow
@with_connectivity_check('http://www.google.com')
def test_get_goog_volume(self):
df = web.get_data_google('GOOG')
assert df.Volume.ix['OCT-08-2010'] == 2863473
+ @slow
@with_connectivity_check('http://www.google.com')
def test_get_multi1(self):
sl = ['AAPL', 'AMZN', 'GOOG']
@@ -45,6 +48,7 @@ def test_get_multi1(self):
ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG]
assert ts[0].dayofyear == 96
+ @slow
@with_connectivity_check('http://www.google.com')
def test_get_multi2(self):
pan = web.get_data_google(['GE', 'MSFT', 'INTC'], 'JAN-01-12', 'JAN-31-12')
| closes #3985.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3988 | 2013-06-22T01:36:36Z | 2013-06-22T03:16:01Z | null | 2014-06-27T11:41:38Z |
TST/BUG: fix MANIFEST.in to reflect the change of file type of RELEASE | diff --git a/MANIFEST.in b/MANIFEST.in
index 649d96e7b2051..02de7790d11cf 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,6 +1,6 @@
include MANIFEST.in
include LICENSE
-include RELEASE.rst
+include RELEASE.md
include README.rst
include TODO.rst
include setup.py
| https://api.github.com/repos/pandas-dev/pandas/pulls/3987 | 2013-06-21T23:33:58Z | 2013-06-21T23:54:55Z | 2013-06-21T23:54:55Z | 2014-07-16T08:15:20Z | |
BUG: Index shift drops index name | diff --git a/doc/source/release.rst b/doc/source/release.rst
index afca7511bf11f..07489a140c018 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -270,6 +270,7 @@ pandas 0.11.1
- Indexing with a string with seconds resolution not selecting from a time index (:issue:`3925`)
- csv parsers would loop infinitely if ``iterator=True`` but no ``chunksize`` was
specified (:issue:`3967`), python parser failing with ``chunksize=1``
+ - Fix index name not propogating when using ``shift``
.. _Gh3616: https://github.com/pydata/pandas/issues/3616
diff --git a/pandas/core/index.py b/pandas/core/index.py
index a5880b9f18670..c06c46cde36c8 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -563,7 +563,7 @@ def shift(self, periods=1, freq=None):
return self
offset = periods * freq
- return Index([idx + offset for idx in self])
+ return Index([idx + offset for idx in self], name=self.name)
def argsort(self, *args, **kwargs):
"""
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 7ce4a11229561..d9808ab48ca41 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -204,6 +204,9 @@ def test_shift(self):
shifted = self.dateIndex.shift(1, 'B')
self.assert_(np.array_equal(shifted, self.dateIndex + offsets.BDay()))
+ shifted.name = 'shifted'
+ self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
+
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
| ```
>>> idx = pd.Index([pd.to_datetime('2013-06-21')], name='idx')
>>> idx.name
'idx'
>>> idx.shift(1, freq='B').name == None
True
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3986 | 2013-06-21T21:18:26Z | 2013-06-21T23:04:23Z | 2013-06-21T23:04:23Z | 2014-07-15T16:07:40Z |
BUG/TST: catch socket.error in py2/3.2 and ConnectionError in py3.3 | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 917d91a14441e..c356b6378ce37 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -281,6 +281,8 @@ pandas 0.12
- Fixed flattening of columns when renaming MultiIndex columns DataFrame (:issue:`4004`)
- Fix ``Series.clip`` for datetime series. NA/NaN threshold values will now throw ValueError (:issue:`3996`)
- Fixed insertion issue into DataFrame, after rename (:issue:`4032`)
+ - Fixed testing issue where too many sockets where open thus leading to a
+ connection reset issue (:issue:`3982`, :issue:`3985`)
pandas 0.11.0
diff --git a/doc/source/v0.12.0.txt b/doc/source/v0.12.0.txt
index e146e892722d8..eb41c2dbca82f 100644
--- a/doc/source/v0.12.0.txt
+++ b/doc/source/v0.12.0.txt
@@ -420,6 +420,8 @@ Bug Fixes
explicitly checking a website as a proxy for seeing if there is network
connectivity. Plus, new ``optional_args`` decorator factory for decorators.
(:issue:`3910`, :issue:`3914`)
+ - Fixed testing issue where too many sockets where open thus leading to a
+ connection reset issue (:issue:`3982`, :issue:`3985`)
See the :ref:`full release notes
<release>` or issue tracker
diff --git a/pandas/io/data.py b/pandas/io/data.py
index 21f69e2e7daf4..9cf5eeb1fed4e 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -8,9 +8,9 @@
import numpy as np
import datetime as dt
import urllib
-import urllib2
import time
-import warnings
+from contextlib import closing
+from urllib2 import urlopen
from zipfile import ZipFile
from pandas.util.py3compat import StringIO, BytesIO, bytes_to_str
@@ -109,10 +109,11 @@ def get_quote_yahoo(symbols):
data = dict(zip(codes.keys(), [[] for i in range(len(codes))]))
- urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (
- sym_list, request)
+ url_str = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (sym_list,
+ request)
- lines = urllib2.urlopen(urlStr).readlines()
+ with closing(urlopen(url_str)) as url:
+ lines = url.readlines()
for line in lines:
fields = line.decode('utf-8').strip().split(',')
@@ -151,29 +152,29 @@ def _get_hist_yahoo(sym=None, start=None, end=None, retry_count=3,
yahoo_URL = 'http://ichart.yahoo.com/table.csv?'
- url = yahoo_URL + 's=%s' % sym + \
- '&a=%s' % (start.month - 1) + \
- '&b=%s' % start.day + \
- '&c=%s' % start.year + \
- '&d=%s' % (end.month - 1) + \
- '&e=%s' % end.day + \
- '&f=%s' % end.year + \
- '&g=d' + \
- '&ignore=.csv'
-
- for _ in range(retry_count):
- resp = urllib2.urlopen(url)
- if resp.code == 200:
- lines = resp.read()
- rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0,
- parse_dates=True)[::-1]
-
- # Yahoo! Finance sometimes does this awesome thing where they
- # return 2 rows for the most recent business day
- if len(rs) > 2 and rs.index[-1] == rs.index[-2]: # pragma: no cover
- rs = rs[:-1]
-
- return rs
+ url = (yahoo_URL + 's=%s' % sym +
+ '&a=%s' % (start.month - 1) +
+ '&b=%s' % start.day +
+ '&c=%s' % start.year +
+ '&d=%s' % (end.month - 1) +
+ '&e=%s' % end.day +
+ '&f=%s' % end.year +
+ '&g=d' +
+ '&ignore=.csv')
+
+ for _ in xrange(retry_count):
+ with closing(urlopen(url)) as resp:
+ if resp.code == 200:
+ lines = resp.read()
+ rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0,
+ parse_dates=True)[::-1]
+
+ # Yahoo! Finance sometimes does this awesome thing where they
+ # return 2 rows for the most recent business day
+ if len(rs) > 2 and rs.index[-1] == rs.index[-2]: # pragma: no cover
+ rs = rs[:-1]
+
+ return rs
time.sleep(pause)
@@ -198,17 +199,19 @@ def _get_hist_google(sym=None, start=None, end=None, retry_count=3,
google_URL = 'http://www.google.com/finance/historical?'
# www.google.com/finance/historical?q=GOOG&startdate=Jun+9%2C+2011&enddate=Jun+8%2C+2013&output=csv
- url = google_URL + urllib.urlencode({"q": sym, \
- "startdate": start.strftime('%b %d, %Y'), \
- "enddate": end.strftime('%b %d, %Y'), "output": "csv" })
- for _ in range(retry_count):
- resp = urllib2.urlopen(url)
- if resp.code == 200:
- lines = resp.read()
- rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0,
- parse_dates=True)[::-1]
-
- return rs
+ url = google_URL + urllib.urlencode({"q": sym,
+ "startdate": start.strftime('%b %d, '
+ '%Y'),
+ "enddate": end.strftime('%b %d, %Y'),
+ "output": "csv"})
+ for _ in xrange(retry_count):
+ with closing(urlopen(url)) as resp:
+ if resp.code == 200:
+ lines = resp.read()
+ rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0,
+ parse_dates=True)[::-1]
+
+ return rs
time.sleep(pause)
@@ -280,19 +283,19 @@ def get_components_yahoo(idx_sym):
'&e=.csv&h={2}'
idx_mod = idx_sym.replace('^', '@%5E')
- urlStr = url.format(idx_mod, stats, 1)
+ url_str = url.format(idx_mod, stats, 1)
idx_df = DataFrame()
mask = [True]
comp_idx = 1
- #LOOP across component index structure,
- #break when no new components are found
- while (True in mask):
- urlStr = url.format(idx_mod, stats, comp_idx)
- lines = (urllib.urlopen(urlStr).read().decode('utf-8').strip().
- strip('"').split('"\r\n"'))
-
+ # LOOP across component index structure,
+ # break when no new components are found
+ while True in mask:
+ url_str = url.format(idx_mod, stats, comp_idx)
+ with closing(urlopen(url_str)) as resp:
+ raw = resp.read()
+ lines = raw.decode('utf-8').strip().strip('"').split('"\r\n"')
lines = [line.strip().split('","') for line in lines]
temp_df = DataFrame(lines, columns=['ticker', 'name', 'exchange'])
@@ -468,11 +471,11 @@ def get_data_fred(name=None, start=dt.datetime(2010, 1, 1),
fred_URL = "http://research.stlouisfed.org/fred2/series/"
- url = fred_URL + '%s' % name + \
- '/downloaddata/%s' % name + '.csv'
- data = read_csv(urllib.urlopen(url), index_col=0, parse_dates=True,
- header=None, skiprows=1, names=["DATE", name],
- na_values='.')
+ url = fred_URL + '%s' % name + '/downloaddata/%s' % name + '.csv'
+ with closing(urlopen(url)) as resp:
+ data = read_csv(resp, index_col=0, parse_dates=True,
+ header=None, skiprows=1, names=["DATE", name],
+ na_values='.')
try:
return data.truncate(start, end)
except KeyError:
@@ -489,9 +492,9 @@ def get_data_famafrench(name, start=None, end=None):
# path of zip files
zipFileURL = "http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/"
- url = urllib.urlopen(zipFileURL + name + ".zip")
- zipfile = ZipFile(StringIO(url.read()))
- data = zipfile.open(name + ".txt").readlines()
+ with closing(urlopen(zipFileURL + name + ".zip")) as url:
+ with closing(ZipFile(StringIO(url.read()))) as zf:
+ data = zf.read(name + ".txt").splitlines()
file_edges = np.where(np.array([len(d) for d in data]) == 2)[0]
@@ -638,7 +641,7 @@ def get_options_data(self, month=None, year=None, expiry=None):
url = str('http://finance.yahoo.com/q/op?s=' + self.symbol +
'+Options')
- parsed = parse(urllib2.urlopen(url))
+ parsed = parse(url)
doc = parsed.getroot()
tables = doc.findall('.//table')
calls = tables[9]
@@ -709,7 +712,7 @@ def get_call_data(self, month=None, year=None, expiry=None):
url = str('http://finance.yahoo.com/q/op?s=' + self.symbol +
'+Options')
- parsed = parse(urllib2.urlopen(url))
+ parsed = parse(url)
doc = parsed.getroot()
tables = doc.findall('.//table')
calls = tables[9]
@@ -777,7 +780,7 @@ def get_put_data(self, month=None, year=None, expiry=None):
url = str('http://finance.yahoo.com/q/op?s=' + self.symbol +
'+Options')
- parsed = parse(urllib2.urlopen(url))
+ parsed = parse(url)
doc = parsed.getroot()
tables = doc.findall('.//table')
puts = tables[13]
| closes #3982.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3985 | 2013-06-21T21:16:40Z | 2013-06-26T18:16:36Z | 2013-06-26T18:16:36Z | 2014-06-13T07:41:23Z |
BUG (GH3967) csv parsers would loop infinitely if iterator=True but no chunksize specified | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 882826765d057..f16036692c8d3 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -258,6 +258,8 @@ pandas 0.11.1
- Fixed ``__truediv__`` in Python 2.7 with ``numexpr`` installed to actually do true division when dividing
two integer arrays with at least 10000 cells total (:issue:`3764`)
- Indexing with a string with seconds resolution not selecting from a time index (:issue:`3925`)
+ - csv parsers would loop infinitely if ``iterator=True`` but no ``chunksize`` was
+ specified (:issue:`3967`), python parser failing with ``chunksize=1``
.. _Gh3616: https://github.com/pydata/pandas/issues/3616
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 054363d8cda06..658532e80682d 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -186,7 +186,7 @@ def _read(filepath_or_buffer, kwds):
kwds['parse_dates'] = True
# Extract some of the arguments (pass chunksize on).
- iterator = kwds.pop('iterator', False)
+ iterator = kwds.get('iterator', False)
nrows = kwds.pop('nrows', None)
chunksize = kwds.get('chunksize', None)
@@ -569,8 +569,11 @@ def _clean_options(self, options, engine):
def __iter__(self):
try:
- while True:
- yield self.read(self.chunksize)
+ if self.chunksize:
+ while True:
+ yield self.read(self.chunksize)
+ else:
+ yield self.read()
except StopIteration:
pass
@@ -1594,47 +1597,58 @@ def _rows_to_cols(self, content):
def _get_lines(self, rows=None):
source = self.data
lines = self.buf
+ new_rows = None
# already fetched some number
if rows is not None:
- rows -= len(self.buf)
- if isinstance(source, list):
- if self.pos > len(source):
- raise StopIteration
- if rows is None:
- lines.extend(source[self.pos:])
- self.pos = len(source)
+ # we already have the lines in the buffer
+ if len(self.buf) >= rows:
+ new_rows, self.buf = self.buf[:rows], self.buf[rows:]
+
+ # need some lines
else:
- lines.extend(source[self.pos:self.pos + rows])
- self.pos += rows
- else:
- new_rows = []
- try:
- if rows is not None:
- for _ in xrange(rows):
- new_rows.append(next(source))
- lines.extend(new_rows)
+ rows -= len(self.buf)
+
+ if new_rows is None:
+ if isinstance(source, list):
+ if self.pos > len(source):
+ raise StopIteration
+ if rows is None:
+ lines.extend(source[self.pos:])
+ self.pos = len(source)
else:
- rows = 0
- while True:
- try:
+ lines.extend(source[self.pos:self.pos + rows])
+ self.pos += rows
+ else:
+ new_rows = []
+ try:
+ if rows is not None:
+ for _ in xrange(rows):
new_rows.append(next(source))
- rows += 1
- except csv.Error, inst:
- if 'newline inside string' in str(inst):
- row_num = str(self.pos + rows)
- msg = ('EOF inside string starting with line '
- + row_num)
- raise Exception(msg)
- raise
- except StopIteration:
- lines.extend(new_rows)
- if len(lines) == 0:
- raise
- self.pos += len(new_rows)
+ lines.extend(new_rows)
+ else:
+ rows = 0
+ while True:
+ try:
+ new_rows.append(next(source))
+ rows += 1
+ except csv.Error, inst:
+ if 'newline inside string' in str(inst):
+ row_num = str(self.pos + rows)
+ msg = ('EOF inside string starting with line '
+ + row_num)
+ raise Exception(msg)
+ raise
+ except StopIteration:
+ lines.extend(new_rows)
+ if len(lines) == 0:
+ raise
+ self.pos += len(new_rows)
- self.buf = []
+ self.buf = []
+ else:
+ lines = new_rows
if self.skip_footer:
lines = lines[:-self.skip_footer]
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index cc2dddd829302..f9e956f60dde6 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -1037,6 +1037,24 @@ def test_iterator(self):
iterator=True)
self.assert_(isinstance(treader, TextFileReader))
+ # stopping iteration when on chunksize is specified, GH 3967
+ data = """A,B,C
+foo,1,2,3
+bar,4,5,6
+baz,7,8,9
+"""
+ reader = self.read_csv(StringIO(data), iterator=True)
+ result = list(reader)
+ expected = DataFrame(dict(A = [1,4,7], B = [2,5,8], C = [3,6,9]), index=['foo','bar','baz'])
+ tm.assert_frame_equal(result[0], expected)
+
+ # chunksize = 1
+ reader = self.read_csv(StringIO(data), chunksize=1)
+ result = list(reader)
+ expected = DataFrame(dict(A = [1,4,7], B = [2,5,8], C = [3,6,9]), index=['foo','bar','baz'])
+ self.assert_(len(result) == 3)
+ tm.assert_frame_equal(pd.concat(result), expected)
+
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
| BUG: python parser failing with `chunksize=1`
closes #3967
| https://api.github.com/repos/pandas-dev/pandas/pulls/3978 | 2013-06-21T01:00:15Z | 2013-06-21T01:22:34Z | 2013-06-21T01:22:33Z | 2014-07-10T21:01:57Z |
BUG: Possibly invalidate the item_cache when numpy implicty converts a v... | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 0fa7b4b2ed5f2..555169441f3cb 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -287,7 +287,7 @@ pandas 0.12
- Fixed insertion issue into DataFrame, after rename (:issue:`4032`)
- Fixed testing issue where too many sockets where open thus leading to a
connection reset issue (:issue:`3982`, :issue:`3985`)
-
+ - Possibly invalidate the item_cache when numpy implicty converts a view to a copy (:issue:`3970`)
pandas 0.11.0
=============
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 627a8ab825e5f..caf610ad2a0a6 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1,5 +1,6 @@
# pylint: disable=W0231,E1101
+import weakref
import numpy as np
from pandas.core.index import MultiIndex
@@ -666,6 +667,7 @@ def _get_item_cache(self, item):
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
+ res._cacher = weakref.ref(self)
return res
def _box_item_values(self, key, values):
@@ -1065,6 +1067,7 @@ def take(self, indices, axis=0, convert=True):
new_data = self._data.reindex_axis(new_items, axis=0)
else:
new_data = self._data.take(indices, axis=axis, verify=False)
+
return self._constructor(new_data)
def tz_convert(self, tz, axis=0, copy=True):
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 27c12fcd2e8eb..ace285c7399c7 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -184,6 +184,14 @@ def setter(item, v):
if np.prod(values.shape):
values[indexer] = value
+ # we might need to invalidate a cached version of myself
+ cacher = getattr(self.obj,'_cacher',None)
+ if cacher is not None:
+ try:
+ cacher()._clear_item_cache()
+ except:
+ pass
+
def _align_series(self, indexer, ser):
# indexer to assign Series can be tuple or scalar
if isinstance(indexer, tuple):
@@ -709,6 +717,7 @@ def _getbool_axis(self, key, axis=0):
return self.obj.take(inds, axis=axis, convert=False)
except (Exception), detail:
raise self._exception(detail)
+
def _get_slice_axis(self, slice_obj, axis=0):
""" this is pretty simple as we just have to deal with labels """
obj = self.obj
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 0fbadafeca617..c0f62a7bc725c 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1650,6 +1650,7 @@ def _consolidate_inplace(self):
self._known_consolidated = True
def get(self, item):
+
if self.items.is_unique:
_, block = self._find_block(item)
return block.get(item)
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 8b6bf1ed7f651..491d347320b72 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1066,7 +1066,18 @@ def test_iloc_non_unique_indexing(self):
result = df2.loc[idx]
assert_frame_equal(result, expected)
-
+ def test_series_iloc(self):
+ # GH 3970
+
+ df = DataFrame({ "aa":range(5), "bb":[2.2]*5})
+ df["cc"] = 0.0
+ ck = [True]*len(df)
+ df["bb"].loc[0] = .13 # works
+ df_tmp = df.iloc[ck]
+ df["bb"].loc[0] = .15 # doesn't work
+ expected = DataFrame({ "aa":range(5), "bb":[0.15,2.2,2.2,2.2,2.2], "cc": 0.0 })
+ assert_frame_equal(df, expected)
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
| closes #3970
`df['bb'].iloc[0] = 0.13`
here `bb` is put into the `_item_cache` and the first element assigned 0.13; `bb` is still a view onto the frame block
`df_tmp = df.iloc[[True]*len(df)]`
`df_tmp` is now a copy of df, but built up as a take block-by-block from `df`. I believe numpy then decides to invalidate the views to the memory in the float block (why I have no idea); `bb` in `df` is still holding the view in the `_item_cache` to the old memory location.
`df['bb'].iloc[0] = 0.15`
grabs `bb` from the cache and assigns 0.15 to it, BUT it is not longer a view onto the float block in `df` so nothing appears to get updated
I tried 2 fixes:
1) when getting an item from the `_item_cache` check if its a view of the underlying data - this works, but makes lookups way slow
2) clear the cache when `taking` ON THE ORIGINAL frame, so future lookups will cache-miss and get the correct data from the block manager
Fundamentally we COULD do this in the first statement `df['bb'].iloc[0] = 0.13`, where we don't reset the cache, except we don't have a reference to df (well we do, but by the time the operation is carried out we have an operation on the series, so have lost the frame reference)
So this is fixed in this case, but not sure what other numpy ops implicity convert view-> copy (and we are holding onto the cached view).
| https://api.github.com/repos/pandas-dev/pandas/pulls/3977 | 2013-06-21T00:00:43Z | 2013-06-28T17:58:04Z | null | 2013-06-28T17:58:04Z |
DOC: put release notes link to dev until 0.11.1 is released | diff --git a/RELEASE.md b/RELEASE.md
index 9e21bbf23948a..b1e2aadf485a8 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -2,5 +2,5 @@ Release Notes
=============
The list of changes to pandas between each release can be found
-[here](http://pandas.pydata.org/pandas-docs/stable/release.html). For full
+[here](http://pandas.pydata.org/pandas-docs/dev/release.html). For full
details, see the commit logs at http://github.com/pydata/pandas.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3975 | 2013-06-20T23:39:14Z | 2013-06-21T00:34:29Z | 2013-06-21T00:34:29Z | 2014-07-16T08:15:12Z | |
ENH: print more detailed truncated sequence if a limit is given | works similar to numpy except that the edgeitems is overriden by threshold rather than the other way around which is what np does.
closes #3391
- [x] add release notes
| https://api.github.com/repos/pandas-dev/pandas/pulls/3974 | 2013-06-20T21:52:45Z | 2014-03-13T14:37:40Z | 2014-03-13T14:37:40Z | 2014-07-17T06:09:18Z | |
ENH (GH3969) Implement unit='D' in to_datetime | diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 08bcd9cfad8cc..d8c3caaabb36f 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -2752,23 +2752,31 @@ def test_basics_nanos(self):
self.assert_(stamp.nanosecond == 500)
def test_unit(self):
- def check(val,unit=None,s=1,us=0):
+ def check(val,unit=None,h=1,s=1,us=0):
stamp = Timestamp(val, unit=unit)
self.assert_(stamp.year == 2000)
self.assert_(stamp.month == 1)
self.assert_(stamp.day == 1)
- self.assert_(stamp.hour == 1)
- self.assert_(stamp.minute == 1)
- self.assert_(stamp.second == s)
- self.assert_(stamp.microsecond == us)
+ self.assert_(stamp.hour == h)
+ if unit != 'D':
+ self.assert_(stamp.minute == 1)
+ self.assert_(stamp.second == s)
+ self.assert_(stamp.microsecond == us)
+ else:
+ self.assert_(stamp.minute == 0)
+ self.assert_(stamp.second == 0)
+ self.assert_(stamp.microsecond == 0)
self.assert_(stamp.nanosecond == 0)
- val = Timestamp('20000101 01:01:01').value
+ ts = Timestamp('20000101 01:01:01')
+ val = ts.value
+ days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val/1000L,unit='us')
check(val/1000000L,unit='ms')
check(val/1000000000L,unit='s')
+ check(days,unit='D',h=0)
# using truediv, so these are like floats
if py3compat.PY3:
@@ -2792,6 +2800,7 @@ def check(val,unit=None,s=1,us=0):
check(val/1000000.0 + 0.5,unit='ms',us=500)
check(val/1000000.0 + 0.005,unit='ms',us=5)
check(val/1000000000.0 + 0.5,unit='s',us=500000)
+ check(days + 0.5,unit='D',h=12)
# nan
result = Timestamp(np.nan)
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 531d9f399279b..43d44702d2d5e 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -69,7 +69,7 @@ def to_datetime(arg, errors='ignore', dayfirst=False, utc=None, box=True,
format : string, default None
strftime to parse time, eg "%d/%m/%Y"
coerce : force errors to NaT (False by default)
- unit : unit of the arg (s,ms,us,ns) denote the unit in epoch
+ unit : unit of the arg (D,s,ms,us,ns) denote the unit in epoch
(e.g. a unix timestamp), which is an integer/float number
Returns
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 9b611032455ae..e6e0f8b71cf1b 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -1269,7 +1269,10 @@ cdef inline _get_datetime64_nanos(object val):
cdef inline int64_t cast_from_unit(object unit, object ts):
""" return a casting of the unit represented to nanoseconds
round the fractional part of a float to our precision, p """
- if unit == 's':
+ if unit == 'D':
+ m = 1000000000L * 86400
+ p = 6
+ elif unit == 's':
m = 1000000000L
p = 6
elif unit == 'ms':
| closes #3969
| https://api.github.com/repos/pandas-dev/pandas/pulls/3973 | 2013-06-20T21:45:00Z | 2013-06-21T00:09:49Z | null | 2014-06-27T23:18:38Z |
CLN: fix grammar in extract_index error message | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 47142daa8b20b..bf9d1cd7d30b9 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5713,7 +5713,8 @@ def extract_index(data):
raw_lengths.append(len(v))
if not indexes and not raw_lengths:
- raise ValueError('If use all scalar values, must pass index')
+ raise ValueError('If using all scalar values, you must must pass'
+ ' an index')
if have_series or have_dicts:
index = _union_indexes(indexes)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 63f92e9fa7a35..8b32b3a641ebb 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -2245,8 +2245,9 @@ def test_constructor_error_msgs(self):
try:
DataFrame({'a': False, 'b': True})
except (Exception), detail:
+ msg = 'If using all scalar values, you must must pass an index'
self.assert_(type(detail) == ValueError)
- self.assert_("If use all scalar values, must pass index" in str(detail))
+ self.assert_(msg in str(detail))
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
| closes #3968.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3972 | 2013-06-20T18:03:40Z | 2013-06-20T19:26:51Z | 2013-06-20T19:26:50Z | 2014-07-16T08:15:11Z |
ENH add cython tutorial | diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst
new file mode 100644
index 0000000000000..db28dfde926bf
--- /dev/null
+++ b/doc/source/enhancingperf.rst
@@ -0,0 +1,273 @@
+.. _enhancingperf:
+
+.. currentmodule:: pandas
+
+.. ipython:: python
+ :suppress:
+
+ import os
+ import csv
+ from pandas import DataFrame
+ import pandas as pd
+
+ import numpy as np
+ np.random.seed(123456)
+ randn = np.random.randn
+ randint = np.random.randint
+ np.set_printoptions(precision=4, suppress=True)
+
+
+*********************
+Enhancing Performance
+*********************
+
+.. _enhancingperf.cython:
+
+Cython (Writing C extensions for pandas)
+----------------------------------------
+
+For many use cases writing pandas in pure python and numpy is sufficient. In some
+computationally heavy applications however, it can be possible to achieve sizeable
+speed-ups by offloading work to `cython <http://cython.org/>`_.
+
+This tutorial assumes you have refactored as much as possible in python, for example
+trying to remove for loops and making use of numpy vectorization. It's always worth
+optimising in python first.
+
+This tutorial walks through a "typical" process of cythonizing a slow computation.
+We use an `example from the cython documentation <http://docs.cython.org/src/quickstart/cythonize.html>`_
+but in the context of pandas. Our final cythonized solution is around 100 times
+faster than the pure python.
+
+.. _enhancingperf.pure:
+
+Pure python
+~~~~~~~~~~~
+
+We have a DataFrame to which we want to apply a function row-wise.
+
+.. ipython:: python
+
+ df = DataFrame({'a': randn(1000), 'b': randn(1000),'N': randint(100, 1000, (1000)), 'x': 'x'})
+ df
+
+Here's the function in pure python:
+
+.. ipython:: python
+
+ def f(x):
+ return x * (x - 1)
+
+ def integrate_f(a, b, N):
+ s = 0
+ dx = (b - a) / N
+ for i in range(N):
+ s += f(a + i * dx)
+ return s * dx
+
+We achieve our result by using ``apply`` (row-wise):
+
+.. ipython:: python
+
+ %timeit df.apply(lambda x: integrate_f(x['a'], x['b'], x['N']), axis=1)
+
+But clearly this isn't fast enough for us. Let's take a look and see where the
+time is spent during this operation (limited to the most time consuming
+four calls) using the `prun ipython magic function <http://ipython.org/ipython-doc/stable/api/generated/IPython.core.magics.execution.html#IPython.core.magics.execution.ExecutionMagics.prun>`_:
+
+.. ipython:: python
+
+ %prun -l 4 df.apply(lambda x: integrate_f(x['a'], x['b'], x['N']), axis=1)
+
+By far the majority of time is spent inside either ``integrate_f`` or ``f``,
+hence we'll concentrate our efforts on cythonizing these two functions.
+
+.. note::
+
+ In python 2 replacing the ``range`` with its generator counterpart (``xrange``)
+ would mean the ``range`` line would vanish. In python 3 range is already a generator.
+
+.. _enhancingperf.plain:
+
+Plain cython
+~~~~~~~~~~~~
+
+First we're going to need to import the cython magic function to ipython:
+
+.. ipython:: python
+
+ %load_ext cythonmagic
+
+
+Now, let's simply copy our functions over to cython as is (the suffix
+is here to distinguish between function versions):
+
+.. ipython::
+
+ In [2]: %%cython
+ ...: def f_plain(x):
+ ...: return x * (x - 1)
+ ...: def integrate_f_plain(a, b, N):
+ ...: s = 0
+ ...: dx = (b - a) / N
+ ...: for i in range(N):
+ ...: s += f_plain(a + i * dx)
+ ...: return s * dx
+ ...:
+
+.. note::
+
+ If you're having trouble pasting the above into your ipython, you may need
+ to be using bleeding edge ipython for paste to play well with cell magics.
+
+
+.. ipython:: python
+
+ %timeit df.apply(lambda x: integrate_f_plain(x['a'], x['b'], x['N']), axis=1)
+
+Already this has shaved a third off, not too bad for a simple copy and paste.
+
+.. _enhancingperf.type:
+
+Adding type
+~~~~~~~~~~~
+
+We get another huge improvement simply by providing type information:
+
+.. ipython::
+
+ In [3]: %%cython
+ ...: cdef double f_typed(double x) except? -2:
+ ...: return x * (x - 1)
+ ...: cpdef double integrate_f_typed(double a, double b, int N):
+ ...: cdef int i
+ ...: cdef double s, dx
+ ...: s = 0
+ ...: dx = (b - a) / N
+ ...: for i in range(N):
+ ...: s += f_typed(a + i * dx)
+ ...: return s * dx
+ ...:
+
+.. ipython:: python
+
+ %timeit df.apply(lambda x: integrate_f_typed(x['a'], x['b'], x['N']), axis=1)
+
+Now, we're talking! It's now over ten times faster than the original python
+implementation, and we haven't *really* modified the code. Let's have another
+look at what's eating up time:
+
+.. ipython:: python
+
+ %prun -l 4 df.apply(lambda x: integrate_f_typed(x['a'], x['b'], x['N']), axis=1)
+
+.. _enhancingperf.ndarray:
+
+Using ndarray
+~~~~~~~~~~~~~
+
+It's calling series... a lot! It's creating a Series from each row, and getting from both
+the index and the series (three times for each row). Function calls are expensive
+in python, so maybe we could minimise these by cythonizing the apply part.
+
+.. note::
+
+ We are now passing ndarrays into the cython function, fortunately cython plays
+ very nicely with numpy.
+
+.. ipython::
+
+ In [4]: %%cython
+ ...: cimport numpy as np
+ ...: import numpy as np
+ ...: cdef double f_typed(double x) except? -2:
+ ...: return x * (x - 1)
+ ...: cpdef double integrate_f_typed(double a, double b, int N):
+ ...: cdef int i
+ ...: cdef double s, dx
+ ...: s = 0
+ ...: dx = (b - a) / N
+ ...: for i in range(N):
+ ...: s += f_typed(a + i * dx)
+ ...: return s * dx
+ ...: cpdef np.ndarray[double] apply_integrate_f(np.ndarray col_a, np.ndarray col_b, np.ndarray col_N):
+ ...: assert (col_a.dtype == np.float and col_b.dtype == np.float and col_N.dtype == np.int)
+ ...: cdef Py_ssize_t i, n = len(col_N)
+ ...: assert (len(col_a) == len(col_b) == n)
+ ...: cdef np.ndarray[double] res = np.empty(n)
+ ...: for i in range(len(col_a)):
+ ...: res[i] = integrate_f_typed(col_a[i], col_b[i], col_N[i])
+ ...: return res
+ ...:
+
+
+The implementation is simple: it creates an array of zeros and loops over
+the rows, applying our ``integrate_f_typed``, and putting this in the zeros array.
+
+
+.. note::
+
+ Loops like this would be *extremely* slow in python, but in cython looping over
+ numpy arrays is *fast*.
+
+.. ipython:: python
+
+ %timeit apply_integrate_f(df['a'], df['b'], df['N'])
+
+We've gone another three times faster! Let's check again where the time is spent:
+
+.. ipython:: python
+
+ %prun -l 4 apply_integrate_f(df['a'], df['b'], df['N'])
+
+As one might expect, the majority of the time is now spent in ``apply_integrate_f``,
+so if we wanted to make any more efficiencies we must continue to concentrate our
+efforts here.
+
+.. _enhancingperf.boundswrap:
+
+More advanced techniques
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+There is still scope for improvement, here's an example of using some more
+advanced cython techniques:
+
+.. ipython::
+
+ In [5]: %%cython
+ ...: cimport cython
+ ...: cimport numpy as np
+ ...: import numpy as np
+ ...: cdef double f_typed(double x) except? -2:
+ ...: return x * (x - 1)
+ ...: cpdef double integrate_f_typed(double a, double b, int N):
+ ...: cdef int i
+ ...: cdef double s, dx
+ ...: s = 0
+ ...: dx = (b - a) / N
+ ...: for i in range(N):
+ ...: s += f_typed(a + i * dx)
+ ...: return s * dx
+ ...: @cython.boundscheck(False)
+ ...: @cython.wraparound(False)
+ ...: cpdef np.ndarray[double] apply_integrate_f_wrap(np.ndarray[double] col_a, np.ndarray[double] col_b, np.ndarray[Py_ssize_t] col_N):
+ ...: cdef Py_ssize_t i, n = len(col_N)
+ ...: assert len(col_a) == len(col_b) == n
+ ...: cdef np.ndarray[double] res = np.empty(n)
+ ...: for i in range(n):
+ ...: res[i] = integrate_f_typed(col_a[i], col_b[i], col_N[i])
+ ...: return res
+ ...:
+
+.. ipython:: python
+
+ %timeit apply_integrate_f_wrap(df['a'], df['b'], df['N'])
+
+This shaves another third off!
+
+Further topics
+~~~~~~~~~~~~~~
+
+- Loading C modules into cython.
+
+Read more in the `cython docs <http://docs.cython.org/>`_.
\ No newline at end of file
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 21a79ffdb85fd..67f1a3c1e6312 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -126,6 +126,7 @@ See the package overview for more detail about what's in the library.
visualization
rplot
io
+ performance
sparse
gotchas
r_interface
diff --git a/doc/sphinxext/ipython_directive.py b/doc/sphinxext/ipython_directive.py
index bc3c46dd5cc93..b237341e81125 100644
--- a/doc/sphinxext/ipython_directive.py
+++ b/doc/sphinxext/ipython_directive.py
@@ -296,11 +296,14 @@ def process_input(self, data, input_prompt, lineno):
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
- input_lines = input.split('\n')
+ def _remove_first_space_if_any(line):
+ return line[1:] if line.startswith(' ') else line
+
+ input_lines = map(_remove_first_space_if_any, input.split('\n'))
self.datacontent = data
- continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
+ continuation = ' %s: '%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
| WIP #3923
Please checkout this draft for a pandas cython tutorial, any feedback or ideas appreciated.
_Thanks to @cpcloud for patiently working out/explaining how to force ipython sphinx directive to play nicely with %%cython (it's incredibly sensitive!) and fixing the spacing bug. This seems to build now._ :)
| https://api.github.com/repos/pandas-dev/pandas/pulls/3965 | 2013-06-19T23:28:35Z | 2013-06-21T00:15:16Z | 2013-06-21T00:15:16Z | 2014-06-29T19:01:34Z |
DOC/CLN: remove gh links and use the new issue format for whatsnew | diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 85ce838c2b414..7fdb1c53cc15b 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -71,10 +71,10 @@ your contribution or address the issue you're having.
which fails on python 2.6, use `self.assertRaises(TheException,func,args)` instead.
- - RELEASE.rst and doc/source/vx.y.z.txt contain an on-going changelog for each
- release as it is worked on. Add entries to these files as needed in
- a separate commit in your PR, documenting the fix, enhancement or (unavoidable)
- breaking change.
+ - doc/source/release.rst and doc/source/vx.y.z.txt contain an on-going
+ changelog for each release as it is worked on. Add entries to these files
+ as needed in a separate commit in your PR, documenting the fix, enhancement
+ or (unavoidable) breaking change.
- For extra brownie points, use "git rebase -i" to squash and reorder
commits in your PR so that the history makes the most sense. Use your own
judgment to decide what history needs to be preserved.
diff --git a/RELEASE.md b/RELEASE.md
new file mode 100644
index 0000000000000..9e21bbf23948a
--- /dev/null
+++ b/RELEASE.md
@@ -0,0 +1,6 @@
+Release Notes
+=============
+
+The list of changes to pandas between each release can be found
+[here](http://pandas.pydata.org/pandas-docs/stable/release.html). For full
+details, see the commit logs at http://github.com/pydata/pandas.
diff --git a/RELEASE.rst b/RELEASE.rst
deleted file mode 100644
index da5b95a2c0647..0000000000000
--- a/RELEASE.rst
+++ /dev/null
@@ -1,4169 +0,0 @@
-
-=============
-Release Notes
-=============
-
-This is the list of changes to pandas between each release. For full details,
-see the commit logs at http://github.com/pydata/pandas
-
-What is it
-----------
-
-pandas is a Python package providing fast, flexible, and expressive data
-structures designed to make working with “relational” or “labeled” data both
-easy and intuitive. It aims to be the fundamental high-level building block for
-doing practical, real world data analysis in Python. Additionally, it has the
-broader goal of becoming the most powerful and flexible open source data
-analysis / manipulation tool available in any language.
-
-Where to get it
----------------
-
-* Source code: http://github.com/pydata/pandas
-* Binary installers on PyPI: http://pypi.python.org/pypi/pandas
-* Documentation: http://pandas.pydata.org
-
-pandas 0.11.1
-=============
-
-**Release date:** not-yet-released
-
-**New features**
-
- - ``pd.read_html()`` can now parse HTML strings, files or urls and
- returns a list of ``DataFrame`` s courtesy of @cpcloud. (GH3477_, GH3605_,
- GH3606_)
- - Support for reading Amazon S3 files. (GH3504_)
- - Added module for reading and writing Stata files: pandas.io.stata (GH1512_)
- includes ``to_stata`` DataFrame method, and a ``read_stata`` top-level reader
- - Added support for writing in ``to_csv`` and reading in ``read_csv``,
- multi-index columns. The ``header`` option in ``read_csv`` now accepts a
- list of the rows from which to read the index. Added the option,
- ``tupleize_cols`` to provide compatibility for the pre 0.11.1 behavior of
- writing and reading multi-index columns via a list of tuples. The default in
- 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
- multi-index column.
- Note: The default value will change in 0.12 to make the default *to* write and
- read multi-index columns in the new format. (GH3571_, GH1651_, GH3141_)
- - Add iterator to ``Series.str`` (GH3638_)
- - ``pd.set_option()`` now allows N option, value pairs (GH3667_).
- - Added keyword parameters for different types of scatter_matrix subplots
- - A ``filter`` method on grouped Series or DataFrames returns a subset of
- the original (GH3680_, GH919_)
- - Access to historical Google Finance data in pandas.io.data (GH3814_)
-
-**Improvements to existing features**
-
- - Fixed various issues with internal pprinting code, the repr() for various objects
- including TimeStamp and Index now produces valid python code strings and
- can be used to recreate the object, (GH3038_, GH3379_, GH3251_, GH3460_)
- - ``convert_objects`` now accepts a ``copy`` parameter (defaults to ``True``)
- - ``HDFStore``
-
- - will retain index attributes (freq,tz,name) on recreation (GH3499_)
- - will warn with a ``AttributeConflictWarning`` if you are attempting to append
- an index with a different frequency than the existing, or attempting
- to append an index with a different name than the existing
- - support datelike columns with a timezone as data_columns (GH2852_)
- - table writing performance improvements.
- - support python3 (via ``PyTables 3.0.0``) (GH3750_)
- - Add modulo operator to Series, DataFrame
- - Add ``date`` method to DatetimeIndex
- - Simplified the API and added a describe method to Categorical
- - ``melt`` now accepts the optional parameters ``var_name`` and ``value_name``
- to specify custom column names of the returned DataFrame (GH3649_),
- thanks @hoechenberger
- - clipboard functions use pyperclip (no dependencies on Windows, alternative
- dependencies offered for Linux) (GH3837_).
- - Plotting functions now raise a ``TypeError`` before trying to plot anything
- if the associated objects have a dtype of ``object`` (GH1818_,
- GH3572_, GH3911_, GH3912_), but they will try to convert object arrays to
- numeric arrays if possible so that you can still plot, for example, an
- object array with floats. This happens before any drawing takes place which
- eliminates any spurious plots from showing up.
- - Added Faq section on repr display options, to help users customize their setup.
- - ``where`` operations that result in block splitting are much faster (GH3733_)
- - Series and DataFrame hist methods now take a ``figsize`` argument (GH3834_)
- - DatetimeIndexes no longer try to convert mixed-integer indexes during join
- operations (GH3877_)
- - Add ``unit`` keyword to ``Timestamp`` and ``to_datetime`` to enable passing of
- integers or floats that are in an epoch unit of ``s, ms, us, ns``
- (e.g. unix timestamps or epoch ``s``, with fractional seconds allowed) (GH3540_)
- - DataFrame corr method (spearman) is now cythonized.
-
-**API Changes**
-
- - ``HDFStore``
-
- - When removing an object, ``remove(key)`` raises
- ``KeyError`` if the key is not a valid store object.
- - raise a ``TypeError`` on passing ``where`` or ``columns``
- to select with a Storer; these are invalid parameters at this time
- - can now specify an ``encoding`` option to ``append/put``
- to enable alternate encodings (GH3750_)
- - enable support for ``iterator/chunksize`` with ``read_hdf``
- - The repr() for (Multi)Index now obeys display.max_seq_items rather
- then numpy threshold print options. (GH3426_, GH3466_)
- - Added mangle_dupe_cols option to read_table/csv, allowing users
- to control legacy behaviour re dupe cols (A, A.1, A.2 vs A, A ) (GH3468_)
- Note: The default value will change in 0.12 to the "no mangle" behaviour,
- If your code relies on this behaviour, explicitly specify mangle_dupe_cols=True
- in your calls.
- - Do not allow astypes on ``datetime64[ns]`` except to ``object``, and
- ``timedelta64[ns]`` to ``object/int`` (GH3425_)
- - The behavior of ``datetime64`` dtypes has changed with respect to certain
- so-called reduction operations (GH3726_). The following operations now
- raise a ``TypeError`` when performed on a ``Series`` and return an *empty*
- ``Series`` when performed on a ``DataFrame`` similar to performing these
- operations on, for example, a ``DataFrame`` of ``slice`` objects:
- - sum, prod, mean, std, var, skew, kurt, corr, and cov
- - Do not allow datetimelike/timedeltalike creation except with valid types
- (e.g. cannot pass ``datetime64[ms]``) (GH3423_)
- - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
- DataFrame -> Series if groups are unique. Regression from 0.10.1,
- partial revert on (GH2893_) with (GH3596_)
- - Raise on ``iloc`` when boolean indexing with a label based indexer mask
- e.g. a boolean Series, even with integer labels, will raise. Since ``iloc``
- is purely positional based, the labels on the Series are not alignable (GH3631_)
- - The ``raise_on_error`` option to plotting methods is obviated by GH3572_,
- so it is removed. Plots now always raise when data cannot be plotted or the
- object being plotted has a dtype of ``object``.
- - ``DataFrame.interpolate()`` is now deprecated. Please use
- ``DataFrame.fillna()`` and ``DataFrame.replace()`` instead (GH3582_,
- GH3675_, GH3676_).
- - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
- deprecated
- - ``DataFrame.replace`` 's ``infer_types`` parameter is removed and now
- performs conversion by default. (GH3907_)
- - Deprecated display.height, display.width is now only a formatting option
- does not control triggering of summary, similar to < 0.11.0.
- - Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column
- to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_)
- - io API changes
-
- - added ``pandas.io.api`` for i/o imports
- - removed ``Excel`` support to ``pandas.io.excel``
- - added top-level ``pd.read_sql`` and ``to_sql`` DataFrame methods
- - removed ``clipboard`` support to ``pandas.io.clipboard``
- - replace top-level and instance methods ``save`` and ``load`` with top-level ``read_pickle`` and
- ``to_pickle`` instance method, ``save`` and ``load`` will give deprecation warning.
- - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
- deprecated
- - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
- deprecated
- - Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_)
- - ``as_matrix`` with mixed signed and unsigned dtypes will result in 2 x the lcd of the unsigned
- as an int, maxing with ``int64``, to avoid precision issues (GH3733_)
- - ``na_values`` in a list provided to ``read_csv/read_excel`` will match string and numeric versions
- e.g. ``na_values=['99']`` will match 99 whether the column ends up being int, float, or string (GH3611_)
- - ``read_html`` now defaults to ``None`` when reading, and falls back on
- ``bs4`` + ``html5lib`` when lxml fails to parse. a list of parsers to try
- until success is also valid
- - more consistency in the to_datetime return types (give string/array of string inputs) (GH3888_)
-
-**Bug Fixes**
-
- - Fixed an esoteric excel reading bug, xlrd>= 0.9.0 now required for excel
- support. Should provide python3 support (for reading) which has been
- lacking. (GH3164_)
- - Allow unioning of date ranges sharing a timezone (GH3491_)
- - Fix to_csv issue when having a large number of rows and ``NaT`` in some
- columns (GH3437_)
- - ``.loc`` was not raising when passed an integer list (GH3449_)
- - Unordered time series selection was misbehaving when using label slicing (GH3448_)
- - Fix sorting in a frame with a list of columns which contains datetime64[ns] dtypes (GH3461_)
- - DataFrames fetched via FRED now handle '.' as a NaN. (GH3469_)
- - Fix regression in a DataFrame apply with axis=1, objects were not being converted back
- to base dtypes correctly (GH3480_)
- - Fix issue when storing uint dtypes in an HDFStore. (GH3493_)
- - Non-unique index support clarified (GH3468_)
-
- - Addressed handling of dupe columns in df.to_csv new and old (GH3454_, GH3457_)
- - Fix assigning a new index to a duplicate index in a DataFrame would fail (GH3468_)
- - Fix construction of a DataFrame with a duplicate index
- - ref_locs support to allow duplicative indices across dtypes,
- allows iget support to always find the index (even across dtypes) (GH2194_)
- - applymap on a DataFrame with a non-unique index now works
- (removed warning) (GH2786_), and fix (GH3230_)
- - Fix to_csv to handle non-unique columns (GH3495_)
- - Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_)
- and handle missing elements like unique indices (GH3561_)
- - Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_)
- - Concat to produce a non-unique columns when duplicates are across dtypes is fixed (GH3602_)
- - Non-unique indexing with a slice via ``loc`` and friends fixed (GH3659_)
- - Allow insert/delete to non-unique columns (GH3679_)
- - Extend ``reindex`` to correctly deal with non-unique indices (GH3679_)
- - ``DataFrame.itertuples()`` now works with frames with duplicate column
- names (GH3873_)
- - Fixed bug in groupby with empty series referencing a variable before assignment. (GH3510_)
- - Fixed bug in mixed-frame assignment with aligned series (GH3492_)
- - Fixed bug in selecting month/quarter/year from a series would not select the time element
- on the last day (GH3546_)
- - Fixed a couple of MultiIndex rendering bugs in df.to_html() (GH3547_, GH3553_)
- - Properly convert np.datetime64 objects in a Series (GH3416_)
- - Raise a ``TypeError`` on invalid datetime/timedelta operations
- e.g. add datetimes, multiple timedelta x datetime
- - Fix ``.diff`` on datelike and timedelta operations (GH3100_)
- - ``combine_first`` not returning the same dtype in cases where it can (GH3552_)
- - Fixed bug with ``Panel.transpose`` argument aliases (GH3556_)
- - Fixed platform bug in ``PeriodIndex.take`` (GH3579_)
- - Fixed bug in incorrect conversion of datetime64[ns] in ``combine_first`` (GH3593_)
- - Fixed bug in reset_index with ``NaN`` in a multi-index (GH3586_)
- - ``fillna`` methods now raise a ``TypeError`` when the ``value`` parameter
- is a ``list`` or ``tuple``.
- - Fixed bug where a time-series was being selected in preference to an actual column name
- in a frame (GH3594_)
- - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
- ``np.nan`` or ``np.inf`` as appropriate (GH3590_)
- - Fix incorrect dtype on groupby with ``as_index=False`` (GH3610_)
- - Fix ``read_csv/read_excel`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
- was failing (GH3611_)
- - Disable HTML output in qtconsole again. (GH3657_)
- - Reworked the new repr display logic, which users found confusing. (GH3663_)
- - Fix indexing issue in ndim >= 3 with ``iloc`` (GH3617_)
- - Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] dtype in ``read_csv``
- when ``parse_dates`` is specified (GH3062_)
- - Fix not consolidating before to_csv (GH3624_)
- - Fix alignment issue when setitem in a DataFrame with a piece of a DataFrame (GH3626_) or
- a mixed DataFrame and a Series (GH3668_)
- - Fix plotting of unordered DatetimeIndex (GH3601_)
- - ``sql.write_frame`` failing when writing a single column to sqlite (GH3628_),
- thanks to @stonebig
- - Fix pivoting with ``nan`` in the index (GH3558_)
- - Fix running of bs4 tests when it is not installed (GH3605_)
- - Fix parsing of html table (GH3606_)
- - ``read_html()`` now only allows a single backend: ``html5lib`` (GH3616_)
- - ``convert_objects`` with ``convert_dates='coerce'`` was parsing some single-letter strings
- into today's date
- - ``DataFrame.from_records`` did not accept empty recarrays (GH3682_)
- - ``DataFrame.to_csv`` will succeed with the deprecated option ``nanRep``, @tdsmith
- - ``DataFrame.to_html`` and ``DataFrame.to_latex`` now accept a path for
- their first argument (GH3702_)
- - Fix file tokenization error with \r delimiter and quoted fields (GH3453_)
- - Groupby transform with item-by-item not upcasting correctly (GH3740_)
- - Incorrectly read a HDFStore multi-index Frame with a column specification (GH3748_)
- - ``read_html`` now correctly skips tests (GH3741_)
- - PandasObjects raise TypeError when trying to hash (GH3882_)
- - Fix incorrect arguments passed to concat that are not list-like (e.g. concat(df1,df2)) (GH3481_)
- - Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes)
- in ``read_csv`` (GH3795_)
- - Fix index name not propagating when using ``loc/ix`` (GH3880_)
- - Fix groupby when applying a custom function resulting in a returned DataFrame was
- not converting dtypes (GH3911_)
- - Fixed a bug where ``DataFrame.replace`` with a compiled regular expression
- in the ``to_replace`` argument wasn't working (GH3907_)
- - Fixed ``__truediv__`` in Python 2.7 with ``numexpr`` installed to actually do true division when dividing
- two integer arrays with at least 10000 cells total (GH3764_)
- - Indexing with a string with seconds resolution not selecting from a time index (GH3925_)
-
-.. _GH3164: https://github.com/pydata/pandas/issues/3164
-.. _GH2786: https://github.com/pydata/pandas/issues/2786
-.. _GH2194: https://github.com/pydata/pandas/issues/2194
-.. _GH3230: https://github.com/pydata/pandas/issues/3230
-.. _GH3425: https://github.com/pydata/pandas/issues/3425
-.. _GH3416: https://github.com/pydata/pandas/issues/3416
-.. _GH3423: https://github.com/pydata/pandas/issues/3423
-.. _GH3251: https://github.com/pydata/pandas/issues/3251
-.. _GH3379: https://github.com/pydata/pandas/issues/3379
-.. _GH3480: https://github.com/pydata/pandas/issues/3480
-.. _GH3481: https://github.com/pydata/pandas/issues/3481
-.. _GH2852: https://github.com/pydata/pandas/issues/2852
-.. _GH3100: https://github.com/pydata/pandas/issues/3100
-.. _GH3454: https://github.com/pydata/pandas/issues/3454
-.. _GH3457: https://github.com/pydata/pandas/issues/3457
-.. _GH3491: https://github.com/pydata/pandas/issues/3491
-.. _GH3426: https://github.com/pydata/pandas/issues/3426
-.. _GH3466: https://github.com/pydata/pandas/issues/3466
-.. _GH3038: https://github.com/pydata/pandas/issues/3038
-.. _GH3510: https://github.com/pydata/pandas/issues/3510
-.. _GH3547: https://github.com/pydata/pandas/issues/3547
-.. _GH3553: https://github.com/pydata/pandas/issues/3553
-.. _GH3437: https://github.com/pydata/pandas/issues/3437
-.. _GH3468: https://github.com/pydata/pandas/issues/3468
-.. _GH3453: https://github.com/pydata/pandas/issues/3453
-.. _GH3455: https://github.com/pydata/pandas/issues/3455
-.. _GH3457: https://github.com/pydata/pandas/issues/3457
-.. _GH3477: https://github.com/pydata/pandas/issues/3457
-.. _GH3460: https://github.com/pydata/pandas/issues/3460
-.. _GH3461: https://github.com/pydata/pandas/issues/3461
-.. _GH3546: https://github.com/pydata/pandas/issues/3546
-.. _GH3468: https://github.com/pydata/pandas/issues/3468
-.. _GH3448: https://github.com/pydata/pandas/issues/3448
-.. _GH3499: https://github.com/pydata/pandas/issues/3499
-.. _GH3495: https://github.com/pydata/pandas/issues/3495
-.. _GH3492: https://github.com/pydata/pandas/issues/3492
-.. _GH3540: https://github.com/pydata/pandas/issues/3540
-.. _GH3552: https://github.com/pydata/pandas/issues/3552
-.. _GH3562: https://github.com/pydata/pandas/issues/3562
-.. _GH3586: https://github.com/pydata/pandas/issues/3586
-.. _GH3561: https://github.com/pydata/pandas/issues/3561
-.. _GH3493: https://github.com/pydata/pandas/issues/3493
-.. _GH3579: https://github.com/pydata/pandas/issues/3579
-.. _GH3593: https://github.com/pydata/pandas/issues/3593
-.. _GH3556: https://github.com/pydata/pandas/issues/3556
-.. _GH3594: https://github.com/pydata/pandas/issues/3594
-.. _GH3590: https://github.com/pydata/pandas/issues/3590
-.. _GH3610: https://github.com/pydata/pandas/issues/3610
-.. _GH3596: https://github.com/pydata/pandas/issues/3596
-.. _GH3617: https://github.com/pydata/pandas/issues/3617
-.. _GH3435: https://github.com/pydata/pandas/issues/3435
-.. _GH3611: https://github.com/pydata/pandas/issues/3611
-.. _GH3558: https://github.com/pydata/pandas/issues/3558
-.. _GH3062: https://github.com/pydata/pandas/issues/3062
-.. _GH3624: https://github.com/pydata/pandas/issues/3624
-.. _GH3626: https://github.com/pydata/pandas/issues/3626
-.. _GH3601: https://github.com/pydata/pandas/issues/3601
-.. _GH3631: https://github.com/pydata/pandas/issues/3631
-.. _GH3602: https://github.com/pydata/pandas/issues/3602
-.. _GH1512: https://github.com/pydata/pandas/issues/1512
-.. _GH3571: https://github.com/pydata/pandas/issues/3571
-.. _GH1651: https://github.com/pydata/pandas/issues/1651
-.. _GH3141: https://github.com/pydata/pandas/issues/3141
-.. _GH3628: https://github.com/pydata/pandas/issues/3628
-.. _GH3638: https://github.com/pydata/pandas/issues/3638
-.. _GH3668: https://github.com/pydata/pandas/issues/3668
-.. _GH3605: https://github.com/pydata/pandas/issues/3605
-.. _GH3606: https://github.com/pydata/pandas/issues/3606
-.. _GH3659: https://github.com/pydata/pandas/issues/3659
-.. _GH3649: https://github.com/pydata/pandas/issues/3649
-.. _GH3679: https://github.com/pydata/pandas/issues/3679
-.. _Gh3616: https://github.com/pydata/pandas/issues/3616
-.. _GH1818: https://github.com/pydata/pandas/issues/1818
-.. _GH3572: https://github.com/pydata/pandas/issues/3572
-.. _GH3582: https://github.com/pydata/pandas/issues/3582
-.. _GH3676: https://github.com/pydata/pandas/issues/3676
-.. _GH3675: https://github.com/pydata/pandas/issues/3675
-.. _GH3682: https://github.com/pydata/pandas/issues/3682
-.. _GH3702: https://github.com/pydata/pandas/issues/3702
-.. _GH3691: https://github.com/pydata/pandas/issues/3691
-.. _GH3696: https://github.com/pydata/pandas/issues/3696
-.. _GH3667: https://github.com/pydata/pandas/issues/3667
-.. _GH3733: https://github.com/pydata/pandas/issues/3733
-.. _GH3740: https://github.com/pydata/pandas/issues/3740
-.. _GH3748: https://github.com/pydata/pandas/issues/3748
-.. _GH3741: https://github.com/pydata/pandas/issues/3741
-.. _GH3750: https://github.com/pydata/pandas/issues/3750
-.. _GH3726: https://github.com/pydata/pandas/issues/3726
-.. _GH3795: https://github.com/pydata/pandas/issues/3795
-.. _GH3814: https://github.com/pydata/pandas/issues/3814
-.. _GH3834: https://github.com/pydata/pandas/issues/3834
-.. _GH3873: https://github.com/pydata/pandas/issues/3873
-.. _GH3877: https://github.com/pydata/pandas/issues/3877
-.. _GH3659: https://github.com/pydata/pandas/issues/3659
-.. _GH3679: https://github.com/pydata/pandas/issues/3679
-.. _GH3880: https://github.com/pydata/pandas/issues/3880
-.. _GH3911: https://github.com/pydata/pandas/issues/3911
-.. _GH3907: https://github.com/pydata/pandas/issues/3907
-.. _GH3911: https://github.com/pydata/pandas/issues/3911
-.. _GH3912: https://github.com/pydata/pandas/issues/3912
-.. _GH3764: https://github.com/pydata/pandas/issues/3764
-.. _GH3888: https://github.com/pydata/pandas/issues/3888
-.. _GH3925: https://github.com/pydata/pandas/issues/3925
-
-pandas 0.11.0
-=============
-
-**Release date:** 2013-04-22
-
-**New features**
-
- - New documentation section, ``10 Minutes to Pandas``
- - New documentation section, ``Cookbook``
- - Allow mixed dtypes (e.g ``float32/float64/int32/int16/int8``) to coexist in
-    DataFrames and propagate in operations
- - Add function to pandas.io.data for retrieving stock index components from
- Yahoo! finance (GH2795_)
- - Support slicing with time objects (GH2681_)
- - Added ``.iloc`` attribute, to support strict integer based indexing,
- analogous to ``.ix`` (GH2922_)
-  - Added ``.loc`` attribute, to support strict label based indexing, analogous
- to ``.ix`` (GH3053_)
- - Added ``.iat`` attribute, to support fast scalar access via integers
- (replaces ``iget_value/iset_value``)
- - Added ``.at`` attribute, to support fast scalar access via labels (replaces
- ``get_value/set_value``)
-  - Moved functionality from ``irow,icol,iget_value/iset_value`` to ``.iloc`` indexer
- (via ``_ixs`` methods in each object)
- - Added support for expression evaluation using the ``numexpr`` library
- - Added ``convert=boolean`` to ``take`` routines to translate negative
- indices to positive, defaults to True
-  - Added to_series() method to indices, to facilitate the creation of indexers
- (GH3275_)
-
-**Improvements to existing features**
-
- - Improved performance of df.to_csv() by up to 10x in some cases. (GH3059_)
- - added ``blocks`` attribute to DataFrames, to return a dict of dtypes to
- homogeneously dtyped DataFrames
- - added keyword ``convert_numeric`` to ``convert_objects()`` to try to
- convert object dtypes to numeric types (default is False)
- - ``convert_dates`` in ``convert_objects`` can now be ``coerce`` which will
- return a datetime64[ns] dtype with non-convertibles set as ``NaT``; will
- preserve an all-nan object (e.g. strings), default is True (to perform
-    soft-conversion)
- - Series print output now includes the dtype by default
- - Optimize internal reindexing routines (GH2819_, GH2867_)
- - ``describe_option()`` now reports the default and current value of options.
- - Add ``format`` option to ``pandas.to_datetime`` with faster conversion of
- strings that can be parsed with datetime.strptime
- - Add ``axes`` property to ``Series`` for compatibility
- - Add ``xs`` function to ``Series`` for compatibility
- - Allow setitem in a frame where only mixed numerics are present (e.g. int
- and float), (GH3037_)
- - ``HDFStore``
-
- - Provide dotted attribute access to ``get`` from stores
- (e.g. store.df == store['df'])
- - New keywords ``iterator=boolean``, and ``chunksize=number_in_a_chunk``
- are provided to support iteration on ``select`` and
- ``select_as_multiple`` (GH3076_)
- - support ``read_hdf/to_hdf`` API similar to ``read_csv/to_csv`` (GH3222_)
-
- - Add ``squeeze`` method to possibly remove length 1 dimensions from an
- object.
-
- .. ipython:: python
-
- p = Panel(randn(3,4,4),items=['ItemA','ItemB','ItemC'],
- major_axis=date_range('20010102',periods=4),
- minor_axis=['A','B','C','D'])
- p
- p.reindex(items=['ItemA']).squeeze()
- p.reindex(items=['ItemA'],minor=['B']).squeeze()
-
- - Improvement to Yahoo API access in ``pd.io.data.Options`` (GH2758_)
-  - added option `display.max_seq_items` to control the number of
-    elements printed per sequence when pretty-printing it. (GH2979_)
- - added option `display.chop_threshold` to control display of small numerical
- values. (GH2739_)
- - added option `display.max_info_rows` to prevent verbose_info from being
- calculated for frames above 1M rows (configurable). (GH2807_, GH2918_)
- - value_counts() now accepts a "normalize" argument, for normalized
- histograms. (GH2710_).
- - DataFrame.from_records now accepts not only dicts but any instance of
- the collections.Mapping ABC.
- - Allow selection semantics via a string with a datelike index to work in both
- Series and DataFrames (GH3070_)
-
- .. ipython:: python
-
- idx = date_range("2001-10-1", periods=5, freq='M')
- ts = Series(np.random.rand(len(idx)),index=idx)
- ts['2001']
-
- df = DataFrame(dict(A = ts))
- df['2001']
- - added option `display.mpl_style` providing a sleeker visual style
- for plots. Based on https://gist.github.com/huyng/816622 (GH3075_).
-
-
- - Improved performance across several core functions by taking memory
- ordering of arrays into account. Courtesy of @stephenwlin (GH3130_)
- - Improved performance of groupby transform method (GH2121_)
- - Handle "ragged" CSV files missing trailing delimiters in rows with missing
- fields when also providing explicit list of column names (so the parser
- knows how many columns to expect in the result) (GH2981_)
- - On a mixed DataFrame, allow setting with indexers with ndarray/DataFrame
- on rhs (GH3216_)
- - Treat boolean values as integers (values 1 and 0) for numeric
- operations. (GH2641_)
- - Add ``time`` method to DatetimeIndex (GH3180_)
- - Return NA when using Series.str[...] for values that are not long enough
- (GH3223_)
- - Display cursor coordinate information in time-series plots (GH1670_)
- - to_html() now accepts an optional "escape" argument to control reserved
- HTML character escaping (enabled by default) and escapes ``&``, in addition
- to ``<`` and ``>``. (GH2919_)
-
-**API Changes**
-
- - Do not automatically upcast numeric specified dtypes to ``int64`` or
- ``float64`` (GH622_ and GH797_)
- - DataFrame construction of lists and scalars, with no dtype present, will
- result in casting to ``int64`` or ``float64``, regardless of platform.
- This is not an apparent change in the API, but noting it.
- - Guarantee that ``convert_objects()`` for Series/DataFrame always returns a
- copy
- - groupby operations will respect dtypes for numeric float operations
- (float32/float64); other types will be operated on, and will try to cast
- back to the input dtype (e.g. if an int is passed, as long as the output
- doesn't have nans, then an int will be returned)
- - backfill/pad/take/diff/ohlc will now support ``float32/int16/int8``
- operations
- - Block types will upcast as needed in where/masking operations (GH2793_)
- - Series now automatically will try to set the correct dtype based on passed
- datetimelike objects (datetime/Timestamp)
-
- - timedelta64 are returned in appropriate cases (e.g. Series - Series,
- when both are datetime64)
- - mixed datetimes and objects (GH2751_) in a constructor will be cast
- correctly
- - astype on datetimes to object are now handled (as well as NaT
- conversions to np.nan)
- - all timedelta like objects will be correctly assigned to ``timedelta64``
- with mixed ``NaN`` and/or ``NaT`` allowed
-
- - arguments to DataFrame.clip were inconsistent to numpy and Series clipping
- (GH2747_)
- - util.testing.assert_frame_equal now checks the column and index names (GH2964_)
- - Constructors will now return a more informative ValueError on failures
- when invalid shapes are passed
- - Don't suppress TypeError in GroupBy.agg (GH3238_)
- - Methods return None when inplace=True (GH1893_)
- - ``HDFStore``
-
- - added the method ``select_column`` to select a single column from a table as a Series.
- - deprecated the ``unique`` method, can be replicated by ``select_column(key,column).unique()``
- - ``min_itemsize`` parameter will now automatically create data_columns for passed keys
-
- - Downcast on pivot if possible (GH3283_), adds argument ``downcast`` to ``fillna``
- - Introduced options `display.height/width` for explicitly specifying terminal
- height/width in characters. Deprecated display.line_width, now replaced by display.width.
- These defaults are in effect for scripts as well, so unless disabled, previously
- very wide output will now be output as "expand_repr" style wrapped output.
- - Various defaults for options (including display.max_rows) have been revised,
- after a brief survey concluded they were wrong for everyone. Now at w=80,h=60.
- - HTML repr output in IPython qtconsole is once again controlled by the option
- `display.notebook_repr_html`, and on by default.
-
-**Bug Fixes**
-
- - Fix seg fault on empty data frame when fillna with ``pad`` or ``backfill``
- (GH2778_)
- - Single element ndarrays of datetimelike objects are handled
- (e.g. np.array(datetime(2001,1,1,0,0))), w/o dtype being passed
- - 0-dim ndarrays with a passed dtype are handled correctly
- (e.g. np.array(0.,dtype='float32'))
- - Fix some boolean indexing inconsistencies in Series.__getitem__/__setitem__
- (GH2776_)
- - Fix issues with DataFrame and Series constructor with integers that
- overflow ``int64`` and some mixed typed type lists (GH2845_)
-
- - ``HDFStore``
-
- - Fix weird PyTables error when using too many selectors in a where
- also correctly filter on any number of values in a Term expression
- (so not using numexpr filtering, but isin filtering)
- - Internally, change all variables to be private-like (now have leading
- underscore)
- - Fixes for query parsing to correctly interpret boolean and != (GH2849_, GH2973_)
- - Fixes for pathological case on SparseSeries with 0-len array and
- compression (GH2931_)
- - Fixes bug with writing rows if part of a block was all-nan (GH3012_)
- - Exceptions are now ValueError or TypeError as needed
- - A table will now raise if min_itemsize contains fields which are not queryables
-
- - Bug showing up in applymap where some object type columns are converted (GH2909_)
- had an incorrect default in convert_objects
-
- - TimeDeltas
-
- - Series ops with a Timestamp on the rhs was throwing an exception (GH2898_)
- added tests for Series ops with datetimes,timedeltas,Timestamps, and datelike
- Series on both lhs and rhs
- - Fixed subtle timedelta64 inference issue on py3 & numpy 1.7.0 (GH3094_)
- - Fixed some formatting issues on timedelta when negative
- - Support null checking on timedelta64, representing (and formatting) with NaT
- - Support setitem with np.nan value, converts to NaT
- - Support min/max ops in a Dataframe (abs not working, nor do we error on non-supported ops)
- - Support idxmin/idxmax/abs/max/min in a Series (GH2989_, GH2982_)
-
- - Bug on in-place putmasking on an ``integer`` series that needs to be converted to
- ``float`` (GH2746_)
- - Bug in argsort of ``datetime64[ns]`` Series with ``NaT`` (GH2967_)
- - Bug in value_counts of ``datetime64[ns]`` Series (GH3002_)
-  - Fixed printing of ``NaT`` in an index
-  - Bug in idxmin/idxmax of ``datetime64[ns]`` Series with ``NaT`` (GH2982_)
-  - Bug in ``icol, take`` with negative indices was producing incorrect return
- values (see GH2922_, GH2892_), also check for out-of-bounds indices (GH3029_)
- - Bug in DataFrame column insertion when the column creation fails, existing frame is left in
- an irrecoverable state (GH3010_)
- - Bug in DataFrame update, combine_first where non-specified values could cause
- dtype changes (GH3016_, GH3041_)
- - Bug in groupby with first/last where dtypes could change (GH3041_, GH2763_)
- - Formatting of an index that has ``nan`` was inconsistent or wrong (would fill from
- other values), (GH2850_)
- - Unstack of a frame with no nans would always cause dtype upcasting (GH2929_)
- - Fix scalar datetime.datetime parsing bug in read_csv (GH3071_)
- - Fixed slow printing of large Dataframes, due to inefficient dtype
- reporting (GH2807_)
- - Fixed a segfault when using a function as grouper in groupby (GH3035_)
- - Fix pretty-printing of infinite data structures (closes GH2978_)
- - Fixed exception when plotting timeseries bearing a timezone (closes GH2877_)
- - str.contains ignored na argument (GH2806_)
- - Substitute warning for segfault when grouping with categorical grouper
- of mismatched length (GH3011_)
- - Fix exception in SparseSeries.density (GH2083_)
- - Fix upsampling bug with closed='left' and daily to daily data (GH3020_)
- - Fixed missing tick bars on scatter_matrix plot (GH3063_)
- - Fixed bug in Timestamp(d,tz=foo) when d is date() rather then datetime() (GH2993_)
-  - series.plot(kind='bar') now respects pylab color scheme (GH3115_)
- - Fixed bug in reshape if not passed correct input, now raises TypeError (GH2719_)
- - Fixed a bug where Series ctor did not respect ordering if OrderedDict passed in (GH3282_)
- - Fix NameError issue on RESO_US (GH2787_)
-  - Allow selection in an *unordered* timeseries to work similarly
- to an *ordered* timeseries (GH2437_).
- - Fix implemented ``.xs`` when called with ``axes=1`` and a level parameter (GH2903_)
- - Timestamp now supports the class method fromordinal similar to datetimes (GH3042_)
-  - Fix issue with indexing a series with a boolean key and specifying a 1-len list on the rhs (GH2745_)
- or a list on the rhs (GH3235_)
- - Fixed bug in groupby apply when kernel generate list of arrays having unequal len (GH1738_)
- - fixed handling of rolling_corr with center=True which could produce corr>1 (GH3155_)
- - Fixed issues where indices can be passed as 'index/column' in addition to 0/1 for the axis parameter
- - PeriodIndex.tolist now boxes to Period (GH3178_)
- - PeriodIndex.get_loc KeyError now reports Period instead of ordinal (GH3179_)
-  - df.to_records bug when handling MultiIndex (GH3189_)
- - Fix Series.__getitem__ segfault when index less than -length (GH3168_)
- - Fix bug when using Timestamp as a date parser (GH2932_)
- - Fix bug creating date range from Timestamp with time zone and passing same
- time zone (GH2926_)
- - Add comparison operators to Period object (GH2781_)
- - Fix bug when concatenating two Series into a DataFrame when they have the
- same name (GH2797_)
- - Fix automatic color cycling when plotting consecutive timeseries
- without color arguments (GH2816_)
- - fixed bug in the pickling of PeriodIndex (GH2891_)
- - Upcast/split blocks when needed in a mixed DataFrame when setitem
- with an indexer (GH3216_)
- - Invoking df.applymap on a dataframe with dupe cols now raises a ValueError (GH2786_)
- - Apply with invalid returned indices raise correct Exception (GH2808_)
- - Fixed a bug in plotting log-scale bar plots (GH3247_)
- - df.plot() grid on/off now obeys the mpl default style, just like
- series.plot(). (GH3233_)
- - Fixed a bug in the legend of plotting.andrews_curves() (GH3278_)
- - Produce a series on apply if we only generate a singular series and have
- a simple index (GH2893_)
- - Fix Python ascii file parsing when integer falls outside of floating point
- spacing (GH3258_)
-  - fixed pretty printing of sets (GH3294_)
-  - Panel() and Panel.from_dict() now respects ordering when given an OrderedDict (GH3303_)
- - DataFrame where with a datetimelike incorrectly selecting (GH3311_)
- - Ensure index casts work even in Int64Index
- - Fix set_index segfault when passing MultiIndex (GH3308_)
- - Ensure pickles created in py2 can be read in py3
- - Insert ellipsis in MultiIndex summary repr (GH3348_)
- - Groupby will handle mutation among an input groups columns (and fallback
- to non-fast apply) (GH3380_)
- - Eliminated unicode errors on FreeBSD when using MPL GTK backend (GH3360_)
- - Period.strftime should return unicode strings always (GH3363_)
- - Respect passed read_* chunksize in get_chunk function (GH3406_)
-
-.. _GH3294: https://github.com/pydata/pandas/issues/3294
-.. _GH622: https://github.com/pydata/pandas/issues/622
-.. _GH3348: https://github.com/pydata/pandas/issues/3348
-.. _GH797: https://github.com/pydata/pandas/issues/797
-.. _GH1893: https://github.com/pydata/pandas/issues/1893
-.. _GH1978: https://github.com/pydata/pandas/issues/1978
-.. _GH3360: https://github.com/pydata/pandas/issues/3360
-.. _GH3363: https://github.com/pydata/pandas/issues/3363
-.. _GH2758: https://github.com/pydata/pandas/issues/2758
-.. _GH3275: https://github.com/pydata/pandas/issues/3275
-.. _GH2121: https://github.com/pydata/pandas/issues/2121
-.. _GH3247: https://github.com/pydata/pandas/issues/3247
-.. _GH2809: https://github.com/pydata/pandas/issues/2809
-.. _GH2810: https://github.com/pydata/pandas/issues/2810
-.. _GH2837: https://github.com/pydata/pandas/issues/2837
-.. _GH2898: https://github.com/pydata/pandas/issues/2898
-.. _GH3233: https://github.com/pydata/pandas/issues/3233
-.. _GH3035: https://github.com/pydata/pandas/issues/3035
-.. _GH3020: https://github.com/pydata/pandas/issues/3020
-.. _GH2978: https://github.com/pydata/pandas/issues/2978
-.. _GH2877: https://github.com/pydata/pandas/issues/2877
-.. _GH2739: https://github.com/pydata/pandas/issues/2739
-.. _GH2710: https://github.com/pydata/pandas/issues/2710
-.. _GH2806: https://github.com/pydata/pandas/issues/2806
-.. _GH2807: https://github.com/pydata/pandas/issues/2807
-.. _GH3278: https://github.com/pydata/pandas/issues/3278
-.. _GH2891: https://github.com/pydata/pandas/issues/2891
-.. _GH2918: https://github.com/pydata/pandas/issues/2918
-.. _GH3011: https://github.com/pydata/pandas/issues/3011
-.. _GH2745: https://github.com/pydata/pandas/issues/2745
-.. _GH622: https://github.com/pydata/pandas/issues/622
-.. _GH797: https://github.com/pydata/pandas/issues/797
-.. _GH1670: https://github.com/pydata/pandas/issues/1670
-.. _GH2681: https://github.com/pydata/pandas/issues/2681
-.. _GH2719: https://github.com/pydata/pandas/issues/2719
-.. _GH2746: https://github.com/pydata/pandas/issues/2746
-.. _GH2747: https://github.com/pydata/pandas/issues/2747
-.. _GH2751: https://github.com/pydata/pandas/issues/2751
-.. _GH2763: https://github.com/pydata/pandas/issues/2763
-.. _GH2776: https://github.com/pydata/pandas/issues/2776
-.. _GH2778: https://github.com/pydata/pandas/issues/2778
-.. _GH2781: https://github.com/pydata/pandas/issues/2781
-.. _GH2786: https://github.com/pydata/pandas/issues/2786
-.. _GH2787: https://github.com/pydata/pandas/issues/2787
-.. _GH3282: https://github.com/pydata/pandas/issues/3282
-.. _GH2437: https://github.com/pydata/pandas/issues/2437
-.. _GH2753: https://github.com/pydata/pandas/issues/2753
-.. _GH2793: https://github.com/pydata/pandas/issues/2793
-.. _GH2795: https://github.com/pydata/pandas/issues/2795
-.. _GH2797: https://github.com/pydata/pandas/issues/2797
-.. _GH2819: https://github.com/pydata/pandas/issues/2819
-.. _GH2845: https://github.com/pydata/pandas/issues/2845
-.. _GH2867: https://github.com/pydata/pandas/issues/2867
-.. _GH2803: https://github.com/pydata/pandas/issues/2803
-.. _GH2807: https://github.com/pydata/pandas/issues/2807
-.. _GH2808: https://github.com/pydata/pandas/issues/2808
-.. _GH2849: https://github.com/pydata/pandas/issues/2849
-.. _GH2850: https://github.com/pydata/pandas/issues/2850
-.. _GH2898: https://github.com/pydata/pandas/issues/2898
-.. _GH2892: https://github.com/pydata/pandas/issues/2892
-.. _GH2893: https://github.com/pydata/pandas/issues/2893
-.. _GH2902: https://github.com/pydata/pandas/issues/2902
-.. _GH2903: https://github.com/pydata/pandas/issues/2903
-.. _GH2909: https://github.com/pydata/pandas/issues/2909
-.. _GH2922: https://github.com/pydata/pandas/issues/2922
-.. _GH2926: https://github.com/pydata/pandas/issues/2926
-.. _GH2929: https://github.com/pydata/pandas/issues/2929
-.. _GH2931: https://github.com/pydata/pandas/issues/2931
-.. _GH2932: https://github.com/pydata/pandas/issues/2932
-.. _GH2973: https://github.com/pydata/pandas/issues/2973
-.. _GH2967: https://github.com/pydata/pandas/issues/2967
-.. _GH2981: https://github.com/pydata/pandas/issues/2981
-.. _GH2982: https://github.com/pydata/pandas/issues/2982
-.. _GH2989: https://github.com/pydata/pandas/issues/2989
-.. _GH2993: https://github.com/pydata/pandas/issues/2993
-.. _GH3002: https://github.com/pydata/pandas/issues/3002
-.. _GH3155: https://github.com/pydata/pandas/issues/3155
-.. _GH3010: https://github.com/pydata/pandas/issues/3010
-.. _GH1738: https://github.com/pydata/pandas/issues/1738
-.. _GH3012: https://github.com/pydata/pandas/issues/3012
-.. _GH3029: https://github.com/pydata/pandas/issues/3029
-.. _GH3037: https://github.com/pydata/pandas/issues/3037
-.. _GH3041: https://github.com/pydata/pandas/issues/3041
-.. _GH3042: https://github.com/pydata/pandas/issues/3042
-.. _GH3053: https://github.com/pydata/pandas/issues/3053
-.. _GH3070: https://github.com/pydata/pandas/issues/3070
-.. _GH3076: https://github.com/pydata/pandas/issues/3076
-.. _GH3063: https://github.com/pydata/pandas/issues/3063
-.. _GH3059: https://github.com/pydata/pandas/issues/3059
-.. _GH2993: https://github.com/pydata/pandas/issues/2993
-.. _GH3115: https://github.com/pydata/pandas/issues/3115
-.. _GH3070: https://github.com/pydata/pandas/issues/3070
-.. _GH3075: https://github.com/pydata/pandas/issues/3075
-.. _GH3094: https://github.com/pydata/pandas/issues/3094
-.. _GH3130: https://github.com/pydata/pandas/issues/3130
-.. _GH3168: https://github.com/pydata/pandas/issues/3168
-.. _GH3178: https://github.com/pydata/pandas/issues/3178
-.. _GH3179: https://github.com/pydata/pandas/issues/3179
-.. _GH3189: https://github.com/pydata/pandas/issues/3189
-.. _GH2751: https://github.com/pydata/pandas/issues/2751
-.. _GH2747: https://github.com/pydata/pandas/issues/2747
-.. _GH2816: https://github.com/pydata/pandas/issues/2816
-.. _GH3216: https://github.com/pydata/pandas/issues/3216
-.. _GH3222: https://github.com/pydata/pandas/issues/3222
-.. _GH2641: https://github.com/pydata/pandas/issues/2641
-.. _GH3223: https://github.com/pydata/pandas/issues/3223
-.. _GH3238: https://github.com/pydata/pandas/issues/3238
-.. _GH3258: https://github.com/pydata/pandas/issues/3258
-.. _GH3283: https://github.com/pydata/pandas/issues/3283
-.. _GH2919: https://github.com/pydata/pandas/issues/2919
-.. _GH3308: https://github.com/pydata/pandas/issues/3308
-.. _GH3311: https://github.com/pydata/pandas/issues/3311
-.. _GH3380: https://github.com/pydata/pandas/issues/3380
-.. _GH3406: https://github.com/pydata/pandas/issues/3406
-
-pandas 0.10.1
-=============
-
-**Release date:** 2013-01-22
-
-**New features**
-
-  - Add data interface to World Bank WDI pandas.io.wb (GH2592_)
-
-**API Changes**
-
- - Restored inplace=True behavior returning self (same object) with
- deprecation warning until 0.11 (GH1893_)
- - ``HDFStore``
-
-    - refactored HDFStore to deal with non-table stores as objects, will allow future enhancements
- - removed keyword ``compression`` from ``put`` (replaced by keyword
- ``complib`` to be consistent across library)
- - warn `PerformanceWarning` if you are attempting to store types that will be pickled by PyTables
-
-**Improvements to existing features**
-
- - ``HDFStore``
-
- - enables storing of multi-index dataframes (closes GH1277_)
- - support data column indexing and selection, via ``data_columns`` keyword
- in append
- - support write chunking to reduce memory footprint, via ``chunksize``
- keyword to append
- - support automagic indexing via ``index`` keyword to append
- - support ``expectedrows`` keyword in append to inform ``PyTables`` about
- the expected tablesize
- - support ``start`` and ``stop`` keywords in select to limit the row
- selection space
- - added ``get_store`` context manager to automatically import with pandas
- - added column filtering via ``columns`` keyword in select
- - added methods append_to_multiple/select_as_multiple/select_as_coordinates
- to do multiple-table append/selection
- - added support for datetime64 in columns
- - added method ``unique`` to select the unique values in an indexable or
- data column
- - added method ``copy`` to copy an existing store (and possibly upgrade)
- - show the shape of the data on disk for non-table stores when printing the
- store
-    - added ability to read PyTables flavor tables (allows compatibility to
-      other HDF5 systems)
- - Add ``logx`` option to DataFrame/Series.plot (GH2327_, GH2565_)
- - Support reading gzipped data from file-like object
- - ``pivot_table`` aggfunc can be anything used in GroupBy.aggregate (GH2643_)
- - Implement DataFrame merges in case where set cardinalities might overflow
- 64-bit integer (GH2690_)
- - Raise exception in C file parser if integer dtype specified and have NA
- values. (GH2631_)
- - Attempt to parse ISO8601 format dates when parse_dates=True in read_csv for
- major performance boost in such cases (GH2698_)
- - Add methods ``neg`` and ``inv`` to Series
- - Implement ``kind`` option in ``ExcelFile`` to indicate whether it's an XLS
- or XLSX file (GH2613_)
-
-**Bug fixes**
-
- - Fix read_csv/read_table multithreading issues (GH2608_)
- - ``HDFStore``
-
- - correctly handle ``nan`` elements in string columns; serialize via the
- ``nan_rep`` keyword to append
- - raise correctly on non-implemented column types (unicode/date)
- - handle correctly ``Term`` passed types (e.g. ``index<1000``, when index
- is ``Int64``), (closes GH512_)
- - handle Timestamp correctly in data_columns (closes GH2637_)
- - contains correctly matches on non-natural names
- - correctly store ``float32`` dtypes in tables (if not other float types in
- the same table)
- - Fix DataFrame.info bug with UTF8-encoded columns. (GH2576_)
- - Fix DatetimeIndex handling of FixedOffset tz (GH2604_)
- - More robust detection of being in IPython session for wide DataFrame
- console formatting (GH2585_)
- - Fix platform issues with ``file:///`` in unit test (GH2564_)
- - Fix bug and possible segfault when grouping by hierarchical level that
- contains NA values (GH2616_)
- - Ensure that MultiIndex tuples can be constructed with NAs (GH2616_)
- - Fix int64 overflow issue when unstacking MultiIndex with many levels
- (GH2616_)
- - Exclude non-numeric data from DataFrame.quantile by default (GH2625_)
- - Fix a Cython C int64 boxing issue causing read_csv to return incorrect
- results (GH2599_)
- - Fix groupby summing performance issue on boolean data (GH2692_)
- - Don't bork Series containing datetime64 values with to_datetime (GH2699_)
- - Fix DataFrame.from_records corner case when passed columns, index column,
- but empty record list (GH2633_)
- - Fix C parser-tokenizer bug with trailing fields. (GH2668_)
- - Don't exclude non-numeric data from GroupBy.max/min (GH2700_)
- - Don't lose time zone when calling DatetimeIndex.drop (GH2621_)
- - Fix setitem on a Series with a boolean key and a non-scalar as value
- (GH2686_)
- - Box datetime64 values in Series.apply/map (GH2627_, GH2689_)
- - Upconvert datetime + datetime64 values when concatenating frames (GH2624_)
- - Raise a more helpful error message in merge operations when one DataFrame
- has duplicate columns (GH2649_)
-  - Fix partial date parsing issue occurring only when code is run at EOM
- (GH2618_)
- - Prevent MemoryError when using counting sort in sortlevel with
- high-cardinality MultiIndex objects (GH2684_)
- - Fix Period resampling bug when all values fall into a single bin (GH2070_)
- - Fix buggy interaction with usecols argument in read_csv when there is an
- implicit first index column (GH2654_)
-
-.. _GH512: https://github.com/pydata/pandas/issues/512
-.. _GH1277: https://github.com/pydata/pandas/issues/1277
-.. _GH2070: https://github.com/pydata/pandas/issues/2070
-.. _GH2327: https://github.com/pydata/pandas/issues/2327
-.. _GH2565: https://github.com/pydata/pandas/issues/2565
-.. _GH2585: https://github.com/pydata/pandas/issues/2585
-.. _GH2599: https://github.com/pydata/pandas/issues/2599
-.. _GH2604: https://github.com/pydata/pandas/issues/2604
-.. _GH2576: https://github.com/pydata/pandas/issues/2576
-.. _GH2608: https://github.com/pydata/pandas/issues/2608
-.. _GH2613: https://github.com/pydata/pandas/issues/2613
-.. _GH2616: https://github.com/pydata/pandas/issues/2616
-.. _GH2621: https://github.com/pydata/pandas/issues/2621
-.. _GH2624: https://github.com/pydata/pandas/issues/2624
-.. _GH2625: https://github.com/pydata/pandas/issues/2625
-.. _GH2627: https://github.com/pydata/pandas/issues/2627
-.. _GH2631: https://github.com/pydata/pandas/issues/2631
-.. _GH2633: https://github.com/pydata/pandas/issues/2633
-.. _GH2637: https://github.com/pydata/pandas/issues/2637
-.. _GH2643: https://github.com/pydata/pandas/issues/2643
-.. _GH2649: https://github.com/pydata/pandas/issues/2649
-.. _GH2654: https://github.com/pydata/pandas/issues/2654
-.. _GH2668: https://github.com/pydata/pandas/issues/2668
-.. _GH2684: https://github.com/pydata/pandas/issues/2684
-.. _GH2689: https://github.com/pydata/pandas/issues/2689
-.. _GH2690: https://github.com/pydata/pandas/issues/2690
-.. _GH2692: https://github.com/pydata/pandas/issues/2692
-.. _GH2698: https://github.com/pydata/pandas/issues/2698
-.. _GH2699: https://github.com/pydata/pandas/issues/2699
-.. _GH2700: https://github.com/pydata/pandas/issues/2700
-.. _GH2686: https://github.com/pydata/pandas/issues/2686
-.. _GH2618: https://github.com/pydata/pandas/issues/2618
-.. _GH2592: https://github.com/pydata/pandas/issues/2592
-.. _GH2564: https://github.com/pydata/pandas/issues/2564
-.. _GH2616: https://github.com/pydata/pandas/issues/2616
-
-pandas 0.10.0
-=============
-
-**Release date:** 2012-12-17
-
-**New features**
-
- - Brand new high-performance delimited file parsing engine written in C and
- Cython. 50% or better performance in many standard use cases with a
- fraction as much memory usage. (GH407_, GH821_)
- - Many new file parser (read_csv, read_table) features:
-
- - Support for on-the-fly gzip or bz2 decompression (`compression` option)
- - Ability to get back numpy.recarray instead of DataFrame
- (`as_recarray=True`)
- - `dtype` option: explicit column dtypes
- - `usecols` option: specify list of columns to be read from a file. Good
- for reading very wide files with many irrelevant columns (GH1216_ GH926_, GH2465_)
- - Enhanced unicode decoding support via `encoding` option
- - `skipinitialspace` dialect option
- - Can specify strings to be recognized as True (`true_values`) or False
- (`false_values`)
- - High-performance `delim_whitespace` option for whitespace-delimited
- files; a preferred alternative to the '\s+' regular expression delimiter
- - Option to skip "bad" lines (wrong number of fields) that would otherwise
- have caused an error in the past (`error_bad_lines` and `warn_bad_lines`
- options)
- - Substantially improved performance in the parsing of integers with
- thousands markers and lines with comments
-    - Easy parsing of European (and other) decimal formats (`decimal` option) (GH584_, GH2466_)
- - Custom line terminators (e.g. lineterminator='~') (GH2457_)
- - Handling of no trailing commas in CSV files (GH2333_)
- - Ability to handle fractional seconds in date_converters (GH2209_)
- - read_csv allow scalar arg to na_values (GH1944_)
- - Explicit column dtype specification in read_* functions (GH1858_)
- - Easier CSV dialect specification (GH1743_)
- - Improve parser performance when handling special characters (GH1204_)
-
- - Google Analytics API integration with easy oauth2 workflow (GH2283_)
- - Add error handling to Series.str.encode/decode (GH2276_)
- - Add ``where`` and ``mask`` to Series (GH2337_)
- - Grouped histogram via `by` keyword in Series/DataFrame.hist (GH2186_)
- - Support optional ``min_periods`` keyword in ``corr`` and ``cov``
- for both Series and DataFrame (GH2002_)
- - Add ``duplicated`` and ``drop_duplicates`` functions to Series (GH1923_)
- - Add docs for ``HDFStore table`` format
- - 'density' property in `SparseSeries` (GH2384_)
- - Add ``ffill`` and ``bfill`` convenience functions for forward- and
- backfilling time series data (GH2284_)
- - New option configuration system and functions `set_option`, `get_option`,
- `describe_option`, and `reset_option`. Deprecate `set_printoptions` and
- `reset_printoptions` (GH2393_).
- You can also access options as attributes via ``pandas.options.X``
- - Wide DataFrames can be viewed more easily in the console with new
- `expand_frame_repr` and `line_width` configuration options. This is on by
- default now (GH2436_)
- - Scikits.timeseries-like moving window functions via ``rolling_window`` (GH1270_)
-
-**Experimental Features**
-
-  - Add support for Panel4D, a named 4 Dimensional structure
- - Add support for ndpanel factory functions, to create custom,
- domain-specific N-Dimensional containers
-
-**API Changes**
-
- - The default binning/labeling behavior for ``resample`` has been changed to
- `closed='left', label='left'` for daily and lower frequencies. This had
- been a large source of confusion for users. See "what's new" page for more
- on this. (GH2410_)
- - Methods with ``inplace`` option now return None instead of the calling
- (modified) object (GH1893_)
- - The special case DataFrame - TimeSeries doing column-by-column broadcasting
- has been deprecated. Users should explicitly do e.g. df.sub(ts, axis=0)
- instead. This is a legacy hack and can lead to subtle bugs.
- - inf/-inf are no longer considered as NA by isnull/notnull. To be clear, this
- is legacy cruft from early pandas. This behavior can be globally re-enabled
- using the new option ``mode.use_inf_as_null`` (GH2050_, GH1919_)
- - ``pandas.merge`` will now default to ``sort=False``. For many use cases
- sorting the join keys is not necessary, and doing it by default is wasteful
- - Specify ``header=0`` explicitly to replace existing column names in file in
- read_* functions.
- - Default column names for header-less parsed files (yielded by read_csv,
- etc.) are now the integers 0, 1, .... A new argument `prefix` has been
- added; to get the v0.9.x behavior specify ``prefix='X'`` (GH2034_). This API
- change was made to make the default column names more consistent with the
- DataFrame constructor's default column names when none are specified.
- - DataFrame selection using a boolean frame now preserves input shape
- - If function passed to Series.apply yields a Series, result will be a
- DataFrame (GH2316_)
- - Values like YES/NO/yes/no will not be considered as boolean by default any
- longer in the file parsers. This can be customized using the new
- ``true_values`` and ``false_values`` options (GH2360_)
- - `obj.fillna()` is no longer valid; make `method='pad'` no longer the
- default option, to be more explicit about what kind of filling to
- perform. Add `ffill/bfill` convenience functions per above (GH2284_)
- - `HDFStore.keys()` now returns an absolute path-name for each key
- - `to_string()` now always returns a unicode string. (GH2224_)
- - File parsers will not handle NA sentinel values arising from passed
- converter functions
-
-**Improvements to existing features**
-
- - Add ``nrows`` option to DataFrame.from_records for iterators (GH1794_)
- - Unstack/reshape algorithm rewrite to avoid high memory use in cases where
- the number of observed key-tuples is much smaller than the total possible
- number that could occur (GH2278_). Also improves performance in most cases.
- - Support duplicate columns in DataFrame.from_records (GH2179_)
- - Add ``normalize`` option to Series/DataFrame.asfreq (GH2137_)
- - SparseSeries and SparseDataFrame construction from empty and scalar
- values now no longer create dense ndarrays unnecessarily (GH2322_)
- - ``HDFStore`` now supports hierarchical keys (GH2397_)
- - Support multiple query selection formats for ``HDFStore tables`` (GH1996_)
- - Support ``del store['df']`` syntax to delete HDFStores
- - Add multi-dtype support for ``HDFStore tables``
- - ``min_itemsize`` parameter can be specified in ``HDFStore table`` creation
- - Indexing support in ``HDFStore tables`` (GH698_)
- - Add `line_terminator` option to DataFrame.to_csv (GH2383_)
- - added implementation of str(x)/unicode(x)/bytes(x) to major pandas data
- structures, which should do the right thing on both py2.x and py3.x. (GH2224_)
- - Reduce groupby.apply overhead substantially by low-level manipulation of
- internal NumPy arrays in DataFrames (GH535_)
- - Implement ``value_vars`` in ``melt`` and add ``melt`` to pandas namespace
- (GH2412_)
- - Added boolean comparison operators to Panel
- - Enable ``Series.str.strip/lstrip/rstrip`` methods to take an argument (GH2411_)
- - The DataFrame ctor now respects column ordering when given
- an OrderedDict (GH2455_)
- - Assigning DatetimeIndex to Series changes the class to TimeSeries (GH2139_)
- - Improve performance of .value_counts method on non-integer data (GH2480_)
- - ``get_level_values`` method for MultiIndex return Index instead of ndarray (GH2449_)
- - ``convert_to_r_dataframe`` conversion for datetime values (GH2351_)
- - Allow ``DataFrame.to_csv`` to represent inf and nan differently (GH2026_)
- - Add ``min_i`` argument to ``nancorr`` to specify minimum required observations (GH2002_)
- - Add ``inplace`` option to ``sortlevel`` / ``sort`` functions on DataFrame (GH1873_)
- - Enable DataFrame to accept scalar constructor values like Series (GH1856_)
- - DataFrame.from_records now takes optional ``size`` parameter (GH1794_)
- - include iris dataset (GH1709_)
- - No datetime64 DataFrame column conversion of datetime.datetime with tzinfo (GH1581_)
- - Micro-optimizations in DataFrame for tracking state of internal consolidation (GH217_)
- - Format parameter in DataFrame.to_csv (GH1525_)
- - Partial string slicing for ``DatetimeIndex`` for daily and higher frequencies (GH2306_)
- - Implement ``col_space`` parameter in ``to_html`` and ``to_string`` in DataFrame (GH1000_)
- - Override ``Series.tolist`` and box datetime64 types (GH2447_)
- - Optimize ``unstack`` memory usage by compressing indices (GH2278_)
- - Fix HTML repr in IPython qtconsole if opening window is small (GH2275_)
- - Escape more special characters in console output (GH2492_)
- - df.select now invokes bool on the result of crit(x) (GH2487_)
-
-**Bug fixes**
-
- - Fix major performance regression in DataFrame.iteritems (GH2273_)
- - Fixes bug when negative period passed to Series/DataFrame.diff (GH2266_)
- - Escape tabs in console output to avoid alignment issues (GH2038_)
- - Properly box datetime64 values when retrieving cross-section from
- mixed-dtype DataFrame (GH2272_)
- - Fix concatenation bug leading to GH2057_, GH2257_
- - Fix regression in Index console formatting (GH2319_)
- - Box Period data when assigning PeriodIndex to frame column (GH2243_, GH2281_)
- - Raise exception on calling reset_index on Series with inplace=True (GH2277_)
- - Enable setting multiple columns in DataFrame with hierarchical columns
- (GH2295_)
- - Respect dtype=object in DataFrame constructor (GH2291_)
- - Fix DatetimeIndex.join bug with tz-aware indexes and how='outer' (GH2317_)
- - pop(...) and del works with DataFrame with duplicate columns (GH2349_)
- - Treat empty strings as NA in date parsing (rather than let dateutil do
- something weird) (GH2263_)
- - Prevent uint64 -> int64 overflows (GH2355_)
- - Enable joins between MultiIndex and regular Index (GH2024_)
- - Fix time zone metadata issue when unioning non-overlapping DatetimeIndex
- objects (GH2367_)
- - Raise/handle int64 overflows in parsers (GH2247_)
- - Deleting of consecutive rows in ``HDFStore tables`` is much faster than before
- - Appending on a HDFStore would fail if the table was not first created via ``put``
- - Use `col_space` argument as minimum column width in DataFrame.to_html (GH2328_)
- - Fix tz-aware DatetimeIndex.to_period (GH2232_)
- - Fix DataFrame row indexing case with MultiIndex (GH2314_)
- - Fix to_excel exporting issues with Timestamp objects in index (GH2294_)
- - Fixes assigning scalars and array to hierarchical column chunk (GH1803_)
- - Fixed a UnicodeDecodeError with series tidy_repr (GH2225_)
- - Fixed issues with duplicate keys in an index (GH2347_, GH2380_)
- - Fixed issues re: Hash randomization, default on starting w/ py3.3 (GH2331_)
- - Fixed issue with missing attributes after loading a pickled dataframe (GH2431_)
- - Fix Timestamp formatting with tzoffset time zone in dateutil 2.1 (GH2443_)
- - Fix GroupBy.apply issue when using BinGrouper to do ts binning (GH2300_)
- - Fix issues resulting from datetime.datetime columns being converted to
- datetime64 when calling DataFrame.apply. (GH2374_)
- - Raise exception when calling to_panel on non uniquely-indexed frame (GH2441_)
- - Improved detection of console encoding on IPython zmq frontends (GH2458_)
- - Preserve time zone when .append-ing two time series (GH2260_)
- - Box timestamps when calling reset_index on time-zone-aware index rather
- than creating a tz-less datetime64 column (GH2262_)
- - Enable searching non-string columns in DataFrame.filter(like=...) (GH2467_)
- - Fixed issue with losing nanosecond precision upon conversion to DatetimeIndex (GH2252_)
- - Handle timezones in Datetime.normalize (GH2338_)
- - Fix test case where dtype specification with endianness causes
- failures on big endian machines (GH2318_)
- - Fix plotting bug where upsampling causes data to appear shifted in time (GH2448_)
- - Fix ``read_csv`` failure for UTF-16 with BOM and skiprows (GH2298_)
- - read_csv with names arg not implicitly setting header=None (GH2459_)
- - Unrecognized compression mode causes segfault in read_csv (GH2474_)
- - In read_csv, header=0 and passed names should discard first row (GH2269_)
- - Correctly route to stdout/stderr in read_table (GH2071_)
- - Fix exception when Timestamp.to_datetime is called on a Timestamp with tzoffset (GH2471_)
- - Fixed unintentional conversion of datetime64 to long in groupby.first() (GH2133_)
- - Union of empty DataFrames now return empty with concatenated index (GH2307_)
- - DataFrame.sort_index raises more helpful exception if sorting by column
- with duplicates (GH2488_)
- - DataFrame.to_string formatters can be list, too (GH2520_)
- - DataFrame.combine_first will always result in the union of the index and
- columns, even if one DataFrame is length-zero (GH2525_)
- - Fix several DataFrame.icol/irow with duplicate indices issues (GH2228_, GH2259_)
- - Use Series names for column names when using concat with axis=1 (GH2489_)
- - Raise Exception if start, end, periods all passed to date_range (GH2538_)
- - Fix Panel resampling issue (GH2537_)
-
-.. _GH407: https://github.com/pydata/pandas/issues/407
-.. _GH821: https://github.com/pydata/pandas/issues/821
-.. _GH1216: https://github.com/pydata/pandas/issues/1216
-.. _GH926: https://github.com/pydata/pandas/issues/926
-.. _GH2465: https://github.com/pydata/pandas/issues/2465
-.. _GH584: https://github.com/pydata/pandas/issues/584
-.. _GH2466: https://github.com/pydata/pandas/issues/2466
-.. _GH2457: https://github.com/pydata/pandas/issues/2457
-.. _GH2333: https://github.com/pydata/pandas/issues/2333
-.. _GH2209: https://github.com/pydata/pandas/issues/2209
-.. _GH1944: https://github.com/pydata/pandas/issues/1944
-.. _GH1858: https://github.com/pydata/pandas/issues/1858
-.. _GH1743: https://github.com/pydata/pandas/issues/1743
-.. _GH1204: https://github.com/pydata/pandas/issues/1204
-.. _GH2283: https://github.com/pydata/pandas/issues/2283
-.. _GH2276: https://github.com/pydata/pandas/issues/2276
-.. _GH2337: https://github.com/pydata/pandas/issues/2337
-.. _GH2186: https://github.com/pydata/pandas/issues/2186
-.. _GH2002: https://github.com/pydata/pandas/issues/2002
-.. _GH1923: https://github.com/pydata/pandas/issues/1923
-.. _GH2384: https://github.com/pydata/pandas/issues/2384
-.. _GH2284: https://github.com/pydata/pandas/issues/2284
-.. _GH2393: https://github.com/pydata/pandas/issues/2393
-.. _GH2436: https://github.com/pydata/pandas/issues/2436
-.. _GH1270: https://github.com/pydata/pandas/issues/1270
-.. _GH2410: https://github.com/pydata/pandas/issues/2410
-.. _GH1893: https://github.com/pydata/pandas/issues/1893
-.. _GH2050: https://github.com/pydata/pandas/issues/2050
-.. _GH1919: https://github.com/pydata/pandas/issues/1919
-.. _GH2034: https://github.com/pydata/pandas/issues/2034
-.. _GH2316: https://github.com/pydata/pandas/issues/2316
-.. _GH2360: https://github.com/pydata/pandas/issues/2360
-.. _GH2224: https://github.com/pydata/pandas/issues/2224
-.. _GH1794: https://github.com/pydata/pandas/issues/1794
-.. _GH2278: https://github.com/pydata/pandas/issues/2278
-.. _GH2179: https://github.com/pydata/pandas/issues/2179
-.. _GH2137: https://github.com/pydata/pandas/issues/2137
-.. _GH2322: https://github.com/pydata/pandas/issues/2322
-.. _GH2397: https://github.com/pydata/pandas/issues/2397
-.. _GH1996: https://github.com/pydata/pandas/issues/1996
-.. _GH698: https://github.com/pydata/pandas/issues/698
-.. _GH2383: https://github.com/pydata/pandas/issues/2383
-.. _GH535: https://github.com/pydata/pandas/issues/535
-.. _GH2412: https://github.com/pydata/pandas/issues/2412
-.. _GH2411: https://github.com/pydata/pandas/issues/2411
-.. _GH2455: https://github.com/pydata/pandas/issues/2455
-.. _GH2139: https://github.com/pydata/pandas/issues/2139
-.. _GH2480: https://github.com/pydata/pandas/issues/2480
-.. _GH2449: https://github.com/pydata/pandas/issues/2449
-.. _GH2351: https://github.com/pydata/pandas/issues/2351
-.. _GH2026: https://github.com/pydata/pandas/issues/2026
-.. _GH1873: https://github.com/pydata/pandas/issues/1873
-.. _GH1856: https://github.com/pydata/pandas/issues/1856
-.. _GH1709: https://github.com/pydata/pandas/issues/1709
-.. _GH1581: https://github.com/pydata/pandas/issues/1581
-.. _GH217: https://github.com/pydata/pandas/issues/217
-.. _GH1525: https://github.com/pydata/pandas/issues/1525
-.. _GH2306: https://github.com/pydata/pandas/issues/2306
-.. _GH1000: https://github.com/pydata/pandas/issues/1000
-.. _GH2447: https://github.com/pydata/pandas/issues/2447
-.. _GH2275: https://github.com/pydata/pandas/issues/2275
-.. _GH2492: https://github.com/pydata/pandas/issues/2492
-.. _GH2487: https://github.com/pydata/pandas/issues/2487
-.. _GH2273: https://github.com/pydata/pandas/issues/2273
-.. _GH2266: https://github.com/pydata/pandas/issues/2266
-.. _GH2038: https://github.com/pydata/pandas/issues/2038
-.. _GH2272: https://github.com/pydata/pandas/issues/2272
-.. _GH2057: https://github.com/pydata/pandas/issues/2057
-.. _GH2257: https://github.com/pydata/pandas/issues/2257
-.. _GH2319: https://github.com/pydata/pandas/issues/2319
-.. _GH2243: https://github.com/pydata/pandas/issues/2243
-.. _GH2281: https://github.com/pydata/pandas/issues/2281
-.. _GH2277: https://github.com/pydata/pandas/issues/2277
-.. _GH2295: https://github.com/pydata/pandas/issues/2295
-.. _GH2291: https://github.com/pydata/pandas/issues/2291
-.. _GH2317: https://github.com/pydata/pandas/issues/2317
-.. _GH2349: https://github.com/pydata/pandas/issues/2349
-.. _GH2263: https://github.com/pydata/pandas/issues/2263
-.. _GH2355: https://github.com/pydata/pandas/issues/2355
-.. _GH2024: https://github.com/pydata/pandas/issues/2024
-.. _GH2367: https://github.com/pydata/pandas/issues/2367
-.. _GH2247: https://github.com/pydata/pandas/issues/2247
-.. _GH2328: https://github.com/pydata/pandas/issues/2328
-.. _GH2232: https://github.com/pydata/pandas/issues/2232
-.. _GH2314: https://github.com/pydata/pandas/issues/2314
-.. _GH2294: https://github.com/pydata/pandas/issues/2294
-.. _GH1803: https://github.com/pydata/pandas/issues/1803
-.. _GH2225: https://github.com/pydata/pandas/issues/2225
-.. _GH2347: https://github.com/pydata/pandas/issues/2347
-.. _GH2380: https://github.com/pydata/pandas/issues/2380
-.. _GH2331: https://github.com/pydata/pandas/issues/2331
-.. _GH2431: https://github.com/pydata/pandas/issues/2431
-.. _GH2443: https://github.com/pydata/pandas/issues/2443
-.. _GH2300: https://github.com/pydata/pandas/issues/2300
-.. _GH2374: https://github.com/pydata/pandas/issues/2374
-.. _GH2441: https://github.com/pydata/pandas/issues/2441
-.. _GH2458: https://github.com/pydata/pandas/issues/2458
-.. _GH2260: https://github.com/pydata/pandas/issues/2260
-.. _GH2262: https://github.com/pydata/pandas/issues/2262
-.. _GH2467: https://github.com/pydata/pandas/issues/2467
-.. _GH2252: https://github.com/pydata/pandas/issues/2252
-.. _GH2338: https://github.com/pydata/pandas/issues/2338
-.. _GH2318: https://github.com/pydata/pandas/issues/2318
-.. _GH2448: https://github.com/pydata/pandas/issues/2448
-.. _GH2298: https://github.com/pydata/pandas/issues/2298
-.. _GH2459: https://github.com/pydata/pandas/issues/2459
-.. _GH2474: https://github.com/pydata/pandas/issues/2474
-.. _GH2269: https://github.com/pydata/pandas/issues/2269
-.. _GH2071: https://github.com/pydata/pandas/issues/2071
-.. _GH2471: https://github.com/pydata/pandas/issues/2471
-.. _GH2133: https://github.com/pydata/pandas/issues/2133
-.. _GH2307: https://github.com/pydata/pandas/issues/2307
-.. _GH2488: https://github.com/pydata/pandas/issues/2488
-.. _GH2520: https://github.com/pydata/pandas/issues/2520
-.. _GH2525: https://github.com/pydata/pandas/issues/2525
-.. _GH2228: https://github.com/pydata/pandas/issues/2228
-.. _GH2259: https://github.com/pydata/pandas/issues/2259
-.. _GH2489: https://github.com/pydata/pandas/issues/2489
-.. _GH2538: https://github.com/pydata/pandas/issues/2538
-.. _GH2537: https://github.com/pydata/pandas/issues/2537
-
-
-pandas 0.9.1
-============
-
-**Release date:** 2012-11-14
-
-**New features**
-
- - Can specify multiple sort orders in DataFrame/Series.sort/sort_index (GH928_)
- - New `top` and `bottom` options for handling NAs in rank (GH1508_, GH2159_)
- - Add `where` and `mask` functions to DataFrame (GH2109_, GH2151_)
- - Add `at_time` and `between_time` functions to DataFrame (GH2149_)
- - Add flexible `pow` and `rpow` methods to DataFrame (GH2190_)
-
-**API Changes**
-
- - Upsampling period index "spans" intervals. Example: annual periods
- upsampled to monthly will span all months in each year
- - Period.end_time will yield timestamp at last nanosecond in the interval
- (GH2124_, GH2125_, GH1764_)
- - File parsers no longer coerce to float or bool for columns that have custom
- converters specified (GH2184_)
-
-**Improvements to existing features**
-
- - Time rule inference for week-of-month (e.g. WOM-2FRI) rules (GH2140_)
- - Improve performance of datetime + business day offset with large number of
- offset periods
- - Improve HTML display of DataFrame objects with hierarchical columns
- - Enable referencing of Excel columns by their column names (GH1936_)
- - DataFrame.dot can accept ndarrays (GH2042_)
- - Support negative periods in Panel.shift (GH2164_)
- - Make .drop(...) work with non-unique indexes (GH2101_)
- - Improve performance of Series/DataFrame.diff (re: GH2087_)
- - Support unary ~ (__invert__) in DataFrame (GH2110_)
- - Turn off pandas-style tick locators and formatters (GH2205_)
- - DataFrame[DataFrame] uses DataFrame.where to compute masked frame (GH2230_)
-
-**Bug fixes**
-
- - Fix some duplicate-column DataFrame constructor issues (GH2079_)
- - Fix bar plot color cycle issues (GH2082_)
- - Fix off-center grid for stacked bar plots (GH2157_)
- - Fix plotting bug if inferred frequency is offset with N > 1 (GH2126_)
- - Implement comparisons on date offsets with fixed delta (GH2078_)
- - Handle inf/-inf correctly in read_* parser functions (GH2041_)
- - Fix matplotlib unicode interaction bug
- - Make WLS r-squared match statsmodels 0.5.0 fixed value
- - Fix zero-trimming DataFrame formatting bug
- - Correctly compute/box datetime64 min/max values from Series.min/max (GH2083_)
- - Fix unstacking edge case with unrepresented groups (GH2100_)
- - Fix Series.str failures when using pipe pattern '|' (GH2119_)
- - Fix pretty-printing of dict entries in Series, DataFrame (GH2144_)
- - Cast other datetime64 values to nanoseconds in DataFrame ctor (GH2095_)
- - Alias Timestamp.astimezone to tz_convert, so will yield Timestamp (GH2060_)
- - Fix timedelta64 formatting from Series (GH2165_, GH2146_)
- - Handle None values gracefully in dict passed to Panel constructor (GH2075_)
- - Box datetime64 values as Timestamp objects in Series/DataFrame.iget (GH2148_)
- - Fix Timestamp indexing bug in DatetimeIndex.insert (GH2155_)
- - Use index name(s) (if any) in DataFrame.to_records (GH2161_)
- - Don't lose index names in Panel.to_frame/DataFrame.to_panel (GH2163_)
- - Work around length-0 boolean indexing NumPy bug (GH2096_)
- - Fix partial integer indexing bug in DataFrame.xs (GH2107_)
- - Fix variety of cut/qcut string-bin formatting bugs (GH1978_, GH1979_)
- - Raise Exception when xs view not possible of MultiIndex'd DataFrame (GH2117_)
- - Fix groupby(...).first() issue with datetime64 (GH2133_)
- - Better floating point error robustness in some rolling_* functions
- (GH2114_, GH2527_)
- - Fix ewma NA handling in the middle of Series (GH2128_)
- - Fix numerical precision issues in diff with integer data (GH2087_)
- - Fix bug in MultiIndex.__getitem__ with NA values (GH2008_)
- - Fix DataFrame.from_records dict-arg bug when passing columns (GH2179_)
- - Fix Series and DataFrame.diff for integer dtypes (GH2087_, GH2174_)
- - Fix bug when taking intersection of DatetimeIndex with empty index (GH2129_)
- - Pass through timezone information when calling DataFrame.align (GH2127_)
- - Properly sort when joining on datetime64 values (GH2196_)
- - Fix indexing bug in which False/True were being coerced to 0/1 (GH2199_)
- - Many unicode formatting fixes (GH2201_)
- - Fix improper MultiIndex conversion issue when assigning
- e.g. DataFrame.index (GH2200_)
- - Fix conversion of mixed-type DataFrame to ndarray with dup columns (GH2236_)
- - Fix duplicate columns issue (GH2218_, GH2219_)
- - Fix SparseSeries.__pow__ issue with NA input (GH2220_)
- - Fix icol with integer sequence failure (GH2228_)
- - Fixed resampling tz-aware time series issue (GH2245_)
- - SparseDataFrame.icol was not returning SparseSeries (GH2227_, GH2229_)
- - Enable ExcelWriter to handle PeriodIndex (GH2240_)
- - Fix issue constructing DataFrame from empty Series with name (GH2234_)
- - Use console-width detection in interactive sessions only (GH1610_)
- - Fix parallel_coordinates legend bug with mpl 1.2.0 (GH2237_)
- - Make tz_localize work in corner case of empty Series (GH2248_)
-
-.. _GH928: https://github.com/pydata/pandas/issues/928
-.. _GH1508: https://github.com/pydata/pandas/issues/1508
-.. _GH2159: https://github.com/pydata/pandas/issues/2159
-.. _GH2109: https://github.com/pydata/pandas/issues/2109
-.. _GH2151: https://github.com/pydata/pandas/issues/2151
-.. _GH2149: https://github.com/pydata/pandas/issues/2149
-.. _GH2190: https://github.com/pydata/pandas/issues/2190
-.. _GH2124: https://github.com/pydata/pandas/issues/2124
-.. _GH2125: https://github.com/pydata/pandas/issues/2125
-.. _GH1764: https://github.com/pydata/pandas/issues/1764
-.. _GH2184: https://github.com/pydata/pandas/issues/2184
-.. _GH2140: https://github.com/pydata/pandas/issues/2140
-.. _GH1936: https://github.com/pydata/pandas/issues/1936
-.. _GH2042: https://github.com/pydata/pandas/issues/2042
-.. _GH2164: https://github.com/pydata/pandas/issues/2164
-.. _GH2101: https://github.com/pydata/pandas/issues/2101
-.. _GH2087: https://github.com/pydata/pandas/issues/2087
-.. _GH2110: https://github.com/pydata/pandas/issues/2110
-.. _GH2205: https://github.com/pydata/pandas/issues/2205
-.. _GH2230: https://github.com/pydata/pandas/issues/2230
-.. _GH2079: https://github.com/pydata/pandas/issues/2079
-.. _GH2082: https://github.com/pydata/pandas/issues/2082
-.. _GH2157: https://github.com/pydata/pandas/issues/2157
-.. _GH2126: https://github.com/pydata/pandas/issues/2126
-.. _GH2078: https://github.com/pydata/pandas/issues/2078
-.. _GH2041: https://github.com/pydata/pandas/issues/2041
-.. _GH2083: https://github.com/pydata/pandas/issues/2083
-.. _GH2100: https://github.com/pydata/pandas/issues/2100
-.. _GH2119: https://github.com/pydata/pandas/issues/2119
-.. _GH2144: https://github.com/pydata/pandas/issues/2144
-.. _GH2095: https://github.com/pydata/pandas/issues/2095
-.. _GH2060: https://github.com/pydata/pandas/issues/2060
-.. _GH2165: https://github.com/pydata/pandas/issues/2165
-.. _GH2146: https://github.com/pydata/pandas/issues/2146
-.. _GH2075: https://github.com/pydata/pandas/issues/2075
-.. _GH2148: https://github.com/pydata/pandas/issues/2148
-.. _GH2155: https://github.com/pydata/pandas/issues/2155
-.. _GH2161: https://github.com/pydata/pandas/issues/2161
-.. _GH2163: https://github.com/pydata/pandas/issues/2163
-.. _GH2096: https://github.com/pydata/pandas/issues/2096
-.. _GH2107: https://github.com/pydata/pandas/issues/2107
-.. _GH1978: https://github.com/pydata/pandas/issues/1978
-.. _GH1979: https://github.com/pydata/pandas/issues/1979
-.. _GH2117: https://github.com/pydata/pandas/issues/2117
-.. _GH2133: https://github.com/pydata/pandas/issues/2133
-.. _GH2114: https://github.com/pydata/pandas/issues/2114
-.. _GH2527: https://github.com/pydata/pandas/issues/2527
-.. _GH2128: https://github.com/pydata/pandas/issues/2128
-.. _GH2008: https://github.com/pydata/pandas/issues/2008
-.. _GH2179: https://github.com/pydata/pandas/issues/2179
-.. _GH2174: https://github.com/pydata/pandas/issues/2174
-.. _GH2129: https://github.com/pydata/pandas/issues/2129
-.. _GH2127: https://github.com/pydata/pandas/issues/2127
-.. _GH2196: https://github.com/pydata/pandas/issues/2196
-.. _GH2199: https://github.com/pydata/pandas/issues/2199
-.. _GH2201: https://github.com/pydata/pandas/issues/2201
-.. _GH2200: https://github.com/pydata/pandas/issues/2200
-.. _GH2236: https://github.com/pydata/pandas/issues/2236
-.. _GH2218: https://github.com/pydata/pandas/issues/2218
-.. _GH2219: https://github.com/pydata/pandas/issues/2219
-.. _GH2220: https://github.com/pydata/pandas/issues/2220
-.. _GH2228: https://github.com/pydata/pandas/issues/2228
-.. _GH2245: https://github.com/pydata/pandas/issues/2245
-.. _GH2227: https://github.com/pydata/pandas/issues/2227
-.. _GH2229: https://github.com/pydata/pandas/issues/2229
-.. _GH2240: https://github.com/pydata/pandas/issues/2240
-.. _GH2234: https://github.com/pydata/pandas/issues/2234
-.. _GH1610: https://github.com/pydata/pandas/issues/1610
-.. _GH2237: https://github.com/pydata/pandas/issues/2237
-.. _GH2248: https://github.com/pydata/pandas/issues/2248
-
-
-pandas 0.9.0
-============
-
-**Release date:** 10/7/2012
-
-**New features**
-
- - Add ``str.encode`` and ``str.decode`` to Series (GH1706_)
- - Add `to_latex` method to DataFrame (GH1735_)
- - Add convenient expanding window equivalents of all rolling_* ops (GH1785_)
- - Add Options class to pandas.io.data for fetching options data from Yahoo!
- Finance (GH1748_, GH1739_)
- - Recognize and convert more boolean values in file parsing (Yes, No, TRUE,
- FALSE, variants thereof) (GH1691_, GH1295_)
- - Add Panel.update method, analogous to DataFrame.update (GH1999_, GH1988_)
-
-**Improvements to existing features**
-
- - Proper handling of NA values in merge operations (GH1990_)
- - Add ``flags`` option for ``re.compile`` in some Series.str methods (GH1659_)
- - Parsing of UTC date strings in read_* functions (GH1693_)
- - Handle generator input to Series (GH1679_)
- - Add `na_action='ignore'` to Series.map to quietly propagate NAs (GH1661_)
- - Add args/kwds options to Series.apply (GH1829_)
- - Add inplace option to Series/DataFrame.reset_index (GH1797_)
- - Add ``level`` parameter to ``Series.reset_index``
- - Add quoting option for DataFrame.to_csv (GH1902_)
- - Indicate long column value truncation in DataFrame output with ... (GH1854_)
- - DataFrame.dot will not do data alignment, and also work with Series (GH1915_)
- - Add ``na`` option for missing data handling in some vectorized string
- methods (GH1689_)
- - If index_label=False in DataFrame.to_csv, do not print fields/commas in the
- text output. Results in easier importing into R (GH1583_)
- - Can pass tuple/list of axes to DataFrame.dropna to simplify repeated calls
- (dropping both columns and rows) (GH924_)
- - Improve DataFrame.to_html output for hierarchically-indexed rows (do not
- repeat levels) (GH1929_)
- - TimeSeries.between_time can now select times across midnight (GH1871_)
- - Enable `skip_footer` parameter in `ExcelFile.parse` (GH1843_)
-
-**API Changes**
-
- - Change default header names in read_* functions to more Pythonic X0, X1,
- etc. instead of X.1, X.2. (GH2000_)
- - Deprecated ``day_of_year`` API removed from PeriodIndex, use ``dayofyear``
- (GH1723_)
- - Don't modify NumPy suppress printoption at import time
- - The internal HDF5 data arrangement for DataFrames has been
- transposed. Legacy files will still be readable by HDFStore (GH1834_, GH1824_)
- - Legacy cruft removed: pandas.stats.misc.quantileTS
- - Use ISO8601 format for Period repr: monthly, daily, and on down (GH1776_)
- - Empty DataFrame columns are now created as object dtype. This will prevent
- a class of TypeErrors that was occurring in code where the dtype of a
- column would depend on the presence of data or not (e.g. a SQL query having
- results) (GH1783_)
- - Setting parts of DataFrame/Panel using ix now aligns input Series/DataFrame
- (GH1630_)
- - `first` and `last` methods in `GroupBy` no longer drop non-numeric columns
- (GH1809_)
- - Resolved inconsistencies in specifying custom NA values in text parser.
- `na_values` of type dict no longer override default NAs unless
- `keep_default_na` is set to false explicitly (GH1657_)
- - Enable `skipfooter` parameter in text parsers as an alias for `skip_footer`
-
-**Bug fixes**
-
- - Perform arithmetic column-by-column in mixed-type DataFrame to avoid type
- upcasting issues. Caused downstream DataFrame.diff bug (GH1896_)
- - Fix matplotlib auto-color assignment when no custom spectrum passed. Also
- respect passed color keyword argument (GH1711_)
- - Fix resampling logical error with closed='left' (GH1726_)
- - Fix critical DatetimeIndex.union bugs (GH1730_, GH1719_, GH1745_, GH1702_, GH1753_)
- - Fix critical DatetimeIndex.intersection bug with unanchored offsets (GH1708_)
- - Fix MM-YYYY time series indexing case (GH1672_)
- - Fix case where Categorical group key was not being passed into index in
- GroupBy result (GH1701_)
- - Handle Ellipsis in Series.__getitem__/__setitem__ (GH1721_)
- - Fix some bugs with handling datetime64 scalars of other units in NumPy 1.6
- and 1.7 (GH1717_)
- - Fix performance issue in MultiIndex.format (GH1746_)
- - Fixed GroupBy bugs interacting with DatetimeIndex asof / map methods (GH1677_)
- - Handle factors with NAs in pandas.rpy (GH1615_)
- - Fix statsmodels import in pandas.stats.var (GH1734_)
- - Fix DataFrame repr/info summary with non-unique columns (GH1700_)
- - Fix Series.iget_value for non-unique indexes (GH1694_)
- - Don't lose tzinfo when passing DatetimeIndex as DataFrame column (GH1682_)
- - Fix tz conversion with time zones that haven't had any DST transitions since
- first date in the array (GH1673_)
- - Fix field access with UTC->local conversion on unsorted arrays (GH1756_)
- - Fix isnull handling of array-like (list) inputs (GH1755_)
- - Fix regression in handling of Series in Series constructor (GH1671_)
- - Fix comparison of Int64Index with DatetimeIndex (GH1681_)
- - Fix min_periods handling in new rolling_max/min at array start (GH1695_)
- - Fix errors with how='median' and generic NumPy resampling in some cases
- caused by SeriesBinGrouper (GH1648_, GH1688_)
- - When grouping by level, exclude unobserved levels (GH1697_)
- - Don't lose tzinfo in DatetimeIndex when shifting by different offset (GH1683_)
- - Hack to support storing data with a zero-length axis in HDFStore (GH1707_)
- - Fix DatetimeIndex tz-aware range generation issue (GH1674_)
- - Fix method='time' interpolation with intraday data (GH1698_)
- - Don't plot all-NA DataFrame columns as zeros (GH1696_)
- - Fix bug in scatter_plot with by option (GH1716_)
- - Fix performance problem in infer_freq with lots of non-unique stamps (GH1686_)
- - Fix handling of PeriodIndex as argument to create MultiIndex (GH1705_)
- - Fix re: unicode MultiIndex level names in Series/DataFrame repr (GH1736_)
- - Handle PeriodIndex in to_datetime instance method (GH1703_)
- - Support StaticTzInfo in DatetimeIndex infrastructure (GH1692_)
- - Allow MultiIndex setops with length-0 other type indexes (GH1727_)
- - Fix handling of DatetimeIndex in DataFrame.to_records (GH1720_)
- - Fix handling of general objects in isnull on which bool(...) fails (GH1749_)
- - Fix .ix indexing with MultiIndex ambiguity (GH1678_)
- - Fix .ix setting logic error with non-unique MultiIndex (GH1750_)
- - Basic indexing now works on MultiIndex with > 1000000 elements, regression
- from earlier version of pandas (GH1757_)
- - Handle non-float64 dtypes in fast DataFrame.corr/cov code paths (GH1761_)
- - Fix DatetimeIndex.isin to function properly (GH1763_)
- - Fix conversion of array of tz-aware datetime.datetime to DatetimeIndex with
- right time zone (GH1777_)
- - Fix DST issues with generating anchored date ranges (GH1778_)
- - Fix issue calling sort on result of Series.unique (GH1807_)
- - Fix numerical issue leading to square root of negative number in
- rolling_std (GH1840_)
- - Let Series.str.split accept no arguments (like str.split) (GH1859_)
- - Allow user to have dateutil 2.1 installed on a Python 2 system (GH1851_)
- - Catch ImportError less aggressively in pandas/__init__.py (GH1845_)
- - Fix pip source installation bug when installing from GitHub (GH1805_)
- - Fix error when window size > array size in rolling_apply (GH1850_)
- - Fix pip source installation issues via SSH from GitHub
- - Fix OLS.summary when column is a tuple (GH1837_)
- - Fix bug in __doc__ patching when -OO passed to interpreter
- (GH1792_ GH1741_ GH1774_)
- - Fix unicode console encoding issue in IPython notebook (GH1782_, GH1768_)
- - Fix unicode formatting issue with Series.name (GH1782_)
- - Fix bug in DataFrame.duplicated with datetime64 columns (GH1833_)
- - Fix bug in Panel internals resulting in error when doing fillna after
- truncate not changing size of panel (GH1823_)
- - Prevent segfault due to MultiIndex not being supported in HDFStore table
- format (GH1848_)
- - Fix UnboundLocalError in Panel.__setitem__ and add better error (GH1826_)
- - Fix to_csv issues with list of string entries. Isnull works on list of
- strings now too (GH1791_)
- - Fix Timestamp comparisons with datetime values outside the nanosecond range
- (1677-2262)
- - Revert to prior behavior of normalize_date with datetime.date objects
- (return datetime)
- - Fix broken interaction between np.nansum and Series.any/all
- - Fix bug with multiple column date parsers (GH1866_)
- - DatetimeIndex.union(Int64Index) was broken
- - Make plot x vs y interface consistent with integer indexing (GH1842_)
- - set_index inplace modified data even if unique check fails (GH1831_)
- - Only use Q-OCT/NOV/DEC in quarterly frequency inference (GH1789_)
- - Upcast to dtype=object when unstacking boolean DataFrame (GH1820_)
- - Fix float64/float32 merging bug (GH1849_)
- - Fixes to Period.start_time for non-daily frequencies (GH1857_)
- - Fix failure when converter used on index_col in read_csv (GH1835_)
- - Implement PeriodIndex.append so that pandas.concat works correctly (GH1815_)
- - Avoid Cython out-of-bounds access causing segfault sometimes in pad_2d,
- backfill_2d
- - Fix resampling error with intraday times and anchored target time (like
- AS-DEC) (GH1772_)
- - Fix .ix indexing bugs with mixed-integer indexes (GH1799_)
- - Respect passed color keyword argument in Series.plot (GH1890_)
- - Fix rolling_min/max when the window is larger than the size of the input
- array. Check other malformed inputs (GH1899_, GH1897_)
- - Rolling variance / standard deviation with only a single observation in
- window (GH1884_)
- - Fix unicode sheet name failure in to_excel (GH1828_)
- - Override DatetimeIndex.min/max to return Timestamp objects (GH1895_)
- - Fix column name formatting issue in length-truncated column (GH1906_)
- - Fix broken handling of copying Index metadata to new instances created by
- view(...) calls inside the NumPy infrastructure
- - Support datetime.date again in DateOffset.rollback/rollforward
- - Raise Exception if set passed to Series constructor (GH1913_)
- - Add TypeError when appending HDFStore table w/ wrong index type (GH1881_)
- - Don't raise exception on empty inputs in EW functions (e.g. ewma) (GH1900_)
- - Make asof work correctly with PeriodIndex (GH1883_)
- - Fix extlinks in doc build
- - Fill boolean DataFrame with NaN when calling shift (GH1814_)
- - Fix setuptools bug causing pip not to Cythonize .pyx files sometimes
- - Fix negative integer indexing regression in .ix from 0.7.x (GH1888_)
- - Fix error while retrieving timezone and utc offset from subclasses of
- datetime.tzinfo without .zone and ._utcoffset attributes (GH1922_)
- - Fix DataFrame formatting of small, non-zero FP numbers (GH1911_)
- - Various fixes by upcasting of date -> datetime (GH1395_)
- - Raise better exception when passing multiple functions with the same name,
- such as lambdas, to GroupBy.aggregate
- - Fix DataFrame.apply with axis=1 on a non-unique index (GH1878_)
- - Proper handling of Index subclasses in pandas.unique (GH1759_)
- - Set index names in DataFrame.from_records (GH1744_)
- - Fix time series indexing error with duplicates, under and over hash table
- size cutoff (GH1821_)
- - Handle list keys in addition to tuples in DataFrame.xs when
- partial-indexing a hierarchically-indexed DataFrame (GH1796_)
- - Support multiple column selection in DataFrame.__getitem__ with duplicate
- columns (GH1943_)
- - Fix time zone localization bug causing improper fields (e.g. hours) in time
- zones that have not had a UTC transition in a long time (GH1946_)
- - Fix errors when parsing and working with fixed offset timezones
- (GH1922_, GH1928_)
- - Fix text parser bug when handling UTC datetime objects generated by
- dateutil (GH1693_)
- - Fix plotting bug when 'B' is the inferred frequency but index actually
- contains weekends (GH1668_, GH1669_)
- - Fix plot styling bugs (GH1666_, GH1665_, GH1658_)
- - Fix plotting bug with index/columns with unicode (GH1685_)
- - Fix DataFrame constructor bug when passed Series with datetime64 dtype
- in a dict (GH1680_)
- - Fixed regression in generating DatetimeIndex using timezone aware
- datetime.datetime (GH1676_)
- - Fix DataFrame bug when printing concatenated DataFrames with duplicated
- columns (GH1675_)
- - Fixed bug when plotting time series with multiple intraday frequencies
- (GH1732_)
- - Fix bug in DataFrame.duplicated to enable iterables other than list-types
- as input argument (GH1773_)
- - Fix resample bug when passed list of lambdas as `how` argument (GH1808_)
- - Repr fix for MultiIndex level with all NAs (GH1971_)
- - Fix PeriodIndex slicing bug when slice start/end are out-of-bounds (GH1977_)
- - Fix read_table bug when parsing unicode (GH1975_)
- - Fix BlockManager.iget bug when dealing with non-unique MultiIndex as columns
- (GH1970_)
- - Fix reset_index bug if both drop and level are specified (GH1957_)
- - Work around unsafe NumPy object->int casting with Cython function (GH1987_)
- - Fix datetime64 formatting bug in DataFrame.to_csv (GH1993_)
- - Default start date in pandas.io.data to 1/1/2000 as the docs say (GH2011_)
-
-
-.. _GH1706: https://github.com/pydata/pandas/issues/1706
-.. _GH1735: https://github.com/pydata/pandas/issues/1735
-.. _GH1785: https://github.com/pydata/pandas/issues/1785
-.. _GH1748: https://github.com/pydata/pandas/issues/1748
-.. _GH1739: https://github.com/pydata/pandas/issues/1739
-.. _GH1691: https://github.com/pydata/pandas/issues/1691
-.. _GH1295: https://github.com/pydata/pandas/issues/1295
-.. _GH1999: https://github.com/pydata/pandas/issues/1999
-.. _GH1988: https://github.com/pydata/pandas/issues/1988
-.. _GH1990: https://github.com/pydata/pandas/issues/1990
-.. _GH1659: https://github.com/pydata/pandas/issues/1659
-.. _GH1693: https://github.com/pydata/pandas/issues/1693
-.. _GH1679: https://github.com/pydata/pandas/issues/1679
-.. _GH1661: https://github.com/pydata/pandas/issues/1661
-.. _GH1829: https://github.com/pydata/pandas/issues/1829
-.. _GH1797: https://github.com/pydata/pandas/issues/1797
-.. _GH1902: https://github.com/pydata/pandas/issues/1902
-.. _GH1854: https://github.com/pydata/pandas/issues/1854
-.. _GH1915: https://github.com/pydata/pandas/issues/1915
-.. _GH1689: https://github.com/pydata/pandas/issues/1689
-.. _GH1583: https://github.com/pydata/pandas/issues/1583
-.. _GH924: https://github.com/pydata/pandas/issues/924
-.. _GH1929: https://github.com/pydata/pandas/issues/1929
-.. _GH1871: https://github.com/pydata/pandas/issues/1871
-.. _GH1843: https://github.com/pydata/pandas/issues/1843
-.. _GH2000: https://github.com/pydata/pandas/issues/2000
-.. _GH1723: https://github.com/pydata/pandas/issues/1723
-.. _GH1834: https://github.com/pydata/pandas/issues/1834
-.. _GH1824: https://github.com/pydata/pandas/issues/1824
-.. _GH1776: https://github.com/pydata/pandas/issues/1776
-.. _GH1783: https://github.com/pydata/pandas/issues/1783
-.. _GH1630: https://github.com/pydata/pandas/issues/1630
-.. _GH1809: https://github.com/pydata/pandas/issues/1809
-.. _GH1657: https://github.com/pydata/pandas/issues/1657
-.. _GH1896: https://github.com/pydata/pandas/issues/1896
-.. _GH1711: https://github.com/pydata/pandas/issues/1711
-.. _GH1726: https://github.com/pydata/pandas/issues/1726
-.. _GH1730: https://github.com/pydata/pandas/issues/1730
-.. _GH1719: https://github.com/pydata/pandas/issues/1719
-.. _GH1745: https://github.com/pydata/pandas/issues/1745
-.. _GH1702: https://github.com/pydata/pandas/issues/1702
-.. _GH1753: https://github.com/pydata/pandas/issues/1753
-.. _GH1708: https://github.com/pydata/pandas/issues/1708
-.. _GH1672: https://github.com/pydata/pandas/issues/1672
-.. _GH1701: https://github.com/pydata/pandas/issues/1701
-.. _GH1721: https://github.com/pydata/pandas/issues/1721
-.. _GH1717: https://github.com/pydata/pandas/issues/1717
-.. _GH1746: https://github.com/pydata/pandas/issues/1746
-.. _GH1677: https://github.com/pydata/pandas/issues/1677
-.. _GH1615: https://github.com/pydata/pandas/issues/1615
-.. _GH1734: https://github.com/pydata/pandas/issues/1734
-.. _GH1700: https://github.com/pydata/pandas/issues/1700
-.. _GH1694: https://github.com/pydata/pandas/issues/1694
-.. _GH1682: https://github.com/pydata/pandas/issues/1682
-.. _GH1673: https://github.com/pydata/pandas/issues/1673
-.. _GH1756: https://github.com/pydata/pandas/issues/1756
-.. _GH1755: https://github.com/pydata/pandas/issues/1755
-.. _GH1671: https://github.com/pydata/pandas/issues/1671
-.. _GH1681: https://github.com/pydata/pandas/issues/1681
-.. _GH1695: https://github.com/pydata/pandas/issues/1695
-.. _GH1648: https://github.com/pydata/pandas/issues/1648
-.. _GH1688: https://github.com/pydata/pandas/issues/1688
-.. _GH1697: https://github.com/pydata/pandas/issues/1697
-.. _GH1683: https://github.com/pydata/pandas/issues/1683
-.. _GH1707: https://github.com/pydata/pandas/issues/1707
-.. _GH1674: https://github.com/pydata/pandas/issues/1674
-.. _GH1698: https://github.com/pydata/pandas/issues/1698
-.. _GH1696: https://github.com/pydata/pandas/issues/1696
-.. _GH1716: https://github.com/pydata/pandas/issues/1716
-.. _GH1686: https://github.com/pydata/pandas/issues/1686
-.. _GH1705: https://github.com/pydata/pandas/issues/1705
-.. _GH1736: https://github.com/pydata/pandas/issues/1736
-.. _GH1703: https://github.com/pydata/pandas/issues/1703
-.. _GH1692: https://github.com/pydata/pandas/issues/1692
-.. _GH1727: https://github.com/pydata/pandas/issues/1727
-.. _GH1720: https://github.com/pydata/pandas/issues/1720
-.. _GH1749: https://github.com/pydata/pandas/issues/1749
-.. _GH1678: https://github.com/pydata/pandas/issues/1678
-.. _GH1750: https://github.com/pydata/pandas/issues/1750
-.. _GH1757: https://github.com/pydata/pandas/issues/1757
-.. _GH1761: https://github.com/pydata/pandas/issues/1761
-.. _GH1763: https://github.com/pydata/pandas/issues/1763
-.. _GH1777: https://github.com/pydata/pandas/issues/1777
-.. _GH1778: https://github.com/pydata/pandas/issues/1778
-.. _GH1807: https://github.com/pydata/pandas/issues/1807
-.. _GH1840: https://github.com/pydata/pandas/issues/1840
-.. _GH1859: https://github.com/pydata/pandas/issues/1859
-.. _GH1851: https://github.com/pydata/pandas/issues/1851
-.. _GH1845: https://github.com/pydata/pandas/issues/1845
-.. _GH1805: https://github.com/pydata/pandas/issues/1805
-.. _GH1850: https://github.com/pydata/pandas/issues/1850
-.. _GH1837: https://github.com/pydata/pandas/issues/1837
-.. _GH1792: https://github.com/pydata/pandas/issues/1792
-.. _GH1741: https://github.com/pydata/pandas/issues/1741
-.. _GH1774: https://github.com/pydata/pandas/issues/1774
-.. _GH1782: https://github.com/pydata/pandas/issues/1782
-.. _GH1768: https://github.com/pydata/pandas/issues/1768
-.. _GH1833: https://github.com/pydata/pandas/issues/1833
-.. _GH1823: https://github.com/pydata/pandas/issues/1823
-.. _GH1848: https://github.com/pydata/pandas/issues/1848
-.. _GH1826: https://github.com/pydata/pandas/issues/1826
-.. _GH1791: https://github.com/pydata/pandas/issues/1791
-.. _GH1866: https://github.com/pydata/pandas/issues/1866
-.. _GH1842: https://github.com/pydata/pandas/issues/1842
-.. _GH1831: https://github.com/pydata/pandas/issues/1831
-.. _GH1789: https://github.com/pydata/pandas/issues/1789
-.. _GH1820: https://github.com/pydata/pandas/issues/1820
-.. _GH1849: https://github.com/pydata/pandas/issues/1849
-.. _GH1857: https://github.com/pydata/pandas/issues/1857
-.. _GH1835: https://github.com/pydata/pandas/issues/1835
-.. _GH1815: https://github.com/pydata/pandas/issues/1815
-.. _GH1772: https://github.com/pydata/pandas/issues/1772
-.. _GH1799: https://github.com/pydata/pandas/issues/1799
-.. _GH1890: https://github.com/pydata/pandas/issues/1890
-.. _GH1899: https://github.com/pydata/pandas/issues/1899
-.. _GH1897: https://github.com/pydata/pandas/issues/1897
-.. _GH1884: https://github.com/pydata/pandas/issues/1884
-.. _GH1828: https://github.com/pydata/pandas/issues/1828
-.. _GH1895: https://github.com/pydata/pandas/issues/1895
-.. _GH1906: https://github.com/pydata/pandas/issues/1906
-.. _GH1913: https://github.com/pydata/pandas/issues/1913
-.. _GH1881: https://github.com/pydata/pandas/issues/1881
-.. _GH1900: https://github.com/pydata/pandas/issues/1900
-.. _GH1883: https://github.com/pydata/pandas/issues/1883
-.. _GH1814: https://github.com/pydata/pandas/issues/1814
-.. _GH1888: https://github.com/pydata/pandas/issues/1888
-.. _GH1922: https://github.com/pydata/pandas/issues/1922
-.. _GH1911: https://github.com/pydata/pandas/issues/1911
-.. _GH1395: https://github.com/pydata/pandas/issues/1395
-.. _GH1878: https://github.com/pydata/pandas/issues/1878
-.. _GH1759: https://github.com/pydata/pandas/issues/1759
-.. _GH1744: https://github.com/pydata/pandas/issues/1744
-.. _GH1821: https://github.com/pydata/pandas/issues/1821
-.. _GH1796: https://github.com/pydata/pandas/issues/1796
-.. _GH1943: https://github.com/pydata/pandas/issues/1943
-.. _GH1946: https://github.com/pydata/pandas/issues/1946
-.. _GH1928: https://github.com/pydata/pandas/issues/1928
-.. _GH1668: https://github.com/pydata/pandas/issues/1668
-.. _GH1669: https://github.com/pydata/pandas/issues/1669
-.. _GH1666: https://github.com/pydata/pandas/issues/1666
-.. _GH1665: https://github.com/pydata/pandas/issues/1665
-.. _GH1658: https://github.com/pydata/pandas/issues/1658
-.. _GH1685: https://github.com/pydata/pandas/issues/1685
-.. _GH1680: https://github.com/pydata/pandas/issues/1680
-.. _GH1676: https://github.com/pydata/pandas/issues/1676
-.. _GH1675: https://github.com/pydata/pandas/issues/1675
-.. _GH1732: https://github.com/pydata/pandas/issues/1732
-.. _GH1773: https://github.com/pydata/pandas/issues/1773
-.. _GH1808: https://github.com/pydata/pandas/issues/1808
-.. _GH1971: https://github.com/pydata/pandas/issues/1971
-.. _GH1977: https://github.com/pydata/pandas/issues/1977
-.. _GH1975: https://github.com/pydata/pandas/issues/1975
-.. _GH1970: https://github.com/pydata/pandas/issues/1970
-.. _GH1957: https://github.com/pydata/pandas/issues/1957
-.. _GH1987: https://github.com/pydata/pandas/issues/1987
-.. _GH1993: https://github.com/pydata/pandas/issues/1993
-.. _GH2011: https://github.com/pydata/pandas/issues/2011
-
-
-pandas 0.8.1
-============
-
-**Release date:** July 22, 2012
-
-**New features**
-
- - Add vectorized, NA-friendly string methods to Series (GH1621_, GH620_)
- - Can pass dict of per-column line styles to DataFrame.plot (GH1559_)
- - Selective plotting to secondary y-axis on same subplot (GH1640_)
- - Add new ``bootstrap_plot`` plot function
- - Add new ``parallel_coordinates`` plot function (GH1488_)
- - Add ``radviz`` plot function (GH1566_)
- - Add ``multi_sparse`` option to ``set_printoptions`` to modify display of
- hierarchical indexes (GH1538_)
- - Add ``dropna`` method to Panel (GH171_)
-
-**Improvements to existing features**
-
- - Use moving min/max algorithms from Bottleneck in rolling_min/rolling_max
- for > 100x speedup. (GH1504_, GH50_)
- - Add Cython group median method for >15x speedup (GH1358_)
- - Drastically improve ``to_datetime`` performance on ISO8601 datetime strings
- (with no time zones) (GH1571_)
- - Improve single-key groupby performance on large data sets, accelerate use of
- groupby with a Categorical variable
- - Add ability to append hierarchical index levels with ``set_index`` and to
- drop single levels with ``reset_index`` (GH1569_, GH1577_)
- - Always apply passed functions in ``resample``, even if upsampling (GH1596_)
- - Avoid unnecessary copies in DataFrame constructor with explicit dtype (GH1572_)
- - Cleaner DatetimeIndex string representation with 1 or 2 elements (GH1611_)
- - Improve performance of array-of-Period to PeriodIndex, convert such arrays
- to PeriodIndex inside Index (GH1215_)
- - More informative string representation for weekly Period objects (GH1503_)
- - Accelerate 3-axis multi data selection from homogeneous Panel (GH979_)
- - Add ``adjust`` option to ewma to disable adjustment factor (GH1584_)
- - Add new matplotlib converters for high frequency time series plotting (GH1599_)
- - Handling of tz-aware datetime.datetime objects in to_datetime; raise
- Exception unless utc=True given (GH1581_)
-
-**Bug fixes**
-
- - Fix NA handling in DataFrame.to_panel (GH1582_)
- - Handle TypeError issues inside PyObject_RichCompareBool calls in khash
- (GH1318_)
- - Fix resampling bug to lower case daily frequency (GH1588_)
- - Fix kendall/spearman DataFrame.corr bug with no overlap (GH1595_)
- - Fix bug in DataFrame.set_index (GH1592_)
- - Don't ignore axes in boxplot if by specified (GH1565_)
- - Fix Panel .ix indexing with integers bug (GH1603_)
- - Fix Partial indexing bugs (years, months, ...) with PeriodIndex (GH1601_)
- - Fix MultiIndex console formatting issue (GH1606_)
- - Unordered index with duplicates doesn't yield scalar location for single
- entry (GH1586_)
- - Fix resampling of tz-aware time series with "anchored" freq (GH1591_)
- - Fix DataFrame.rank error on integer data (GH1589_)
- - Selection of multiple SparseDataFrame columns by list in __getitem__ (GH1585_)
- - Override Index.tolist for compatibility with MultiIndex (GH1576_)
- - Fix hierarchical summing bug with MultiIndex of length 1 (GH1568_)
- - Work around numpy.concatenate use/bug in Series.set_value (GH1561_)
- - Ensure Series/DataFrame are sorted before resampling (GH1580_)
- - Fix unhandled IndexError when indexing very large time series (GH1562_)
- - Fix DatetimeIndex intersection logic error with irregular indexes (GH1551_)
- - Fix unit test errors on Python 3 (GH1550_)
- - Fix .ix indexing bugs in duplicate DataFrame index (GH1201_)
- - Better handle errors with non-existing objects in HDFStore (GH1254_)
- - Don't copy int64 array data in DatetimeIndex when copy=False (GH1624_)
- - Fix resampling of conforming periods quarterly to annual (GH1622_)
- - Don't lose index name on resampling (GH1631_)
- - Support python-dateutil version 2.1 (GH1637_)
- - Fix broken scatter_matrix axis labeling, esp. with time series (GH1625_)
- - Fix cases where extra keywords weren't being passed on to matplotlib from
- Series.plot (GH1636_)
- - Fix BusinessMonthBegin logic for dates before 1st bday of month (GH1645_)
- - Ensure string alias converted (valid in DatetimeIndex.get_loc) in
- DataFrame.xs / __getitem__ (GH1644_)
- - Fix use of string alias timestamps with tz-aware time series (GH1647_)
- - Fix Series.max/min and Series.describe on len-0 series (GH1650_)
- - Handle None values in dict passed to concat (GH1649_)
- - Fix Series.interpolate with method='values' and DatetimeIndex (GH1646_)
- - Fix IndexError in left merges on a DataFrame with 0-length (GH1628_)
- - Fix DataFrame column width display with UTF-8 encoded characters (GH1620_)
- - Handle case in pandas.io.data.get_data_yahoo where Yahoo! returns duplicate
- dates for most recent business day
- - Avoid downsampling when plotting mixed frequencies on the same subplot (GH1619_)
- - Fix read_csv bug when reading a single line (GH1553_)
- - Fix bug in C code causing monthly periods prior to December 1969 to be off (GH1570_)
-
-.. _GH1621: https://github.com/pydata/pandas/issues/1621
-.. _GH620: https://github.com/pydata/pandas/issues/620
-.. _GH1559: https://github.com/pydata/pandas/issues/1559
-.. _GH1640: https://github.com/pydata/pandas/issues/1640
-.. _GH1488: https://github.com/pydata/pandas/issues/1488
-.. _GH1566: https://github.com/pydata/pandas/issues/1566
-.. _GH1538: https://github.com/pydata/pandas/issues/1538
-.. _GH171: https://github.com/pydata/pandas/issues/171
-.. _GH1504: https://github.com/pydata/pandas/issues/1504
-.. _GH50: https://github.com/pydata/pandas/issues/50
-.. _GH1358: https://github.com/pydata/pandas/issues/1358
-.. _GH1571: https://github.com/pydata/pandas/issues/1571
-.. _GH1569: https://github.com/pydata/pandas/issues/1569
-.. _GH1577: https://github.com/pydata/pandas/issues/1577
-.. _GH1596: https://github.com/pydata/pandas/issues/1596
-.. _GH1572: https://github.com/pydata/pandas/issues/1572
-.. _GH1611: https://github.com/pydata/pandas/issues/1611
-.. _GH1215: https://github.com/pydata/pandas/issues/1215
-.. _GH1503: https://github.com/pydata/pandas/issues/1503
-.. _GH979: https://github.com/pydata/pandas/issues/979
-.. _GH1584: https://github.com/pydata/pandas/issues/1584
-.. _GH1599: https://github.com/pydata/pandas/issues/1599
-.. _GH1581: https://github.com/pydata/pandas/issues/1581
-.. _GH1582: https://github.com/pydata/pandas/issues/1582
-.. _GH1318: https://github.com/pydata/pandas/issues/1318
-.. _GH1588: https://github.com/pydata/pandas/issues/1588
-.. _GH1595: https://github.com/pydata/pandas/issues/1595
-.. _GH1592: https://github.com/pydata/pandas/issues/1592
-.. _GH1565: https://github.com/pydata/pandas/issues/1565
-.. _GH1603: https://github.com/pydata/pandas/issues/1603
-.. _GH1601: https://github.com/pydata/pandas/issues/1601
-.. _GH1606: https://github.com/pydata/pandas/issues/1606
-.. _GH1586: https://github.com/pydata/pandas/issues/1586
-.. _GH1591: https://github.com/pydata/pandas/issues/1591
-.. _GH1589: https://github.com/pydata/pandas/issues/1589
-.. _GH1585: https://github.com/pydata/pandas/issues/1585
-.. _GH1576: https://github.com/pydata/pandas/issues/1576
-.. _GH1568: https://github.com/pydata/pandas/issues/1568
-.. _GH1561: https://github.com/pydata/pandas/issues/1561
-.. _GH1580: https://github.com/pydata/pandas/issues/1580
-.. _GH1562: https://github.com/pydata/pandas/issues/1562
-.. _GH1551: https://github.com/pydata/pandas/issues/1551
-.. _GH1550: https://github.com/pydata/pandas/issues/1550
-.. _GH1201: https://github.com/pydata/pandas/issues/1201
-.. _GH1254: https://github.com/pydata/pandas/issues/1254
-.. _GH1624: https://github.com/pydata/pandas/issues/1624
-.. _GH1622: https://github.com/pydata/pandas/issues/1622
-.. _GH1631: https://github.com/pydata/pandas/issues/1631
-.. _GH1637: https://github.com/pydata/pandas/issues/1637
-.. _GH1625: https://github.com/pydata/pandas/issues/1625
-.. _GH1636: https://github.com/pydata/pandas/issues/1636
-.. _GH1645: https://github.com/pydata/pandas/issues/1645
-.. _GH1644: https://github.com/pydata/pandas/issues/1644
-.. _GH1647: https://github.com/pydata/pandas/issues/1647
-.. _GH1650: https://github.com/pydata/pandas/issues/1650
-.. _GH1649: https://github.com/pydata/pandas/issues/1649
-.. _GH1646: https://github.com/pydata/pandas/issues/1646
-.. _GH1628: https://github.com/pydata/pandas/issues/1628
-.. _GH1620: https://github.com/pydata/pandas/issues/1620
-.. _GH1619: https://github.com/pydata/pandas/issues/1619
-.. _GH1553: https://github.com/pydata/pandas/issues/1553
-.. _GH1570: https://github.com/pydata/pandas/issues/1570
-
-
-pandas 0.8.0
-============
-
-**Release date:** 6/29/2012
-
-**New features**
-
- - New unified DatetimeIndex class for nanosecond-level timestamp data
- - New Timestamp datetime.datetime subclass with easy time zone conversions,
- and support for nanoseconds
- - New PeriodIndex class for timespans, calendar logic, and Period scalar object
- - High performance resampling of timestamp and period data. New `resample`
- method of all pandas data structures
- - New frequency names plus shortcut string aliases like '15h', '1h30min'
- - Time series string indexing shorthand (GH222_)
- - Add week, dayofyear array and other timestamp array-valued field accessor
- functions to DatetimeIndex
- - Add GroupBy.prod optimized aggregation function and 'prod' fast time series
- conversion method (GH1018_)
- - Implement robust frequency inference function and `inferred_freq` attribute
- on DatetimeIndex (GH391_)
- - New ``tz_convert`` and ``tz_localize`` methods in Series / DataFrame
- - Convert DatetimeIndexes to UTC if time zones are different in join/setops
- (GH864_)
- - Add limit argument for forward/backward filling to reindex, fillna,
- etc. (GH825_ and others)
- - Add support for indexes (dates or otherwise) with duplicates and common
- sense indexing/selection functionality
- - Series/DataFrame.update methods, in-place variant of combine_first (GH961_)
- - Add ``match`` function to API (GH502_)
- - Add Cython-optimized first, last, min, max, prod functions to GroupBy (GH994_,
- GH1043_)
- - Dates can be split across multiple columns (GH1227_, GH1186_)
- - Add experimental support for converting pandas DataFrame to R data.frame
- via rpy2 (GH350_, GH1212_)
- - Can pass list of (name, function) to GroupBy.aggregate to get aggregates in
- a particular order (GH610_)
- - Can pass dicts with lists of functions or dicts to GroupBy aggregate to do
- much more flexible multiple function aggregation (GH642_, GH610_)
- - New ordered_merge functions for merging DataFrames with ordered
- data. Also supports group-wise merging for panel data (GH813_)
- - Add keys() method to DataFrame
- - Add flexible replace method for replacing values in Series and
-   DataFrame (GH929_, GH1241_)
- - Add 'kde' plot kind for Series/DataFrame.plot (GH1059_)
- - More flexible multiple function aggregation with GroupBy
- - Add pct_change function to Series/DataFrame
- - Add option to interpolate by Index values in Series.interpolate (GH1206_)
- - Add ``max_colwidth`` option for DataFrame, defaulting to 50
- - Conversion of DataFrame through rpy2 to R data.frame (GH1282_)
- - Add keys() method on DataFrame (GH1240_)
- - Add new ``match`` function to API (similar to R) (GH502_)
- - Add dayfirst option to parsers (GH854_)
- - Add ``method`` argument to ``align`` method for forward/backward fillin
- (GH216_)
- - Add Panel.transpose method for rearranging axes (GH695_)
- - Add new ``cut`` function (patterned after R) for discretizing data into
- equal range-length bins or arbitrary breaks of your choosing (GH415_)
- - Add new ``qcut`` for cutting with quantiles (GH1378_)
- - Add ``value_counts`` top level array method (GH1392_)
- - Added Andrews curves plot type (GH1325_)
- - Add lag plot (GH1440_)
- - Add autocorrelation_plot (GH1425_)
- - Add support for tox and Travis CI (GH1382_)
- - Add support for Categorical use in GroupBy (GH292_)
- - Add ``any`` and ``all`` methods to DataFrame (GH1416_)
- - Add ``secondary_y`` option to Series.plot
- - Add experimental ``lreshape`` function for reshaping wide to long
-
-**Improvements to existing features**
-
- - Switch to klib/khash-based hash tables in Index classes for better
- performance in many cases and lower memory footprint
- - Shipping some functions from scipy.stats to reduce dependency,
- e.g. Series.describe and DataFrame.describe (GH1092_)
- - Can create MultiIndex by passing list of lists or list of arrays to Series,
- DataFrame constructor, etc. (GH831_)
- - Can pass arrays in addition to column names to DataFrame.set_index (GH402_)
- - Improve the speed of "square" reindexing of homogeneous DataFrame objects
- by significant margin (GH836_)
- - Handle more dtypes when passed MaskedArrays in DataFrame constructor (GH406_)
- - Improved performance of join operations on integer keys (GH682_)
- - Can pass multiple columns to GroupBy object, e.g. grouped[[col1, col2]] to
- only aggregate a subset of the value columns (GH383_)
- - Add histogram / kde plot options for scatter_matrix diagonals (GH1237_)
- - Add inplace option to Series/DataFrame.rename and sort_index,
- DataFrame.drop_duplicates (GH805_, GH207_)
- - More helpful error message when nothing passed to Series.reindex (GH1267_)
- - Can mix array and scalars as dict-value inputs to DataFrame ctor (GH1329_)
- - Use DataFrame columns' name for legend title in plots
- - Preserve frequency in DatetimeIndex when possible in boolean indexing
- operations
- - Promote datetime.date values in data alignment operations (GH867_)
- - Add ``order`` method to Index classes (GH1028_)
- - Avoid hash table creation in large monotonic hash table indexes (GH1160_)
- - Store time zones in HDFStore (GH1232_)
- - Enable storage of sparse data structures in HDFStore (GH85_)
- - Enable Series.asof to work with arrays of timestamp inputs
- - Cython implementation of DataFrame.corr speeds up by > 100x (GH1349_, GH1354_)
- - Exclude "nuisance" columns automatically in GroupBy.transform (GH1364_)
- - Support functions-as-strings in GroupBy.transform (GH1362_)
- - Use index name as xlabel/ylabel in plots (GH1415_)
- - Add ``convert_dtype`` option to Series.apply to be able to leave data as
- dtype=object (GH1414_)
- - Can specify all index level names in concat (GH1419_)
- - Add ``dialect`` keyword to parsers for quoting conventions (GH1363_)
- - Enable DataFrame[bool_DataFrame] += value (GH1366_)
- - Add ``retries`` argument to ``get_data_yahoo`` to try to prevent Yahoo! API
- 404s (GH826_)
- - Improve performance of reshaping by using O(N) categorical sorting
- - Series names will be used for index of DataFrame if no index passed (GH1494_)
- - Header argument in DataFrame.to_csv can accept a list of column names to
- use instead of the object's columns (GH921_)
- - Add ``raise_conflict`` argument to DataFrame.update (GH1526_)
- - Support file-like objects in ExcelFile (GH1529_)
-
-**API Changes**
-
- - Rename `pandas._tseries` to `pandas.lib`
- - Rename Factor to Categorical and add improvements. Numerous Categorical bug
- fixes
- - Frequency name overhaul, WEEKDAY/EOM and rules with @
- deprecated. get_legacy_offset_name backwards compatibility function added
- - Raise ValueError in DataFrame.__nonzero__, so "if df" no longer works
- (GH1073_)
- - Change BDay (business day) to not normalize dates by default (GH506_)
- - Remove deprecated DataMatrix name
- - Default merge suffixes for overlap now have underscores instead of periods
- to facilitate tab completion, etc. (GH1239_)
- - Deprecation of offset, time_rule timeRule parameters throughout codebase
- - Series.append and DataFrame.append no longer check for duplicate indexes
- by default, add verify_integrity parameter (GH1394_)
- - Refactor Factor class, old constructor moved to Factor.from_array
- - Modified internals of MultiIndex to use less memory (no longer represented
- as array of tuples) internally, speed up construction time and many methods
- which construct intermediate hierarchical indexes (GH1467_)
-
-**Bug fixes**
-
- - Fix OverflowError from storing pre-1970 dates in HDFStore by switching to
- datetime64 (GH179_)
- - Fix logical error with February leap year end in YearEnd offset
- - Series([False, nan]) was getting casted to float64 (GH1074_)
- - Fix binary operations between boolean Series and object Series with
- booleans and NAs (GH1074_, GH1079_)
- - Couldn't assign whole array to column in mixed-type DataFrame via .ix
- (GH1142_)
- - Fix label slicing issues with float index values (GH1167_)
- - Fix segfault caused by empty groups passed to groupby (GH1048_)
- - Fix occasionally misbehaved reindexing in the presence of NaN labels (GH522_)
- - Fix imprecise logic causing weird Series results from .apply (GH1183_)
- - Unstack multiple levels in one shot, avoiding empty columns in some
- cases. Fix pivot table bug (GH1181_)
- - Fix formatting of MultiIndex on Series/DataFrame when index name coincides
- with label (GH1217_)
- - Handle Excel 2003 #N/A as NaN from xlrd (GH1213_, GH1225_)
- - Fix timestamp locale-related deserialization issues with HDFStore by moving
- to datetime64 representation (GH1081_, GH809_)
- - Fix DataFrame.duplicated/drop_duplicates NA value handling (GH557_)
- - Actually raise exceptions in fast reducer (GH1243_)
- - Fix various timezone-handling bugs from 0.7.3 (GH969_)
- - GroupBy on level=0 discarded index name (GH1313_)
- - Better error message with unmergeable DataFrames (GH1307_)
- - Series.__repr__ alignment fix with unicode index values (GH1279_)
- - Better error message if nothing passed to reindex (GH1267_)
- - More robust NA handling in DataFrame.drop_duplicates (GH557_)
- - Resolve locale-based and pre-epoch HDF5 timestamp deserialization issues
- (GH973_, GH1081_, GH179_)
- - Implement Series.repeat (GH1229_)
- - Fix indexing with namedtuple and other tuple subclasses (GH1026_)
- - Fix float64 slicing bug (GH1167_)
- - Parsing integers with commas (GH796_)
- - Fix groupby improper data type when group consists of one value (GH1065_)
- - Fix negative variance possibility in nanvar resulting from floating point
- error (GH1090_)
- - Consistently set name on groupby pieces (GH184_)
- - Treat dict return values as Series in GroupBy.apply (GH823_)
- - Respect column selection for DataFrame in in GroupBy.transform (GH1365_)
- - Fix MultiIndex partial indexing bug (GH1352_)
- - Enable assignment of rows in mixed-type DataFrame via .ix (GH1432_)
- - Reset index mapping when grouping Series in Cython (GH1423_)
- - Fix outer/inner DataFrame.join with non-unique indexes (GH1421_)
- - Fix MultiIndex groupby bugs with empty lower levels (GH1401_)
- - Calling fillna with a Series will have same behavior as with dict (GH1486_)
- - SparseSeries reduction bug (GH1375_)
- - Fix unicode serialization issue in HDFStore (GH1361_)
- - Pass keywords to pyplot.boxplot in DataFrame.boxplot (GH1493_)
- - Bug fixes in MonthBegin (GH1483_)
- - Preserve MultiIndex names in drop (GH1513_)
- - Fix Panel DataFrame slice-assignment bug (GH1533_)
- - Don't use locals() in read_* functions (GH1547_)
-
-.. _GH222: https://github.com/pydata/pandas/issues/222
-.. _GH1018: https://github.com/pydata/pandas/issues/1018
-.. _GH391: https://github.com/pydata/pandas/issues/391
-.. _GH864: https://github.com/pydata/pandas/issues/864
-.. _GH825: https://github.com/pydata/pandas/issues/825
-.. _GH961: https://github.com/pydata/pandas/issues/961
-.. _GH502: https://github.com/pydata/pandas/issues/502
-.. _GH994: https://github.com/pydata/pandas/issues/994
-.. _GH1043: https://github.com/pydata/pandas/issues/1043
-.. _GH1227: https://github.com/pydata/pandas/issues/1227
-.. _GH1186: https://github.com/pydata/pandas/issues/1186
-.. _GH350: https://github.com/pydata/pandas/issues/350
-.. _GH1212: https://github.com/pydata/pandas/issues/1212
-.. _GH610: https://github.com/pydata/pandas/issues/610
-.. _GH642: https://github.com/pydata/pandas/issues/642
-.. _GH813: https://github.com/pydata/pandas/issues/813
-.. _GH929: https://github.com/pydata/pandas/issues/929
-.. _GH1241: https://github.com/pydata/pandas/issues/1241
-.. _GH1059: https://github.com/pydata/pandas/issues/1059
-.. _GH1206: https://github.com/pydata/pandas/issues/1206
-.. _GH1282: https://github.com/pydata/pandas/issues/1282
-.. _GH1240: https://github.com/pydata/pandas/issues/1240
-.. _GH854: https://github.com/pydata/pandas/issues/854
-.. _GH216: https://github.com/pydata/pandas/issues/216
-.. _GH695: https://github.com/pydata/pandas/issues/695
-.. _GH415: https://github.com/pydata/pandas/issues/415
-.. _GH1378: https://github.com/pydata/pandas/issues/1378
-.. _GH1392: https://github.com/pydata/pandas/issues/1392
-.. _GH1325: https://github.com/pydata/pandas/issues/1325
-.. _GH1440: https://github.com/pydata/pandas/issues/1440
-.. _GH1425: https://github.com/pydata/pandas/issues/1425
-.. _GH1382: https://github.com/pydata/pandas/issues/1382
-.. _GH292: https://github.com/pydata/pandas/issues/292
-.. _GH1416: https://github.com/pydata/pandas/issues/1416
-.. _GH1092: https://github.com/pydata/pandas/issues/1092
-.. _GH831: https://github.com/pydata/pandas/issues/831
-.. _GH402: https://github.com/pydata/pandas/issues/402
-.. _GH836: https://github.com/pydata/pandas/issues/836
-.. _GH406: https://github.com/pydata/pandas/issues/406
-.. _GH682: https://github.com/pydata/pandas/issues/682
-.. _GH383: https://github.com/pydata/pandas/issues/383
-.. _GH1237: https://github.com/pydata/pandas/issues/1237
-.. _GH805: https://github.com/pydata/pandas/issues/805
-.. _GH207: https://github.com/pydata/pandas/issues/207
-.. _GH1267: https://github.com/pydata/pandas/issues/1267
-.. _GH1329: https://github.com/pydata/pandas/issues/1329
-.. _GH867: https://github.com/pydata/pandas/issues/867
-.. _GH1028: https://github.com/pydata/pandas/issues/1028
-.. _GH1160: https://github.com/pydata/pandas/issues/1160
-.. _GH1232: https://github.com/pydata/pandas/issues/1232
-.. _GH1349: https://github.com/pydata/pandas/issues/1349
-.. _GH1354: https://github.com/pydata/pandas/issues/1354
-.. _GH1364: https://github.com/pydata/pandas/issues/1364
-.. _GH1362: https://github.com/pydata/pandas/issues/1362
-.. _GH1415: https://github.com/pydata/pandas/issues/1415
-.. _GH1414: https://github.com/pydata/pandas/issues/1414
-.. _GH1419: https://github.com/pydata/pandas/issues/1419
-.. _GH1363: https://github.com/pydata/pandas/issues/1363
-.. _GH1366: https://github.com/pydata/pandas/issues/1366
-.. _GH826: https://github.com/pydata/pandas/issues/826
-.. _GH1494: https://github.com/pydata/pandas/issues/1494
-.. _GH921: https://github.com/pydata/pandas/issues/921
-.. _GH1526: https://github.com/pydata/pandas/issues/1526
-.. _GH1529: https://github.com/pydata/pandas/issues/1529
-.. _GH1073: https://github.com/pydata/pandas/issues/1073
-.. _GH506: https://github.com/pydata/pandas/issues/506
-.. _GH1239: https://github.com/pydata/pandas/issues/1239
-.. _GH1394: https://github.com/pydata/pandas/issues/1394
-.. _GH1467: https://github.com/pydata/pandas/issues/1467
-.. _GH179: https://github.com/pydata/pandas/issues/179
-.. _GH1074: https://github.com/pydata/pandas/issues/1074
-.. _GH1079: https://github.com/pydata/pandas/issues/1079
-.. _GH1142: https://github.com/pydata/pandas/issues/1142
-.. _GH1167: https://github.com/pydata/pandas/issues/1167
-.. _GH1048: https://github.com/pydata/pandas/issues/1048
-.. _GH522: https://github.com/pydata/pandas/issues/522
-.. _GH1183: https://github.com/pydata/pandas/issues/1183
-.. _GH1181: https://github.com/pydata/pandas/issues/1181
-.. _GH1217: https://github.com/pydata/pandas/issues/1217
-.. _GH1213: https://github.com/pydata/pandas/issues/1213
-.. _GH1225: https://github.com/pydata/pandas/issues/1225
-.. _GH1081: https://github.com/pydata/pandas/issues/1081
-.. _GH809: https://github.com/pydata/pandas/issues/809
-.. _GH557: https://github.com/pydata/pandas/issues/557
-.. _GH1243: https://github.com/pydata/pandas/issues/1243
-.. _GH969: https://github.com/pydata/pandas/issues/969
-.. _GH1313: https://github.com/pydata/pandas/issues/1313
-.. _GH1307: https://github.com/pydata/pandas/issues/1307
-.. _GH1279: https://github.com/pydata/pandas/issues/1279
-.. _GH973: https://github.com/pydata/pandas/issues/973
-.. _GH1229: https://github.com/pydata/pandas/issues/1229
-.. _GH1026: https://github.com/pydata/pandas/issues/1026
-.. _GH796: https://github.com/pydata/pandas/issues/796
-.. _GH1065: https://github.com/pydata/pandas/issues/1065
-.. _GH1090: https://github.com/pydata/pandas/issues/1090
-.. _GH184: https://github.com/pydata/pandas/issues/184
-.. _GH823: https://github.com/pydata/pandas/issues/823
-.. _GH1365: https://github.com/pydata/pandas/issues/1365
-.. _GH1352: https://github.com/pydata/pandas/issues/1352
-.. _GH1432: https://github.com/pydata/pandas/issues/1432
-.. _GH1423: https://github.com/pydata/pandas/issues/1423
-.. _GH1421: https://github.com/pydata/pandas/issues/1421
-.. _GH1401: https://github.com/pydata/pandas/issues/1401
-.. _GH1486: https://github.com/pydata/pandas/issues/1486
-.. _GH1375: https://github.com/pydata/pandas/issues/1375
-.. _GH1361: https://github.com/pydata/pandas/issues/1361
-.. _GH1493: https://github.com/pydata/pandas/issues/1493
-.. _GH1483: https://github.com/pydata/pandas/issues/1483
-.. _GH1513: https://github.com/pydata/pandas/issues/1513
-.. _GH1533: https://github.com/pydata/pandas/issues/1533
-.. _GH1547: https://github.com/pydata/pandas/issues/1547
-.. _GH85: https://github.com/pydata/pandas/issues/85
-
-
-pandas 0.7.3
-============
-
-**Release date:** April 12, 2012
-
-**New features / modules**
-
- - Support for non-unique indexes: indexing and selection, many-to-one and
- many-to-many joins (GH1306_)
- - Added fixed-width file reader, read_fwf (GH952_)
- - Add group_keys argument to groupby to not add group names to MultiIndex in
- result of apply (GH938_)
- - DataFrame can now accept non-integer label slicing (GH946_). Previously
- only DataFrame.ix was able to do so.
- - DataFrame.apply now retains name attributes on Series objects (GH983_)
- - Numeric DataFrame comparisons with non-numeric values now raises proper
- TypeError (GH943_). Previously raise "PandasError: DataFrame constructor
- not properly called!"
- - Add ``kurt`` methods to Series and DataFrame (GH964_)
- - Can pass dict of column -> list/set NA values for text parsers (GH754_)
- - Allows users specified NA values in text parsers (GH754_)
- - Parsers checks for openpyxl dependency and raises ImportError if not found
- (GH1007_)
- - New factory function to create HDFStore objects that can be used in a with
- statement so users do not have to explicitly call HDFStore.close (GH1005_)
- - pivot_table is now more flexible with same parameters as groupby (GH941_)
- - Added stacked bar plots (GH987_)
- - scatter_matrix method in pandas/tools/plotting.py (GH935_)
- - DataFrame.boxplot returns plot results for ex-post styling (GH985_)
- - Short version number accessible as pandas.version.short_version (GH930_)
- - Additional documentation in panel.to_frame (GH942_)
- - More informative Series.apply docstring regarding element-wise apply
- (GH977_)
- - Notes on rpy2 installation (GH1006_)
- - Add rotation and font size options to hist method (GH1012_)
- - Use exogenous / X variable index in result of OLS.y_predict. Add
- OLS.predict method (GH1027_, GH1008_)
-
-**API Changes**
-
- - Calling apply on grouped Series, e.g. describe(), will no longer yield
- DataFrame by default. Will have to call unstack() to get prior behavior
- - NA handling in non-numeric comparisons has been tightened up (GH933_, GH953_)
- - No longer assign dummy names key_0, key_1, etc. to groupby index (GH1291_)
-
-**Bug fixes**
-
- - Fix logic error when selecting part of a row in a DataFrame with a
- MultiIndex index (GH1013_)
- - Series comparison with Series of differing length causes crash (GH1016_).
- - Fix bug in indexing when selecting section of hierarchically-indexed row
- (GH1013_)
- - DataFrame.plot(logy=True) has no effect (GH1011_).
- - Broken arithmetic operations between SparsePanel-Panel (GH1015_)
- - Unicode repr issues in MultiIndex with non-ascii characters (GH1010_)
- - DataFrame.lookup() returns inconsistent results if exact match not present
- (GH1001_)
- - DataFrame arithmetic operations not treating None as NA (GH992_)
- - DataFrameGroupBy.apply returns incorrect result (GH991_)
- - Series.reshape returns incorrect result for multiple dimensions (GH989_)
- - Series.std and Series.var ignores ddof parameter (GH934_)
- - DataFrame.append loses index names (GH980_)
- - DataFrame.plot(kind='bar') ignores color argument (GH958_)
- - Inconsistent Index comparison results (GH948_)
- - Improper int dtype DataFrame construction from data with NaN (GH846_)
- - Removes default 'result' name in grouby results (GH995_)
- - DataFrame.from_records no longer mutate input columns (GH975_)
- - Use Index name when grouping by it (GH1313_)
-
-.. _GH1306: https://github.com/pydata/pandas/issues/1306
-.. _GH952: https://github.com/pydata/pandas/issues/952
-.. _GH938: https://github.com/pydata/pandas/issues/938
-.. _GH946: https://github.com/pydata/pandas/issues/946
-.. _GH983: https://github.com/pydata/pandas/issues/983
-.. _GH943: https://github.com/pydata/pandas/issues/943
-.. _GH964: https://github.com/pydata/pandas/issues/964
-.. _GH754: https://github.com/pydata/pandas/issues/754
-.. _GH1007: https://github.com/pydata/pandas/issues/1007
-.. _GH1005: https://github.com/pydata/pandas/issues/1005
-.. _GH941: https://github.com/pydata/pandas/issues/941
-.. _GH987: https://github.com/pydata/pandas/issues/987
-.. _GH935: https://github.com/pydata/pandas/issues/935
-.. _GH985: https://github.com/pydata/pandas/issues/985
-.. _GH930: https://github.com/pydata/pandas/issues/930
-.. _GH942: https://github.com/pydata/pandas/issues/942
-.. _GH977: https://github.com/pydata/pandas/issues/977
-.. _GH1006: https://github.com/pydata/pandas/issues/1006
-.. _GH1012: https://github.com/pydata/pandas/issues/1012
-.. _GH1027: https://github.com/pydata/pandas/issues/1027
-.. _GH1008: https://github.com/pydata/pandas/issues/1008
-.. _GH933: https://github.com/pydata/pandas/issues/933
-.. _GH953: https://github.com/pydata/pandas/issues/953
-.. _GH1291: https://github.com/pydata/pandas/issues/1291
-.. _GH1013: https://github.com/pydata/pandas/issues/1013
-.. _GH1016: https://github.com/pydata/pandas/issues/1016
-.. _GH1011: https://github.com/pydata/pandas/issues/1011
-.. _GH1015: https://github.com/pydata/pandas/issues/1015
-.. _GH1010: https://github.com/pydata/pandas/issues/1010
-.. _GH1001: https://github.com/pydata/pandas/issues/1001
-.. _GH992: https://github.com/pydata/pandas/issues/992
-.. _GH991: https://github.com/pydata/pandas/issues/991
-.. _GH989: https://github.com/pydata/pandas/issues/989
-.. _GH934: https://github.com/pydata/pandas/issues/934
-.. _GH980: https://github.com/pydata/pandas/issues/980
-.. _GH958: https://github.com/pydata/pandas/issues/958
-.. _GH948: https://github.com/pydata/pandas/issues/948
-.. _GH846: https://github.com/pydata/pandas/issues/846
-.. _GH995: https://github.com/pydata/pandas/issues/995
-.. _GH975: https://github.com/pydata/pandas/issues/975
-.. _GH1313: https://github.com/pydata/pandas/issues/1313
-
-
-pandas 0.7.2
-============
-
-**Release date:** March 16, 2012
-
-**New features / modules**
-
- - Add additional tie-breaking methods in DataFrame.rank (GH874_)
- - Add ascending parameter to rank in Series, DataFrame (GH875_)
- - Add sort_columns parameter to allow unsorted plots (GH918_)
- - IPython tab completion on GroupBy objects
-
-**API Changes**
-
- - Series.sum returns 0 instead of NA when called on an empty
- series. Analogously for a DataFrame whose rows or columns are length 0
- (GH844_)
-
-**Improvements to existing features**
-
- - Don't use groups dict in Grouper.size (GH860_)
- - Use khash for Series.value_counts, add raw function to algorithms.py (GH861_)
- - Enable column access via attributes on GroupBy (GH882_)
- - Enable setting existing columns (only) via attributes on DataFrame, Panel
- (GH883_)
- - Intercept __builtin__.sum in groupby (GH885_)
- - Can pass dict to DataFrame.fillna to use different values per column (GH661_)
- - Can select multiple hierarchical groups by passing list of values in .ix
- (GH134_)
- - Add level keyword to ``drop`` for dropping values from a level (GH159_)
- - Add ``coerce_float`` option on DataFrame.from_records (GH893_)
- - Raise exception if passed date_parser fails in ``read_csv``
- - Add ``axis`` option to DataFrame.fillna (GH174_)
- - Fixes to Panel to make it easier to subclass (GH888_)
-
-**Bug fixes**
-
- - Fix overflow-related bugs in groupby (GH850_, GH851_)
- - Fix unhelpful error message in parsers (GH856_)
- - Better err msg for failed boolean slicing of dataframe (GH859_)
- - Series.count cannot accept a string (level name) in the level argument (GH869_)
- - Group index platform int check (GH870_)
- - concat on axis=1 and ignore_index=True raises TypeError (GH871_)
- - Further unicode handling issues resolved (GH795_)
- - Fix failure in multiindex-based access in Panel (GH880_)
- - Fix DataFrame boolean slice assignment failure (GH881_)
- - Fix combineAdd NotImplementedError for SparseDataFrame (GH887_)
- - Fix DataFrame.to_html encoding and columns (GH890_, GH891_, GH909_)
- - Fix na-filling handling in mixed-type DataFrame (GH910_)
- - Fix to DataFrame.set_value with non-existant row/col (GH911_)
- - Fix malformed block in groupby when excluding nuisance columns (GH916_)
- - Fix inconsistant NA handling in dtype=object arrays (GH925_)
- - Fix missing center-of-mass computation in ewmcov (GH862_)
- - Don't raise exception when opening read-only HDF5 file (GH847_)
- - Fix possible out-of-bounds memory access in 0-length Series (GH917_)
-
-.. _GH874: https://github.com/pydata/pandas/issues/874
-.. _GH875: https://github.com/pydata/pandas/issues/875
-.. _GH893: https://github.com/pydata/pandas/issues/893
-.. _GH918: https://github.com/pydata/pandas/issues/918
-.. _GH844: https://github.com/pydata/pandas/issues/844
-.. _GH860: https://github.com/pydata/pandas/issues/860
-.. _GH861: https://github.com/pydata/pandas/issues/861
-.. _GH882: https://github.com/pydata/pandas/issues/882
-.. _GH883: https://github.com/pydata/pandas/issues/883
-.. _GH885: https://github.com/pydata/pandas/issues/885
-.. _GH661: https://github.com/pydata/pandas/issues/661
-.. _GH134: https://github.com/pydata/pandas/issues/134
-.. _GH159: https://github.com/pydata/pandas/issues/159
-.. _GH174: https://github.com/pydata/pandas/issues/174
-.. _GH888: https://github.com/pydata/pandas/issues/888
-.. _GH850: https://github.com/pydata/pandas/issues/850
-.. _GH851: https://github.com/pydata/pandas/issues/851
-.. _GH856: https://github.com/pydata/pandas/issues/856
-.. _GH859: https://github.com/pydata/pandas/issues/859
-.. _GH869: https://github.com/pydata/pandas/issues/869
-.. _GH870: https://github.com/pydata/pandas/issues/870
-.. _GH871: https://github.com/pydata/pandas/issues/871
-.. _GH795: https://github.com/pydata/pandas/issues/795
-.. _GH880: https://github.com/pydata/pandas/issues/880
-.. _GH881: https://github.com/pydata/pandas/issues/881
-.. _GH887: https://github.com/pydata/pandas/issues/887
-.. _GH890: https://github.com/pydata/pandas/issues/890
-.. _GH891: https://github.com/pydata/pandas/issues/891
-.. _GH909: https://github.com/pydata/pandas/issues/909
-.. _GH910: https://github.com/pydata/pandas/issues/910
-.. _GH911: https://github.com/pydata/pandas/issues/911
-.. _GH916: https://github.com/pydata/pandas/issues/916
-.. _GH925: https://github.com/pydata/pandas/issues/925
-.. _GH862: https://github.com/pydata/pandas/issues/862
-.. _GH847: https://github.com/pydata/pandas/issues/847
-.. _GH917: https://github.com/pydata/pandas/issues/917
-
-
-pandas 0.7.1
-============
-
-**Release date:** February 29, 2012
-
-**New features / modules**
-
- - Add ``to_clipboard`` function to pandas namespace for writing objects to
- the system clipboard (GH774_)
- - Add ``itertuples`` method to DataFrame for iterating through the rows of a
- dataframe as tuples (GH818_)
- - Add ability to pass fill_value and method to DataFrame and Series align
- method (GH806_, GH807_)
- - Add fill_value option to reindex, align methods (GH784_)
- - Enable concat to produce DataFrame from Series (GH787_)
- - Add ``between`` method to Series (GH802_)
- - Add HTML representation hook to DataFrame for the IPython HTML notebook
- (GH773_)
- - Support for reading Excel 2007 XML documents using openpyxl
-
-**Improvements to existing features**
-
- - Improve performance and memory usage of fillna on DataFrame
- - Can concatenate a list of Series along axis=1 to obtain a DataFrame (GH787_)
-
-**Bug fixes**
-
- - Fix memory leak when inserting large number of columns into a single
- DataFrame (GH790_)
- - Appending length-0 DataFrame with new columns would not result in those new
- columns being part of the resulting concatenated DataFrame (GH782_)
- - Fixed groupby corner case when passing dictionary grouper and as_index is
- False (GH819_)
- - Fixed bug whereby bool array sometimes had object dtype (GH820_)
- - Fix exception thrown on np.diff (GH816_)
- - Fix to_records where columns are non-strings (GH822_)
- - Fix Index.intersection where indices have incomparable types (GH811_)
- - Fix ExcelFile throwing an exception for two-line file (GH837_)
- - Add clearer error message in csv parser (GH835_)
- - Fix loss of fractional seconds in HDFStore (GH513_)
- - Fix DataFrame join where columns have datetimes (GH787_)
- - Work around numpy performance issue in take (GH817_)
- - Improve comparison operations for NA-friendliness (GH801_)
- - Fix indexing operation for floating point values (GH780_, GH798_)
- - Fix groupby case resulting in malformed dataframe (GH814_)
- - Fix behavior of reindex of Series dropping name (GH812_)
- - Improve on redudant groupby computation (GH775_)
- - Catch possible NA assignment to int/bool series with exception (GH839_)
-
-.. _GH774: https://github.com/pydata/pandas/issues/774
-.. _GH818: https://github.com/pydata/pandas/issues/818
-.. _GH806: https://github.com/pydata/pandas/issues/806
-.. _GH807: https://github.com/pydata/pandas/issues/807
-.. _GH784: https://github.com/pydata/pandas/issues/784
-.. _GH787: https://github.com/pydata/pandas/issues/787
-.. _GH802: https://github.com/pydata/pandas/issues/802
-.. _GH773: https://github.com/pydata/pandas/issues/773
-.. _GH790: https://github.com/pydata/pandas/issues/790
-.. _GH782: https://github.com/pydata/pandas/issues/782
-.. _GH819: https://github.com/pydata/pandas/issues/819
-.. _GH820: https://github.com/pydata/pandas/issues/820
-.. _GH816: https://github.com/pydata/pandas/issues/816
-.. _GH822: https://github.com/pydata/pandas/issues/822
-.. _GH811: https://github.com/pydata/pandas/issues/811
-.. _GH837: https://github.com/pydata/pandas/issues/837
-.. _GH835: https://github.com/pydata/pandas/issues/835
-.. _GH513: https://github.com/pydata/pandas/issues/513
-.. _GH817: https://github.com/pydata/pandas/issues/817
-.. _GH801: https://github.com/pydata/pandas/issues/801
-.. _GH780: https://github.com/pydata/pandas/issues/780
-.. _GH798: https://github.com/pydata/pandas/issues/798
-.. _GH814: https://github.com/pydata/pandas/issues/814
-.. _GH812: https://github.com/pydata/pandas/issues/812
-.. _GH775: https://github.com/pydata/pandas/issues/775
-.. _GH839: https://github.com/pydata/pandas/issues/839
-
-
-pandas 0.7.0
-============
-
-**Release date:** 2/9/2012
-
-**New features / modules**
-
- - New ``merge`` function for efficiently performing full gamut of database /
- relational-algebra operations. Refactored existing join methods to use the
- new infrastructure, resulting in substantial performance gains (GH220_,
- GH249_, GH267_)
- - New ``concat`` function for concatenating DataFrame or Panel objects along
- an axis. Can form union or intersection of the other axes. Improves
- performance of ``DataFrame.append`` (GH468_, GH479_, GH273_)
- - Handle differently-indexed output values in ``DataFrame.apply`` (GH498_)
- - Can pass list of dicts (e.g., a list of shallow JSON objects) to DataFrame
- constructor (GH526_)
- - Add ``reorder_levels`` method to Series and DataFrame (GH534_)
- - Add dict-like ``get`` function to DataFrame and Panel (GH521_)
- - ``DataFrame.iterrows`` method for efficiently iterating through the rows of
- a DataFrame
- - Added ``DataFrame.to_panel`` with code adapted from ``LongPanel.to_long``
- - ``reindex_axis`` method added to DataFrame
- - Add ``level`` option to binary arithmetic functions on ``DataFrame`` and
- ``Series``
- - Add ``level`` option to the ``reindex`` and ``align`` methods on Series and
- DataFrame for broadcasting values across a level (GH542_, GH552_, others)
- - Add attribute-based item access to ``Panel`` and add IPython completion (PR
- GH554_)
- - Add ``logy`` option to ``Series.plot`` for log-scaling on the Y axis
- - Add ``index``, ``header``, and ``justify`` options to
- ``DataFrame.to_string``. Add option to (GH570_, GH571_)
- - Can pass multiple DataFrames to ``DataFrame.join`` to join on index (GH115_)
- - Can pass multiple Panels to ``Panel.join`` (GH115_)
- - Can pass multiple DataFrames to `DataFrame.append` to concatenate (stack)
- and multiple Series to ``Series.append`` too
- - Added ``justify`` argument to ``DataFrame.to_string`` to allow different
- alignment of column headers
- - Add ``sort`` option to GroupBy to allow disabling sorting of the group keys
- for potential speedups (GH595_)
- - Can pass MaskedArray to Series constructor (GH563_)
- - Add Panel item access via attributes and IPython completion (GH554_)
- - Implement ``DataFrame.lookup``, fancy-indexing analogue for retrieving
- values given a sequence of row and column labels (GH338_)
- - Add ``verbose`` option to ``read_csv`` and ``read_table`` to show number of
- NA values inserted in non-numeric columns (GH614_)
- - Can pass a list of dicts or Series to ``DataFrame.append`` to concatenate
- multiple rows (GH464_)
- - Add ``level`` argument to ``DataFrame.xs`` for selecting data from other
- MultiIndex levels. Can take one or more levels with potentially a tuple of
- keys for flexible retrieval of data (GH371_, GH629_)
- - New ``crosstab`` function for easily computing frequency tables (GH170_)
- - Can pass a list of functions to aggregate with groupby on a DataFrame,
- yielding an aggregated result with hierarchical columns (GH166_)
- - Add integer-indexing functions ``iget`` in Series and ``irow`` / ``iget``
- in DataFrame (GH628_)
- - Add new ``Series.unique`` function, significantly faster than
- ``numpy.unique`` (GH658_)
- - Add new ``cummin`` and ``cummax`` instance methods to ``Series`` and
- ``DataFrame`` (GH647_)
- - Add new ``value_range`` function to return min/max of a dataframe (GH288_)
- - Add ``drop`` parameter to ``reset_index`` method of ``DataFrame`` and added
- method to ``Series`` as well (GH699_)
- - Add ``isin`` method to Index objects, works just like ``Series.isin`` (GH
- GH657_)
- - Implement array interface on Panel so that ufuncs work (re: GH740_)
- - Add ``sort`` option to ``DataFrame.join`` (GH731_)
- - Improved handling of NAs (propagation) in binary operations with
- dtype=object arrays (GH737_)
- - Add ``abs`` method to Pandas objects
- - Added ``algorithms`` module to start collecting central algos
-
-**API Changes**
-
- - Label-indexing with integer indexes now raises KeyError if a label is not
- found instead of falling back on location-based indexing (GH700_)
- - Label-based slicing via ``ix`` or ``[]`` on Series will now only work if
- exact matches for the labels are found or if the index is monotonic (for
- range selections)
- - Label-based slicing and sequences of labels can be passed to ``[]`` on a
- Series for both getting and setting (GH86_)
- - `[]` operator (``__getitem__`` and ``__setitem__``) will raise KeyError
- with integer indexes when an index is not contained in the index. The prior
- behavior would fall back on position-based indexing if a key was not found
- in the index which would lead to subtle bugs. This is now consistent with
- the behavior of ``.ix`` on DataFrame and friends (GH328_)
- - Rename ``DataFrame.delevel`` to ``DataFrame.reset_index`` and add
- deprecation warning
- - `Series.sort` (an in-place operation) called on a Series which is a view on
- a larger array (e.g. a column in a DataFrame) will generate an Exception to
- prevent accidentally modifying the data source (GH316_)
- - Refactor to remove deprecated ``LongPanel`` class (GH552_)
- - Deprecated ``Panel.to_long``, renamed to ``to_frame``
- - Deprecated ``colSpace`` argument in ``DataFrame.to_string``, renamed to
- ``col_space``
- - Rename ``precision`` to ``accuracy`` in engineering float formatter (GH
- GH395_)
- - The default delimiter for ``read_csv`` is comma rather than letting
- ``csv.Sniffer`` infer it
- - Rename ``col_or_columns`` argument in ``DataFrame.drop_duplicates`` (GH
- GH734_)
-
-**Improvements to existing features**
-
- - Better error message in DataFrame constructor when passed column labels
- don't match data (GH497_)
- - Substantially improve performance of multi-GroupBy aggregation when a
- Python function is passed, reuse ndarray object in Cython (GH496_)
- - Can store objects indexed by tuples and floats in HDFStore (GH492_)
- - Don't print length by default in Series.to_string, add `length` option (GH
- GH489_)
- - Improve Cython code for multi-groupby to aggregate without having to sort
- the data (GH93_)
- - Improve MultiIndex reindexing speed by storing tuples in the MultiIndex,
- test for backwards unpickling compatibility
- - Improve column reindexing performance by using specialized Cython take
- function
- - Further performance tweaking of Series.__getitem__ for standard use cases
- - Avoid Index dict creation in some cases (i.e. when getting slices, etc.),
- regression from prior versions
- - Friendlier error message in setup.py if NumPy not installed
- - Use common set of NA-handling operations (sum, mean, etc.) in Panel class
- also (GH536_)
- - Default name assignment when calling ``reset_index`` on DataFrame with a
- regular (non-hierarchical) index (GH476_)
- - Use Cythonized groupers when possible in Series/DataFrame stat ops with
- ``level`` parameter passed (GH545_)
- - Ported skiplist data structure to C to speed up ``rolling_median`` by about
- 5-10x in most typical use cases (GH374_)
- - Some performance enhancements in constructing a Panel from a dict of
- DataFrame objects
- - Made ``Index._get_duplicates`` a public method by removing the underscore
- - Prettier printing of floats, and column spacing fix (GH395_, GH571_)
- - Add ``bold_rows`` option to DataFrame.to_html (GH586_)
- - Improve the performance of ``DataFrame.sort_index`` by up to 5x or more
- when sorting by multiple columns
- - Substantially improve performance of DataFrame and Series constructors when
- passed a nested dict or dict, respectively (GH540_, GH621_)
- - Modified setup.py so that pip / setuptools will install dependencies (GH
- GH507_, various pull requests)
- - Unstack called on DataFrame with non-MultiIndex will return Series (GH
- GH477_)
- - Improve DataFrame.to_string and console formatting to be more consistent in
- the number of displayed digits (GH395_)
- - Use bottleneck if available for performing NaN-friendly statistical
- operations that it implemented (GH91_)
- - Monkey-patch context to traceback in ``DataFrame.apply`` to indicate which
- row/column the function application failed on (GH614_)
- - Improved ability of read_table and read_clipboard to parse
- console-formatted DataFrames (can read the row of index names, etc.)
- - Can pass list of group labels (without having to convert to an ndarray
- yourself) to ``groupby`` in some cases (GH659_)
- - Use ``kind`` argument to Series.order for selecting different sort kinds
- (GH668_)
- - Add option to Series.to_csv to omit the index (GH684_)
- - Add ``delimiter`` as an alternative to ``sep`` in ``read_csv`` and other
- parsing functions
- - Substantially improved performance of groupby on DataFrames with many
- columns by aggregating blocks of columns all at once (GH745_)
- - Can pass a file handle or StringIO to Series/DataFrame.to_csv (GH765_)
- - Can pass sequence of integers to DataFrame.irow(icol) and Series.iget, (GH
- GH654_)
- - Prototypes for some vectorized string functions
- - Add float64 hash table to solve the Series.unique problem with NAs (GH714_)
- - Memoize objects when reading from file to reduce memory footprint
- - Can get and set a column of a DataFrame with hierarchical columns
- containing "empty" ('') lower levels without passing the empty levels (PR
- GH768_)
-
-**Bug fixes**
-
- - Raise exception in out-of-bounds indexing of Series instead of
- seg-faulting, regression from earlier releases (GH495_)
- - Fix error when joining DataFrames of different dtypes within the same
- typeclass (e.g. float32 and float64) (GH486_)
- - Fix bug in Series.min/Series.max on objects like datetime.datetime (GH
- GH487_)
- - Preserve index names in Index.union (GH501_)
- - Fix bug in Index joining causing subclass information (like DateRange type)
- to be lost in some cases (GH500_)
- - Accept empty list as input to DataFrame constructor, regression from 0.6.0
- (GH491_)
- - Can output DataFrame and Series with ndarray objects in a dtype=object
- array (GH490_)
- - Return empty string from Series.to_string when called on empty Series (GH
- GH488_)
- - Fix exception passing empty list to DataFrame.from_records
- - Fix Index.format bug (excluding name field) with datetimes with time info
- - Fix scalar value access in Series to always return NumPy scalars,
- regression from prior versions (GH510_)
- - Handle rows skipped at beginning of file in read_* functions (GH505_)
- - Handle improper dtype casting in ``set_value`` methods
- - Unary '-' / __neg__ operator on DataFrame was returning integer values
- - Unbox 0-dim ndarrays from certain operators like all, any in Series
- - Fix handling of missing columns (was combine_first-specific) in
- DataFrame.combine for general case (GH529_)
- - Fix type inference logic with boolean lists and arrays in DataFrame indexing
- - Use centered sum of squares in R-square computation if entity_effects=True
- in panel regression
- - Handle all NA case in Series.{corr, cov}, was raising exception (GH548_)
- - Aggregating by multiple levels with ``level`` argument to DataFrame, Series
- stat method, was broken (GH545_)
 - - Fix Cython bug when converter passed to read_csv produced a numeric array
- (buffer dtype mismatch when passed to Cython type inference function) (GH
- GH546_)
- - Fix exception when setting scalar value using .ix on a DataFrame with a
- MultiIndex (GH551_)
- - Fix outer join between two DateRanges with different offsets that returned
- an invalid DateRange
- - Cleanup DataFrame.from_records failure where index argument is an integer
 - - Fix DataFrame.from_records failure when passed a dictionary
- - Fix NA handling in {Series, DataFrame}.rank with non-floating point dtypes
- - Fix bug related to integer type-checking in .ix-based indexing
- - Handle non-string index name passed to DataFrame.from_records
- - DataFrame.insert caused the columns name(s) field to be discarded (GH527_)
 - - Fix erroneous results in monotonic many-to-one left joins
- - Fix DataFrame.to_string to remove extra column white space (GH571_)
- - Format floats to default to same number of digits (GH395_)
- - Added decorator to copy docstring from one function to another (GH449_)
- - Fix error in monotonic many-to-one left joins
- - Fix __eq__ comparison between DateOffsets with different relativedelta
- keywords passed
- - Fix exception caused by parser converter returning strings (GH583_)
- - Fix MultiIndex formatting bug with integer names (GH601_)
- - Fix bug in handling of non-numeric aggregates in Series.groupby (GH612_)
- - Fix TypeError with tuple subclasses (e.g. namedtuple) in
- DataFrame.from_records (GH611_)
- - Catch misreported console size when running IPython within Emacs
- - Fix minor bug in pivot table margins, loss of index names and length-1
- 'All' tuple in row labels
- - Add support for legacy WidePanel objects to be read from HDFStore
- - Fix out-of-bounds segfault in pad_object and backfill_object methods when
- either source or target array are empty
- - Could not create a new column in a DataFrame from a list of tuples
- - Fix bugs preventing SparseDataFrame and SparseSeries working with groupby
- (GH666_)
- - Use sort kind in Series.sort / argsort (GH668_)
- - Fix DataFrame operations on non-scalar, non-pandas objects (GH672_)
- - Don't convert DataFrame column to integer type when passing integer to
- __setitem__ (GH669_)
- - Fix downstream bug in pivot_table caused by integer level names in
- MultiIndex (GH678_)
- - Fix SparseSeries.combine_first when passed a dense Series (GH687_)
- - Fix performance regression in HDFStore loading when DataFrame or Panel
- stored in table format with datetimes
- - Raise Exception in DateRange when offset with n=0 is passed (GH683_)
- - Fix get/set inconsistency with .ix property and integer location but
- non-integer index (GH707_)
- - Use right dropna function for SparseSeries. Return dense Series for NA fill
- value (GH730_)
- - Fix Index.format bug causing incorrectly string-formatted Series with
- datetime indexes (GH726_, GH758_)
- - Fix errors caused by object dtype arrays passed to ols (GH759_)
- - Fix error where column names lost when passing list of labels to
- DataFrame.__getitem__, (GH662_)
- - Fix error whereby top-level week iterator overwrote week instance
- - Fix circular reference causing memory leak in sparse array / series /
- frame, (GH663_)
- - Fix integer-slicing from integers-as-floats (GH670_)
- - Fix zero division errors in nanops from object dtype arrays in all NA case
- (GH676_)
- - Fix csv encoding when using unicode (GH705_, GH717_, GH738_)
- - Fix assumption that each object contains every unique block type in concat,
- (GH708_)
- - Fix sortedness check of multiindex in to_panel (GH719_, 720)
- - Fix that None was not treated as NA in PyObjectHashtable
- - Fix hashing dtype because of endianness confusion (GH747_, GH748_)
- - Fix SparseSeries.dropna to return dense Series in case of NA fill value (GH
- GH730_)
- - Use map_infer instead of np.vectorize. handle NA sentinels if converter
- yields numeric array, (GH753_)
- - Fixes and improvements to DataFrame.rank (GH742_)
- - Fix catching AttributeError instead of NameError for bottleneck
- - Try to cast non-MultiIndex to better dtype when calling reset_index (GH726_
- GH440_)
 - - Fix '#1.QNAN0' float bug on 2.6/win64
- - Allow subclasses of dicts in DataFrame constructor, with tests
- - Fix problem whereby set_index destroys column multiindex (GH764_)
- - Hack around bug in generating DateRange from naive DateOffset (GH770_)
- - Fix bug in DateRange.intersection causing incorrect results with some
- overlapping ranges (GH771_)
-
-Thanks
-------
-- Craig Austin
-- Chris Billington
-- Marius Cobzarenco
-- Mario Gamboa-Cavazos
-- Hans-Martin Gaudecker
-- Arthur Gerigk
-- Yaroslav Halchenko
-- Jeff Hammerbacher
-- Matt Harrison
-- Andreas Hilboll
-- Luc Kesters
-- Adam Klein
-- Gregg Lind
-- Solomon Negusse
-- Wouter Overmeire
-- Christian Prinoth
-- Jeff Reback
-- Sam Reckoner
-- Craig Reeson
-- Jan Schulz
-- Skipper Seabold
-- Ted Square
-- Graham Taylor
-- Aman Thakral
-- Chris Uga
-- Dieter Vandenbussche
-- Texas P.
-- Pinxing Ye
-- ... and everyone I forgot
-
-.. _GH220: https://github.com/pydata/pandas/issues/220
-.. _GH249: https://github.com/pydata/pandas/issues/249
-.. _GH267: https://github.com/pydata/pandas/issues/267
-.. _GH468: https://github.com/pydata/pandas/issues/468
-.. _GH479: https://github.com/pydata/pandas/issues/479
-.. _GH273: https://github.com/pydata/pandas/issues/273
-.. _GH498: https://github.com/pydata/pandas/issues/498
-.. _GH526: https://github.com/pydata/pandas/issues/526
-.. _GH534: https://github.com/pydata/pandas/issues/534
-.. _GH521: https://github.com/pydata/pandas/issues/521
-.. _GH542: https://github.com/pydata/pandas/issues/542
-.. _GH552: https://github.com/pydata/pandas/issues/552
-.. _GH554: https://github.com/pydata/pandas/issues/554
-.. _GH570: https://github.com/pydata/pandas/issues/570
-.. _GH571: https://github.com/pydata/pandas/issues/571
-.. _GH115: https://github.com/pydata/pandas/issues/115
-.. _GH595: https://github.com/pydata/pandas/issues/595
-.. _GH563: https://github.com/pydata/pandas/issues/563
-.. _GH338: https://github.com/pydata/pandas/issues/338
-.. _GH614: https://github.com/pydata/pandas/issues/614
-.. _GH464: https://github.com/pydata/pandas/issues/464
-.. _GH371: https://github.com/pydata/pandas/issues/371
-.. _GH629: https://github.com/pydata/pandas/issues/629
-.. _GH170: https://github.com/pydata/pandas/issues/170
-.. _GH166: https://github.com/pydata/pandas/issues/166
-.. _GH628: https://github.com/pydata/pandas/issues/628
-.. _GH658: https://github.com/pydata/pandas/issues/658
-.. _GH647: https://github.com/pydata/pandas/issues/647
-.. _GH288: https://github.com/pydata/pandas/issues/288
-.. _GH699: https://github.com/pydata/pandas/issues/699
-.. _GH657: https://github.com/pydata/pandas/issues/657
-.. _GH740: https://github.com/pydata/pandas/issues/740
-.. _GH731: https://github.com/pydata/pandas/issues/731
-.. _GH737: https://github.com/pydata/pandas/issues/737
-.. _GH700: https://github.com/pydata/pandas/issues/700
-.. _GH328: https://github.com/pydata/pandas/issues/328
-.. _GH316: https://github.com/pydata/pandas/issues/316
-.. _GH395: https://github.com/pydata/pandas/issues/395
-.. _GH734: https://github.com/pydata/pandas/issues/734
-.. _GH497: https://github.com/pydata/pandas/issues/497
-.. _GH496: https://github.com/pydata/pandas/issues/496
-.. _GH492: https://github.com/pydata/pandas/issues/492
-.. _GH489: https://github.com/pydata/pandas/issues/489
-.. _GH536: https://github.com/pydata/pandas/issues/536
-.. _GH476: https://github.com/pydata/pandas/issues/476
-.. _GH545: https://github.com/pydata/pandas/issues/545
-.. _GH374: https://github.com/pydata/pandas/issues/374
-.. _GH586: https://github.com/pydata/pandas/issues/586
-.. _GH540: https://github.com/pydata/pandas/issues/540
-.. _GH621: https://github.com/pydata/pandas/issues/621
-.. _GH507: https://github.com/pydata/pandas/issues/507
-.. _GH477: https://github.com/pydata/pandas/issues/477
-.. _GH659: https://github.com/pydata/pandas/issues/659
-.. _GH668: https://github.com/pydata/pandas/issues/668
-.. _GH684: https://github.com/pydata/pandas/issues/684
-.. _GH745: https://github.com/pydata/pandas/issues/745
-.. _GH765: https://github.com/pydata/pandas/issues/765
-.. _GH654: https://github.com/pydata/pandas/issues/654
-.. _GH714: https://github.com/pydata/pandas/issues/714
-.. _GH768: https://github.com/pydata/pandas/issues/768
-.. _GH495: https://github.com/pydata/pandas/issues/495
-.. _GH486: https://github.com/pydata/pandas/issues/486
-.. _GH487: https://github.com/pydata/pandas/issues/487
-.. _GH501: https://github.com/pydata/pandas/issues/501
-.. _GH500: https://github.com/pydata/pandas/issues/500
-.. _GH491: https://github.com/pydata/pandas/issues/491
-.. _GH490: https://github.com/pydata/pandas/issues/490
-.. _GH488: https://github.com/pydata/pandas/issues/488
-.. _GH510: https://github.com/pydata/pandas/issues/510
-.. _GH505: https://github.com/pydata/pandas/issues/505
-.. _GH529: https://github.com/pydata/pandas/issues/529
-.. _GH548: https://github.com/pydata/pandas/issues/548
-.. _GH546: https://github.com/pydata/pandas/issues/546
-.. _GH551: https://github.com/pydata/pandas/issues/551
-.. _GH527: https://github.com/pydata/pandas/issues/527
-.. _GH449: https://github.com/pydata/pandas/issues/449
-.. _GH583: https://github.com/pydata/pandas/issues/583
-.. _GH601: https://github.com/pydata/pandas/issues/601
-.. _GH612: https://github.com/pydata/pandas/issues/612
-.. _GH611: https://github.com/pydata/pandas/issues/611
-.. _GH666: https://github.com/pydata/pandas/issues/666
-.. _GH672: https://github.com/pydata/pandas/issues/672
-.. _GH669: https://github.com/pydata/pandas/issues/669
-.. _GH678: https://github.com/pydata/pandas/issues/678
-.. _GH687: https://github.com/pydata/pandas/issues/687
-.. _GH683: https://github.com/pydata/pandas/issues/683
-.. _GH707: https://github.com/pydata/pandas/issues/707
-.. _GH730: https://github.com/pydata/pandas/issues/730
-.. _GH759: https://github.com/pydata/pandas/issues/759
-.. _GH662: https://github.com/pydata/pandas/issues/662
-.. _GH663: https://github.com/pydata/pandas/issues/663
-.. _GH670: https://github.com/pydata/pandas/issues/670
-.. _GH676: https://github.com/pydata/pandas/issues/676
-.. _GH705: https://github.com/pydata/pandas/issues/705
-.. _GH717: https://github.com/pydata/pandas/issues/717
-.. _GH738: https://github.com/pydata/pandas/issues/738
-.. _GH708: https://github.com/pydata/pandas/issues/708
-.. _GH719: https://github.com/pydata/pandas/issues/719
-.. _GH747: https://github.com/pydata/pandas/issues/747
-.. _GH748: https://github.com/pydata/pandas/issues/748
-.. _GH753: https://github.com/pydata/pandas/issues/753
-.. _GH742: https://github.com/pydata/pandas/issues/742
-.. _GH726: https://github.com/pydata/pandas/issues/726
-.. _GH440: https://github.com/pydata/pandas/issues/440
-.. _GH764: https://github.com/pydata/pandas/issues/764
-.. _GH770: https://github.com/pydata/pandas/issues/770
-.. _GH771: https://github.com/pydata/pandas/issues/771
-.. _GH758: https://github.com/pydata/pandas/issues/758
-.. _GH86: https://github.com/pydata/pandas/issues/86
-.. _GH91: https://github.com/pydata/pandas/issues/91
-.. _GH93: https://github.com/pydata/pandas/issues/93
-
-
-pandas 0.6.1
-============
-
-**Release date:** 12/13/2011
-
-**API Changes**
-
- - Rename `names` argument in DataFrame.from_records to `columns`. Add
- deprecation warning
- - Boolean get/set operations on Series with boolean Series will reindex
- instead of requiring that the indexes be exactly equal (GH429_)
-
-**New features / modules**
-
- - Can pass Series to DataFrame.append with ignore_index=True for appending a
- single row (GH430_)
- - Add Spearman and Kendall correlation options to Series.corr and
- DataFrame.corr (GH428_)
- - Add new `get_value` and `set_value` methods to Series, DataFrame, and Panel
- to very low-overhead access to scalar elements. df.get_value(row, column)
- is about 3x faster than df[column][row] by handling fewer cases (GH437_,
- GH438_). Add similar methods to sparse data structures for compatibility
- - Add Qt table widget to sandbox (GH435_)
- - DataFrame.align can accept Series arguments, add axis keyword (GH461_)
- - Implement new SparseList and SparseArray data structures. SparseSeries now
- derives from SparseArray (GH463_)
- - max_columns / max_rows options in set_printoptions (GH453_)
- - Implement Series.rank and DataFrame.rank, fast versions of
- scipy.stats.rankdata (GH428_)
- - Implement DataFrame.from_items alternate constructor (GH444_)
- - DataFrame.convert_objects method for inferring better dtypes for object
- columns (GH302_)
- - Add rolling_corr_pairwise function for computing Panel of correlation
- matrices (GH189_)
- - Add `margins` option to `pivot_table` for computing subgroup aggregates (GH
- GH114_)
- - Add `Series.from_csv` function (GH482_)
-
-**Improvements to existing features**
-
- - Improve memory usage of `DataFrame.describe` (do not copy data
- unnecessarily) (GH425_)
- - Use same formatting function for outputting floating point Series to console
- as in DataFrame (GH420_)
- - DataFrame.delevel will try to infer better dtype for new columns (GH440_)
- - Exclude non-numeric types in DataFrame.{corr, cov}
- - Override Index.astype to enable dtype casting (GH412_)
- - Use same float formatting function for Series.__repr__ (GH420_)
- - Use available console width to output DataFrame columns (GH453_)
- - Accept ndarrays when setting items in Panel (GH452_)
- - Infer console width when printing __repr__ of DataFrame to console (PR
- GH453_)
- - Optimize scalar value lookups in the general case by 25% or more in Series
- and DataFrame
- - Can pass DataFrame/DataFrame and DataFrame/Series to
- rolling_corr/rolling_cov (GH462_)
- - Fix performance regression in cross-sectional count in DataFrame, affecting
- DataFrame.dropna speed
- - Column deletion in DataFrame copies no data (computes views on blocks) (GH
- GH158_)
- - MultiIndex.get_level_values can take the level name
- - More helpful error message when DataFrame.plot fails on one of the columns
- (GH478_)
- - Improve performance of DataFrame.{index, columns} attribute lookup
-
-**Bug fixes**
-
- - Fix O(K^2) memory leak caused by inserting many columns without
- consolidating, had been present since 0.4.0 (GH467_)
- - `DataFrame.count` should return Series with zero instead of NA with length-0
- axis (GH423_)
- - Fix Yahoo! Finance API usage in pandas.io.data (GH419_, GH427_)
- - Fix upstream bug causing failure in Series.align with empty Series (GH434_)
- - Function passed to DataFrame.apply can return a list, as long as it's the
- right length. Regression from 0.4 (GH432_)
- - Don't "accidentally" upcast scalar values when indexing using .ix (GH431_)
- - Fix groupby exception raised with as_index=False and single column selected
- (GH421_)
- - Implement DateOffset.__ne__ causing downstream bug (GH456_)
- - Fix __doc__-related issue when converting py -> pyo with py2exe
- - Bug fix in left join Cython code with duplicate monotonic labels
- - Fix bug when unstacking multiple levels described in GH451_
- - Exclude NA values in dtype=object arrays, regression from 0.5.0 (GH469_)
- - Use Cython map_infer function in DataFrame.applymap to properly infer
- output type, handle tuple return values and other things that were breaking
- (GH465_)
- - Handle floating point index values in HDFStore (GH454_)
- - Fixed stale column reference bug (cached Series object) caused by type
- change / item deletion in DataFrame (GH473_)
- - Index.get_loc should always raise Exception when there are duplicates
- - Handle differently-indexed Series input to DataFrame constructor (GH475_)
- - Omit nuisance columns in multi-groupby with Python function
- - Buglet in handling of single grouping in general apply
- - Handle type inference properly when passing list of lists or tuples to
- DataFrame constructor (GH484_)
- - Preserve Index / MultiIndex names in GroupBy.apply concatenation step (GH
- GH481_)
-
-Thanks
-------
-- Ralph Bean
-- Luca Beltrame
-- Marius Cobzarenco
-- Andreas Hilboll
-- Jev Kuznetsov
-- Adam Lichtenstein
-- Wouter Overmeire
-- Fernando Perez
-- Nathan Pinger
-- Christian Prinoth
-- Alex Reyfman
-- Joon Ro
-- Chang She
-- Ted Square
-- Chris Uga
-- Dieter Vandenbussche
-
-.. _GH429: https://github.com/pydata/pandas/issues/429
-.. _GH430: https://github.com/pydata/pandas/issues/430
-.. _GH428: https://github.com/pydata/pandas/issues/428
-.. _GH437: https://github.com/pydata/pandas/issues/437
-.. _GH438: https://github.com/pydata/pandas/issues/438
-.. _GH435: https://github.com/pydata/pandas/issues/435
-.. _GH461: https://github.com/pydata/pandas/issues/461
-.. _GH463: https://github.com/pydata/pandas/issues/463
-.. _GH453: https://github.com/pydata/pandas/issues/453
-.. _GH444: https://github.com/pydata/pandas/issues/444
-.. _GH302: https://github.com/pydata/pandas/issues/302
-.. _GH189: https://github.com/pydata/pandas/issues/189
-.. _GH114: https://github.com/pydata/pandas/issues/114
-.. _GH482: https://github.com/pydata/pandas/issues/482
-.. _GH425: https://github.com/pydata/pandas/issues/425
-.. _GH420: https://github.com/pydata/pandas/issues/420
-.. _GH440: https://github.com/pydata/pandas/issues/440
-.. _GH412: https://github.com/pydata/pandas/issues/412
-.. _GH452: https://github.com/pydata/pandas/issues/452
-.. _GH462: https://github.com/pydata/pandas/issues/462
-.. _GH158: https://github.com/pydata/pandas/issues/158
-.. _GH478: https://github.com/pydata/pandas/issues/478
-.. _GH467: https://github.com/pydata/pandas/issues/467
-.. _GH423: https://github.com/pydata/pandas/issues/423
-.. _GH419: https://github.com/pydata/pandas/issues/419
-.. _GH427: https://github.com/pydata/pandas/issues/427
-.. _GH434: https://github.com/pydata/pandas/issues/434
-.. _GH432: https://github.com/pydata/pandas/issues/432
-.. _GH431: https://github.com/pydata/pandas/issues/431
-.. _GH421: https://github.com/pydata/pandas/issues/421
-.. _GH456: https://github.com/pydata/pandas/issues/456
-.. _GH451: https://github.com/pydata/pandas/issues/451
-.. _GH469: https://github.com/pydata/pandas/issues/469
-.. _GH465: https://github.com/pydata/pandas/issues/465
-.. _GH454: https://github.com/pydata/pandas/issues/454
-.. _GH473: https://github.com/pydata/pandas/issues/473
-.. _GH475: https://github.com/pydata/pandas/issues/475
-.. _GH484: https://github.com/pydata/pandas/issues/484
-.. _GH481: https://github.com/pydata/pandas/issues/481
-
-
-pandas 0.6.0
-============
-
-**Release date:** 11/25/2011
-
-**API Changes**
-
- - Arithmetic methods like `sum` will attempt to sum dtype=object values by
- default instead of excluding them (GH382_)
-
-**New features / modules**
-
- - Add `melt` function to `pandas.core.reshape`
- - Add `level` parameter to group by level in Series and DataFrame
- descriptive statistics (GH313_)
 - - Add `head` and `tail` methods to Series, analogous to DataFrame (PR
 -   GH296_)
- - Add `Series.isin` function which checks if each value is contained in a
- passed sequence (GH289_)
- - Add `float_format` option to `Series.to_string`
- - Add `skip_footer` (GH291_) and `converters` (GH343_) options to
- `read_csv` and `read_table`
- - Add proper, tested weighted least squares to standard and panel OLS (GH
- GH303_)
- - Add `drop_duplicates` and `duplicated` functions for removing duplicate
- DataFrame rows and checking for duplicate rows, respectively (GH319_)
- - Implement logical (boolean) operators &, |, ^ on DataFrame (GH347_)
- - Add `Series.mad`, mean absolute deviation, matching DataFrame
- - Add `QuarterEnd` DateOffset (GH321_)
- - Add matrix multiplication function `dot` to DataFrame (GH65_)
- - Add `orient` option to `Panel.from_dict` to ease creation of mixed-type
- Panels (GH359_, GH301_)
- - Add `DataFrame.from_dict` with similar `orient` option
- - Can now pass list of tuples or list of lists to `DataFrame.from_records`
- for fast conversion to DataFrame (GH357_)
- - Can pass multiple levels to groupby, e.g. `df.groupby(level=[0, 1])` (GH
- GH103_)
- - Can sort by multiple columns in `DataFrame.sort_index` (GH92_, GH362_)
- - Add fast `get_value` and `put_value` methods to DataFrame and
- micro-performance tweaks (GH360_)
- - Add `cov` instance methods to Series and DataFrame (GH194_, GH362_)
- - Add bar plot option to `DataFrame.plot` (GH348_)
- - Add `idxmin` and `idxmax` functions to Series and DataFrame for computing
- index labels achieving maximum and minimum values (GH286_)
- - Add `read_clipboard` function for parsing DataFrame from OS clipboard,
- should work across platforms (GH300_)
- - Add `nunique` function to Series for counting unique elements (GH297_)
- - DataFrame constructor will use Series name if no columns passed (GH373_)
- - Support regular expressions and longer delimiters in read_table/read_csv,
- but does not handle quoted strings yet (GH364_)
- - Add `DataFrame.to_html` for formatting DataFrame to HTML (GH387_)
- - MaskedArray can be passed to DataFrame constructor and masked values will be
- converted to NaN (GH396_)
- - Add `DataFrame.boxplot` function (GH368_, others)
- - Can pass extra args, kwds to DataFrame.apply (GH376_)
-
-**Improvements to existing features**
-
- - Raise more helpful exception if date parsing fails in DateRange (GH298_)
- - Vastly improved performance of GroupBy on axes with a MultiIndex (GH299_)
- - Print level names in hierarchical index in Series repr (GH305_)
- - Return DataFrame when performing GroupBy on selected column and
- as_index=False (GH308_)
- - Can pass vector to `on` argument in `DataFrame.join` (GH312_)
- - Don't show Series name if it's None in the repr, also omit length for short
- Series (GH317_)
- - Show legend by default in `DataFrame.plot`, add `legend` boolean flag (GH
- GH324_)
- - Significantly improved performance of `Series.order`, which also makes
- np.unique called on a Series faster (GH327_)
- - Faster cythonized count by level in Series and DataFrame (GH341_)
- - Raise exception if dateutil 2.0 installed on Python 2.x runtime (GH346_)
- - Significant GroupBy performance enhancement with multiple keys with many
- "empty" combinations
- - New Cython vectorized function `map_infer` speeds up `Series.apply` and
- `Series.map` significantly when passed elementwise Python function,
- motivated by GH355_
- - Cythonized `cache_readonly`, resulting in substantial micro-performance
- enhancements throughout the codebase (GH361_)
- - Special Cython matrix iterator for applying arbitrary reduction operations
- with 3-5x better performance than `np.apply_along_axis` (GH309_)
- - Add `raw` option to `DataFrame.apply` for getting better performance when
- the passed function only requires an ndarray (GH309_)
- - Improve performance of `MultiIndex.from_tuples`
- - Can pass multiple levels to `stack` and `unstack` (GH370_)
- - Can pass multiple values columns to `pivot_table` (GH381_)
- - Can call `DataFrame.delevel` with standard Index with name set (GH393_)
- - Use Series name in GroupBy for result index (GH363_)
- - Refactor Series/DataFrame stat methods to use common set of NaN-friendly
- function
- - Handle NumPy scalar integers at C level in Cython conversion routines
-
-**Bug fixes**
-
- - Fix bug in `DataFrame.to_csv` when writing a DataFrame with an index
- name (GH290_)
- - DataFrame should clear its Series caches on consolidation, was causing
- "stale" Series to be returned in some corner cases (GH304_)
- - DataFrame constructor failed if a column had a list of tuples (GH293_)
- - Ensure that `Series.apply` always returns a Series and implement
- `Series.round` (GH314_)
- - Support boolean columns in Cythonized groupby functions (GH315_)
- - `DataFrame.describe` should not fail if there are no numeric columns,
- instead return categorical describe (GH323_)
- - Fixed bug which could cause columns to be printed in wrong order in
- `DataFrame.to_string` if specific list of columns passed (GH325_)
- - Fix legend plotting failure if DataFrame columns are integers (GH326_)
- - Shift start date back by one month for Yahoo! Finance API in pandas.io.data
- (GH329_)
- - Fix `DataFrame.join` failure on unconsolidated inputs (GH331_)
- - DataFrame.min/max will no longer fail on mixed-type DataFrame (GH337_)
- - Fix `read_csv` / `read_table` failure when passing list to index_col that is
- not in ascending order (GH349_)
- - Fix failure passing Int64Index to Index.union when both are monotonic
- - Fix error when passing SparseSeries to (dense) DataFrame constructor
- - Added missing bang at top of setup.py (GH352_)
- - Change `is_monotonic` on MultiIndex so it properly compares the tuples
- - Fix MultiIndex outer join logic (GH351_)
- - Set index name attribute with single-key groupby (GH358_)
- - Bug fix in reflexive binary addition in Series and DataFrame for
- non-commutative operations (like string concatenation) (GH353_)
- - setupegg.py will invoke Cython (GH192_)
- - Fix block consolidation bug after inserting column into MultiIndex (GH366_)
- - Fix bug in join operations between Index and Int64Index (GH367_)
- - Handle min_periods=0 case in moving window functions (GH365_)
- - Fixed corner cases in DataFrame.apply/pivot with empty DataFrame (GH378_)
- - Fixed repr exception when Series name is a tuple
- - Always return DateRange from `asfreq` (GH390_)
 - - Pass level names to `swaplevel` (GH379_)
- - Don't lose index names in `MultiIndex.droplevel` (GH394_)
- - Infer more proper return type in `DataFrame.apply` when no columns or rows
- depending on whether the passed function is a reduction (GH389_)
- - Always return NA/NaN from Series.min/max and DataFrame.min/max when all of a
- row/column/values are NA (GH384_)
- - Enable partial setting with .ix / advanced indexing (GH397_)
- - Handle mixed-type DataFrames correctly in unstack, do not lose type
- information (GH403_)
- - Fix integer name formatting bug in Index.format and in Series.__repr__
- - Handle label types other than string passed to groupby (GH405_)
- - Fix bug in .ix-based indexing with partial retrieval when a label is not
- contained in a level
- - Index name was not being pickled (GH408_)
- - Level name should be passed to result index in GroupBy.apply (GH416_)
-
-Thanks
-------
-
-- Craig Austin
-- Marius Cobzarenco
-- Joel Cross
-- Jeff Hammerbacher
-- Adam Klein
-- Thomas Kluyver
-- Jev Kuznetsov
-- Kieran O'Mahony
-- Wouter Overmeire
-- Nathan Pinger
-- Christian Prinoth
-- Skipper Seabold
-- Chang She
-- Ted Square
-- Aman Thakral
-- Chris Uga
-- Dieter Vandenbussche
-- carljv
-- rsamson
-
-.. _GH382: https://github.com/pydata/pandas/issues/382
-.. _GH313: https://github.com/pydata/pandas/issues/313
-.. _GH296: https://github.com/pydata/pandas/issues/296
-.. _GH289: https://github.com/pydata/pandas/issues/289
-.. _GH291: https://github.com/pydata/pandas/issues/291
-.. _GH343: https://github.com/pydata/pandas/issues/343
-.. _GH303: https://github.com/pydata/pandas/issues/303
-.. _GH319: https://github.com/pydata/pandas/issues/319
-.. _GH347: https://github.com/pydata/pandas/issues/347
-.. _GH321: https://github.com/pydata/pandas/issues/321
-.. _GH359: https://github.com/pydata/pandas/issues/359
-.. _GH301: https://github.com/pydata/pandas/issues/301
-.. _GH357: https://github.com/pydata/pandas/issues/357
-.. _GH103: https://github.com/pydata/pandas/issues/103
-.. _GH362: https://github.com/pydata/pandas/issues/362
-.. _GH360: https://github.com/pydata/pandas/issues/360
-.. _GH194: https://github.com/pydata/pandas/issues/194
-.. _GH348: https://github.com/pydata/pandas/issues/348
-.. _GH286: https://github.com/pydata/pandas/issues/286
-.. _GH300: https://github.com/pydata/pandas/issues/300
-.. _GH297: https://github.com/pydata/pandas/issues/297
-.. _GH373: https://github.com/pydata/pandas/issues/373
-.. _GH364: https://github.com/pydata/pandas/issues/364
-.. _GH387: https://github.com/pydata/pandas/issues/387
-.. _GH396: https://github.com/pydata/pandas/issues/396
-.. _GH368: https://github.com/pydata/pandas/issues/368
-.. _GH376: https://github.com/pydata/pandas/issues/376
-.. _GH298: https://github.com/pydata/pandas/issues/298
-.. _GH299: https://github.com/pydata/pandas/issues/299
-.. _GH305: https://github.com/pydata/pandas/issues/305
-.. _GH308: https://github.com/pydata/pandas/issues/308
-.. _GH312: https://github.com/pydata/pandas/issues/312
-.. _GH317: https://github.com/pydata/pandas/issues/317
-.. _GH324: https://github.com/pydata/pandas/issues/324
-.. _GH327: https://github.com/pydata/pandas/issues/327
-.. _GH341: https://github.com/pydata/pandas/issues/341
-.. _GH346: https://github.com/pydata/pandas/issues/346
-.. _GH355: https://github.com/pydata/pandas/issues/355
-.. _GH361: https://github.com/pydata/pandas/issues/361
-.. _GH309: https://github.com/pydata/pandas/issues/309
-.. _GH370: https://github.com/pydata/pandas/issues/370
-.. _GH381: https://github.com/pydata/pandas/issues/381
-.. _GH393: https://github.com/pydata/pandas/issues/393
-.. _GH363: https://github.com/pydata/pandas/issues/363
-.. _GH290: https://github.com/pydata/pandas/issues/290
-.. _GH304: https://github.com/pydata/pandas/issues/304
-.. _GH293: https://github.com/pydata/pandas/issues/293
-.. _GH314: https://github.com/pydata/pandas/issues/314
-.. _GH315: https://github.com/pydata/pandas/issues/315
-.. _GH323: https://github.com/pydata/pandas/issues/323
-.. _GH325: https://github.com/pydata/pandas/issues/325
-.. _GH326: https://github.com/pydata/pandas/issues/326
-.. _GH329: https://github.com/pydata/pandas/issues/329
-.. _GH331: https://github.com/pydata/pandas/issues/331
-.. _GH337: https://github.com/pydata/pandas/issues/337
-.. _GH349: https://github.com/pydata/pandas/issues/349
-.. _GH352: https://github.com/pydata/pandas/issues/352
-.. _GH351: https://github.com/pydata/pandas/issues/351
-.. _GH358: https://github.com/pydata/pandas/issues/358
-.. _GH353: https://github.com/pydata/pandas/issues/353
-.. _GH192: https://github.com/pydata/pandas/issues/192
-.. _GH366: https://github.com/pydata/pandas/issues/366
-.. _GH367: https://github.com/pydata/pandas/issues/367
-.. _GH365: https://github.com/pydata/pandas/issues/365
-.. _GH378: https://github.com/pydata/pandas/issues/378
-.. _GH390: https://github.com/pydata/pandas/issues/390
-.. _GH379: https://github.com/pydata/pandas/issues/379
-.. _GH394: https://github.com/pydata/pandas/issues/394
-.. _GH389: https://github.com/pydata/pandas/issues/389
-.. _GH384: https://github.com/pydata/pandas/issues/384
-.. _GH397: https://github.com/pydata/pandas/issues/397
-.. _GH403: https://github.com/pydata/pandas/issues/403
-.. _GH405: https://github.com/pydata/pandas/issues/405
-.. _GH408: https://github.com/pydata/pandas/issues/408
-.. _GH416: https://github.com/pydata/pandas/issues/416
-.. _GH65: https://github.com/pydata/pandas/issues/65
-.. _GH92: https://github.com/pydata/pandas/issues/92
-
-
-pandas 0.5.0
-============
-
-**Release date:** 10/24/2011
-
-This release of pandas includes a number of API changes (see below) and cleanup
-of deprecated APIs from pre-0.4.0 releases. There are also bug fixes, new
-features, numerous significant performance enhancements, and includes a new
-IPython completer hook to enable tab completion of DataFrame columns accesses
-as attributes (a new feature).
-
-In addition to the changes listed here from 0.4.3 to 0.5.0, the minor releases
-0.4.1, 0.4.2, and 0.4.3 brought some significant new functionality and
-performance improvements that are worth taking a look at.
-
-Thanks to all for bug reports, contributed patches and generally providing
-feedback on the library.
-
-**API Changes**
-
- - `read_table`, `read_csv`, and `ExcelFile.parse` default argument for
-   `index_col` is now None. To use one or more of the columns as the resulting
- DataFrame's index, these must be explicitly specified now
- - Parsing functions like `read_csv` no longer parse dates by default
-   (GH225_)
- - Removed `weights` option in panel regression which was not doing anything
- principled (GH155_)
- - Changed `buffer` argument name in `Series.to_string` to `buf`
- - `Series.to_string` and `DataFrame.to_string` now return strings by default
- instead of printing to sys.stdout
- - Deprecated `nanRep` argument in various `to_string` and `to_csv` functions
- in favor of `na_rep`. Will be removed in 0.6 (GH275_)
- - Renamed `delimiter` to `sep` in `DataFrame.from_csv` for consistency
- - Changed order of `Series.clip` arguments to match those of `numpy.clip` and
- added (unimplemented) `out` argument so `numpy.clip` can be called on a
- Series (GH272_)
- - Series functions renamed (and thus deprecated) in 0.4 series have been
- removed:
-
- * `asOf`, use `asof`
- * `toDict`, use `to_dict`
- * `toString`, use `to_string`
- * `toCSV`, use `to_csv`
- * `merge`, use `map`
- * `applymap`, use `apply`
- * `combineFirst`, use `combine_first`
- * `_firstTimeWithValue` use `first_valid_index`
- * `_lastTimeWithValue` use `last_valid_index`
-
- - DataFrame functions renamed / deprecated in 0.4 series have been removed:
-
- * `asMatrix` method, use `as_matrix` or `values` attribute
- * `combineFirst`, use `combine_first`
- * `getXS`, use `xs`
- * `merge`, use `join`
- * `fromRecords`, use `from_records`
- * `fromcsv`, use `from_csv`
- * `toRecords`, use `to_records`
- * `toDict`, use `to_dict`
- * `toString`, use `to_string`
- * `toCSV`, use `to_csv`
- * `_firstTimeWithValue` use `first_valid_index`
- * `_lastTimeWithValue` use `last_valid_index`
- * `toDataMatrix` is no longer needed
- * `rows()` method, use `index` attribute
- * `cols()` method, use `columns` attribute
- * `dropEmptyRows()`, use `dropna(how='all')`
- * `dropIncompleteRows()`, use `dropna()`
- * `tapply(f)`, use `apply(f, axis=1)`
- * `tgroupby(keyfunc, aggfunc)`, use `groupby` with `axis=1`
-
- - Other outstanding deprecations have been removed:
-
- * `indexField` argument in `DataFrame.from_records`
- * `missingAtEnd` argument in `Series.order`. Use `na_last` instead
- * `Series.fromValue` classmethod, use regular `Series` constructor instead
- * Functions `parseCSV`, `parseText`, and `parseExcel` methods in
- `pandas.io.parsers` have been removed
- * `Index.asOfDate` function
- * `Panel.getMinorXS` (use `minor_xs`) and `Panel.getMajorXS` (use
- `major_xs`)
- * `Panel.toWide`, use `Panel.to_wide` instead
-
-**New features / modules**
-
- - Added `DataFrame.align` method with standard join options
- - Added `parse_dates` option to `read_csv` and `read_table` methods to
- optionally try to parse dates in the index columns
- - Add `nrows`, `chunksize`, and `iterator` arguments to `read_csv` and
- `read_table`. The last two return a new `TextParser` class capable of
- lazily iterating through chunks of a flat file (GH242_)
- - Added ability to join on multiple columns in `DataFrame.join` (GH214_)
- - Added private `_get_duplicates` function to `Index` for identifying
- duplicate values more easily
- - Added column attribute access to DataFrame, e.g. df.A equivalent to df['A']
- if 'A' is a column in the DataFrame (GH213_)
- - Added IPython tab completion hook for DataFrame columns. (GH233_, GH230_)
- - Implement `Series.describe` for Series containing objects (GH241_)
- - Add inner join option to `DataFrame.join` when joining on key(s) (GH248_)
- - Can select set of DataFrame columns by passing a list to `__getitem__`
-   (GH253_)
- - Can use & and | to intersection / union Index objects, respectively
-   (GH261_)
- - Added `pivot_table` convenience function to pandas namespace (GH234_)
- - Implemented `Panel.rename_axis` function (GH243_)
- - DataFrame will show index level names in console output
- - Implemented `Panel.take`
- - Add `set_eng_float_format` function for setting alternate DataFrame
- floating point string formatting
- - Add convenience `set_index` function for creating a DataFrame index from
- its existing columns
-
-**Improvements to existing features**
-
- - Major performance improvements in file parsing functions `read_csv` and
- `read_table`
- - Added Cython function for converting tuples to ndarray very fast. Speeds up
- many MultiIndex-related operations
- - File parsing functions like `read_csv` and `read_table` will explicitly
- check if a parsed index has duplicates and raise a more helpful exception
- rather than deferring the check until later
- - Refactored merging / joining code into a tidy class and disabled unnecessary
- computations in the float/object case, thus getting about 10% better
- performance (GH211_)
- - Improved speed of `DataFrame.xs` on mixed-type DataFrame objects by about
- 5x, regression from 0.3.0 (GH215_)
- - With new `DataFrame.align` method, speeding up binary operations between
- differently-indexed DataFrame objects by 10-25%.
- - Significantly sped up conversion of nested dict into DataFrame (GH212_)
- - Can pass hierarchical index level name to `groupby` instead of the level
- number if desired (GH223_)
- - Add support for different delimiters in `DataFrame.to_csv` (GH244_)
- - Add more helpful error message when importing pandas post-installation from
- the source directory (GH250_)
- - Significantly speed up DataFrame `__repr__` and `count` on large mixed-type
- DataFrame objects
- - Better handling of pyx file dependencies in Cython module build (GH271_)
-
-**Bug fixes**
-
- - `read_csv` / `read_table` fixes
- - Be less aggressive about converting float->int in cases of floating point
- representations of integers like 1.0, 2.0, etc.
- - "True"/"False" will not get correctly converted to boolean
- - Index name attribute will get set when specifying an index column
- - Passing column names should force `header=None` (GH257_)
- - Don't modify passed column names when `index_col` is not
- None (GH258_)
- - Can sniff CSV separator in zip file (since seek is not supported, was
- failing before)
- - Worked around matplotlib "bug" in which series[:, np.newaxis] fails. Should
- be reported upstream to matplotlib (GH224_)
- - DataFrame.iteritems was not returning Series with the name attribute
- set. Also neither was DataFrame._series
- - Can store datetime.date objects in HDFStore (GH231_)
- - Index and Series names are now stored in HDFStore
- - Fixed problem in which data would get upcasted to object dtype in
- GroupBy.apply operations (GH237_)
- - Fixed outer join bug with empty DataFrame (GH238_)
- - Can create empty Panel (GH239_)
- - Fix join on single key when passing list with 1 entry (GH246_)
- - Don't raise Exception on plotting DataFrame with an all-NA column (GH251_,
- GH254_)
- - Bug min/max errors when called on integer DataFrames (GH241_)
- - `DataFrame.iteritems` and `DataFrame._series` not assigning name attribute
- - Panel.__repr__ raised exception on length-0 major/minor axes
- - `DataFrame.join` on key with empty DataFrame produced incorrect columns
- - Implemented `MultiIndex.diff` (GH260_)
- - `Int64Index.take` and `MultiIndex.take` lost name field, fix downstream
- issue GH262_
- - Can pass list of tuples to `Series` (GH270_)
- - Can pass level name to `DataFrame.stack`
- - Support set operations between MultiIndex and Index
- - Fix many corner cases in MultiIndex set operations
- - Fix MultiIndex-handling bug with GroupBy.apply when returned groups are not
- indexed the same
- - Fix corner case bugs in DataFrame.apply
- - Setting DataFrame index did not cause Series cache to get cleared
- - Various int32 -> int64 platform-specific issues
- - Don't be too aggressive converting to integer when parsing file with
- MultiIndex (GH285_)
- - Fix bug when slicing Series with negative indices before beginning
-
-Thanks
-------
-
-- Thomas Kluyver
-- Daniel Fortunov
-- Aman Thakral
-- Luca Beltrame
-- Wouter Overmeire
-
-.. _GH225: https://github.com/pydata/pandas/issues/225
-.. _GH155: https://github.com/pydata/pandas/issues/155
-.. _GH275: https://github.com/pydata/pandas/issues/275
-.. _GH272: https://github.com/pydata/pandas/issues/272
-.. _GH242: https://github.com/pydata/pandas/issues/242
-.. _GH214: https://github.com/pydata/pandas/issues/214
-.. _GH213: https://github.com/pydata/pandas/issues/213
-.. _GH233: https://github.com/pydata/pandas/issues/233
-.. _GH230: https://github.com/pydata/pandas/issues/230
-.. _GH241: https://github.com/pydata/pandas/issues/241
-.. _GH248: https://github.com/pydata/pandas/issues/248
-.. _GH253: https://github.com/pydata/pandas/issues/253
-.. _GH261: https://github.com/pydata/pandas/issues/261
-.. _GH234: https://github.com/pydata/pandas/issues/234
-.. _GH243: https://github.com/pydata/pandas/issues/243
-.. _GH211: https://github.com/pydata/pandas/issues/211
-.. _GH215: https://github.com/pydata/pandas/issues/215
-.. _GH212: https://github.com/pydata/pandas/issues/212
-.. _GH223: https://github.com/pydata/pandas/issues/223
-.. _GH244: https://github.com/pydata/pandas/issues/244
-.. _GH250: https://github.com/pydata/pandas/issues/250
-.. _GH271: https://github.com/pydata/pandas/issues/271
-.. _GH257: https://github.com/pydata/pandas/issues/257
-.. _GH258: https://github.com/pydata/pandas/issues/258
-.. _GH224: https://github.com/pydata/pandas/issues/224
-.. _GH231: https://github.com/pydata/pandas/issues/231
-.. _GH237: https://github.com/pydata/pandas/issues/237
-.. _GH238: https://github.com/pydata/pandas/issues/238
-.. _GH239: https://github.com/pydata/pandas/issues/239
-.. _GH246: https://github.com/pydata/pandas/issues/246
-.. _GH251: https://github.com/pydata/pandas/issues/251
-.. _GH254: https://github.com/pydata/pandas/issues/254
-.. _GH260: https://github.com/pydata/pandas/issues/260
-.. _GH262: https://github.com/pydata/pandas/issues/262
-.. _GH270: https://github.com/pydata/pandas/issues/270
-.. _GH285: https://github.com/pydata/pandas/issues/285
-
-
-pandas 0.4.3
-============
-
-Release notes
--------------
-
-**Release date:** 10/9/2011
-
-This is largely a bugfix release from 0.4.2 but also includes a handful of new
-and enhanced features. Also, pandas can now be installed and used on Python 3
-(thanks Thomas Kluyver!).
-
-**New features / modules**
-
- - Python 3 support using 2to3 (GH200_, Thomas Kluyver)
- - Add `name` attribute to `Series` and added relevant logic and tests. Name
- now prints as part of `Series.__repr__`
- - Add `name` attribute to standard Index so that stacking / unstacking does
- not discard names and so that indexed DataFrame objects can be reliably
- round-tripped to flat files, pickle, HDF5, etc.
- - Add `isnull` and `notnull` as instance methods on Series (GH209_, GH203_)
-
-**Improvements to existing features**
-
- - Skip xlrd-related unit tests if not installed
- - `Index.append` and `MultiIndex.append` can accept a list of Index objects to
- concatenate together
- - Altered binary operations on differently-indexed SparseSeries objects to use
- the integer-based (dense) alignment logic which is faster with a larger
- number of blocks (GH205_)
- - Refactored `Series.__repr__` to be a bit more clean and consistent
-
-**API Changes**
-
- - `Series.describe` and `DataFrame.describe` now bring the 25% and 75%
- quartiles instead of the 10% and 90% deciles. The other outputs have not
- changed
- - `Series.toString` will print deprecation warning, has been de-camelCased to
- `to_string`
-
-**Bug fixes**
-
- - Fix broken interaction between `Index` and `Int64Index` when calling
- intersection. Implement `Int64Index.intersection`
- - `MultiIndex.sortlevel` discarded the level names (GH202_)
- - Fix bugs in groupby, join, and append due to improper concatenation of
- `MultiIndex` objects (GH201_)
- - Fix regression from 0.4.1, `isnull` and `notnull` ceased to work on other
- kinds of Python scalar objects like `datetime.datetime`
- - Raise more helpful exception when attempting to write empty DataFrame or
- LongPanel to `HDFStore` (GH204_)
- - Use stdlib csv module to properly escape strings with commas in
- `DataFrame.to_csv` (GH206_, Thomas Kluyver)
- - Fix Python ndarray access in Cython code for sparse blocked index integrity
- check
- - Fix bug writing Series to CSV in Python 3 (GH209_)
- - Miscellaneous Python 3 bugfixes
-
-Thanks
-------
-
- - Thomas Kluyver
- - rsamson
-
-.. _GH200: https://github.com/pydata/pandas/issues/200
-.. _GH209: https://github.com/pydata/pandas/issues/209
-.. _GH203: https://github.com/pydata/pandas/issues/203
-.. _GH205: https://github.com/pydata/pandas/issues/205
-.. _GH202: https://github.com/pydata/pandas/issues/202
-.. _GH201: https://github.com/pydata/pandas/issues/201
-.. _GH204: https://github.com/pydata/pandas/issues/204
-.. _GH206: https://github.com/pydata/pandas/issues/206
-
-
-pandas 0.4.2
-============
-
-Release notes
--------------
-
-**Release date:** 10/3/2011
-
-This is a performance optimization release with several bug fixes. The new
-Int64Index and new merging / joining Cython code and related Python
-infrastructure are the main new additions
-
-**New features / modules**
-
- - Added fast `Int64Index` type with specialized join, union,
- intersection. Will result in significant performance enhancements for
- int64-based time series (e.g. using NumPy's datetime64 one day) and also
- faster operations on DataFrame objects storing record array-like data.
- - Refactored `Index` classes to have a `join` method and associated data
- alignment routines throughout the codebase to be able to leverage optimized
- joining / merging routines.
- - Added `Series.align` method for aligning two series with choice of join
- method
- - Wrote faster Cython data alignment / merging routines resulting in
- substantial speed increases
- - Added `is_monotonic` property to `Index` classes with associated Cython
- code to evaluate the monotonicity of the `Index` values
- - Add method `get_level_values` to `MultiIndex`
- - Implemented shallow copy of `BlockManager` object in `DataFrame` internals
-
-**Improvements to existing features**
-
- - Improved performance of `isnull` and `notnull`, a regression from v0.3.0
- (GH187_)
- - Wrote templating / code generation script to auto-generate Cython code for
- various functions which need to be available for the 4 major data types
- used in pandas (float64, bool, object, int64)
- - Refactored code related to `DataFrame.join` so that intermediate aligned
- copies of the data in each `DataFrame` argument do not need to be
- created. Substantial performance increases result (GH176_)
- - Substantially improved performance of generic `Index.intersection` and
- `Index.union`
- - Improved performance of `DateRange.union` with overlapping ranges and
- non-cacheable offsets (like Minute). Implemented analogous fast
- `DateRange.intersection` for overlapping ranges.
- - Implemented `BlockManager.take` resulting in significantly faster `take`
- performance on mixed-type `DataFrame` objects (GH104_)
- - Improved performance of `Series.sort_index`
- - Significant groupby performance enhancement: removed unnecessary integrity
- checks in DataFrame internals that were slowing down slicing operations to
- retrieve groups
- - Added informative Exception when passing dict to DataFrame groupby
- aggregation with axis != 0
-
-**API Changes**
-
-None
-
-**Bug fixes**
-
- - Fixed minor unhandled exception in Cython code implementing fast groupby
- aggregation operations
- - Fixed bug in unstacking code manifesting with more than 3 hierarchical
- levels
- - Throw exception when step specified in label-based slice (GH185_)
- - Fix isnull to correctly work with np.float32. Fix upstream bug described in
- GH182_
- - Finish implementation of as_index=False in groupby for DataFrame
- aggregation (GH181_)
- - Raise SkipTest for pre-epoch HDFStore failure. Real fix will be sorted out
- via datetime64 dtype
-
-Thanks
-------
-
-- Uri Laserson
-- Scott Sinclair
-
-.. _GH187: https://github.com/pydata/pandas/issues/187
-.. _GH176: https://github.com/pydata/pandas/issues/176
-.. _GH104: https://github.com/pydata/pandas/issues/104
-.. _GH185: https://github.com/pydata/pandas/issues/185
-.. _GH182: https://github.com/pydata/pandas/issues/182
-.. _GH181: https://github.com/pydata/pandas/issues/181
-
-
-pandas 0.4.1
-============
-
-Release notes
--------------
-
-**Release date:** 9/25/2011
-
-This is primarily a bug fix release but includes some new features and
-improvements
-
-**New features / modules**
-
- - Added new `DataFrame` methods `get_dtype_counts` and property `dtypes`
- - Setting of values using ``.ix`` indexing attribute in mixed-type DataFrame
- objects has been implemented (fixes GH135_)
- - `read_csv` can read multiple columns into a `MultiIndex`. DataFrame's
- `to_csv` method will properly write out a `MultiIndex` which can be read
- back (GH151_, thanks to Skipper Seabold)
- - Wrote fast time series merging / joining methods in Cython. Will be
- integrated later into DataFrame.join and related functions
- - Added `ignore_index` option to `DataFrame.append` for combining unindexed
- records stored in a DataFrame
-
-**Improvements to existing features**
-
- - Some speed enhancements with internal Index type-checking function
- - `DataFrame.rename` has a new `copy` parameter which can rename a DataFrame
- in place
- - Enable unstacking by level name (GH142_)
- - Enable sortlevel to work by level name (GH141_)
- - `read_csv` can automatically "sniff" other kinds of delimiters using
- `csv.Sniffer` (GH146_)
- - Improved speed of unit test suite by about 40%
- - Exception will not be raised calling `HDFStore.remove` on non-existent node
- with where clause
- - Optimized `_ensure_index` function resulting in performance savings in
- type-checking Index objects
-
-**API Changes**
-
-None
-
-**Bug fixes**
-
- - Fixed DataFrame constructor bug causing downstream problems (e.g. .copy()
- failing) when passing a Series as the values along with a column name and
- index
- - Fixed single-key groupby on DataFrame with as_index=False (GH160_)
- - `Series.shift` was failing on integer Series (GH154_)
- - `unstack` methods were producing incorrect output in the case of duplicate
- hierarchical labels. An exception will now be raised (GH147_)
- - Calling `count` with level argument caused reduceat failure or segfault in
- earlier NumPy (GH169_)
-  - Fixed `DataFrame.corrwith` to automatically exclude non-numeric data
-    (GH144_)
- - Unicode handling bug fixes in `DataFrame.to_string` (GH138_)
- - Excluding OLS degenerate unit test case that was causing platform specific
- failure (GH149_)
- - Skip blosc-dependent unit tests for PyTables < 2.2 (GH137_)
- - Calling `copy` on `DateRange` did not copy over attributes to the new object
- (GH168_)
- - Fix bug in `HDFStore` in which Panel data could be appended to a Table with
- different item order, thus resulting in an incorrect result read back
-
-Thanks
-------
-- Yaroslav Halchenko
-- Jeff Reback
-- Skipper Seabold
-- Dan Lovell
-- Nick Pentreath
-
-.. _GH135: https://github.com/pydata/pandas/issues/135
-.. _GH151: https://github.com/pydata/pandas/issues/151
-.. _GH142: https://github.com/pydata/pandas/issues/142
-.. _GH141: https://github.com/pydata/pandas/issues/141
-.. _GH146: https://github.com/pydata/pandas/issues/146
-.. _GH160: https://github.com/pydata/pandas/issues/160
-.. _GH154: https://github.com/pydata/pandas/issues/154
-.. _GH147: https://github.com/pydata/pandas/issues/147
-.. _GH169: https://github.com/pydata/pandas/issues/169
-.. _GH144: https://github.com/pydata/pandas/issues/144
-.. _GH138: https://github.com/pydata/pandas/issues/138
-.. _GH149: https://github.com/pydata/pandas/issues/149
-.. _GH137: https://github.com/pydata/pandas/issues/137
-.. _GH168: https://github.com/pydata/pandas/issues/168
-
-
-pandas 0.4.0
-============
-
-Release notes
--------------
-
-**Release date:** 9/12/2011
-
-**New features / modules**
-
- - `pandas.core.sparse` module: "Sparse" (mostly-NA, or some other fill value)
- versions of `Series`, `DataFrame`, and `Panel`. For low-density data, this
- will result in significant performance boosts, and smaller memory
- footprint. Added `to_sparse` methods to `Series`, `DataFrame`, and
- `Panel`. See online documentation for more on these
- - Fancy indexing operator on Series / DataFrame, e.g. via .ix operator. Both
-    getting and setting of values are supported; however, setting values will only
- currently work on homogeneously-typed DataFrame objects. Things like:
-
- * series.ix[[d1, d2, d3]]
- * frame.ix[5:10, ['C', 'B', 'A']], frame.ix[5:10, 'A':'C']
- * frame.ix[date1:date2]
-
- - Significantly enhanced `groupby` functionality
-
- * Can groupby multiple keys, e.g. df.groupby(['key1', 'key2']). Iteration with
-     multiple groupings produces a flattened tuple
- * "Nuisance" columns (non-aggregatable) will automatically be excluded from
- DataFrame aggregation operations
- * Added automatic "dispatching to Series / DataFrame methods to more easily
- invoke methods on groups. e.g. s.groupby(crit).std() will work even though
- `std` is not implemented on the `GroupBy` class
-
- - Hierarchical / multi-level indexing
-
-  * The new `MultiIndex` class. Integrated `MultiIndex` into `Series` and
-    `DataFrame` fancy indexing, slicing, __getitem__ and __setitem__,
- reindexing, etc. Added `level` keyword argument to `groupby` to enable
- grouping by a level of a `MultiIndex`
-
- - New data reshaping functions: `stack` and `unstack` on DataFrame and Series
-
- * Integrate with MultiIndex to enable sophisticated reshaping of data
-
- - `Index` objects (labels for axes) are now capable of holding tuples
- - `Series.describe`, `DataFrame.describe`: produces an R-like table of summary
- statistics about each data column
- - `DataFrame.quantile`, `Series.quantile` for computing sample quantiles of data
- across requested axis
- - Added general `DataFrame.dropna` method to replace `dropIncompleteRows` and
- `dropEmptyRows`, deprecated those.
- - `Series` arithmetic methods with optional fill_value for missing data,
- e.g. a.add(b, fill_value=0). If a location is missing for both it will still
- be missing in the result though.
- - fill_value option has been added to `DataFrame`.{add, mul, sub, div} methods
- similar to `Series`
- - Boolean indexing with `DataFrame` objects: data[data > 0.1] = 0.1 or
-    data[data > other] = 1.
- - `pytz` / tzinfo support in `DateRange`
-
- * `tz_localize`, `tz_normalize`, and `tz_validate` methods added
-
- - Added `ExcelFile` class to `pandas.io.parsers` for parsing multiple sheets out
- of a single Excel 2003 document
- - `GroupBy` aggregations can now optionally *broadcast*, e.g. produce an object
- of the same size with the aggregated value propagated
- - Added `select` function in all data structures: reindex axis based on
- arbitrary criterion (function returning boolean value),
- e.g. frame.select(lambda x: 'foo' in x, axis=1)
- - `DataFrame.consolidate` method, API function relating to redesigned internals
- - `DataFrame.insert` method for inserting column at a specified location rather
- than the default __setitem__ behavior (which puts it at the end)
- - `HDFStore` class in `pandas.io.pytables` has been largely rewritten using
- patches from Jeff Reback from others. It now supports mixed-type `DataFrame`
- and `Series` data and can store `Panel` objects. It also has the option to
- query `DataFrame` and `Panel` data. Loading data from legacy `HDFStore`
- files is supported explicitly in the code
- - Added `set_printoptions` method to modify appearance of DataFrame tabular
- output
- - `rolling_quantile` functions; a moving version of `Series.quantile` /
- `DataFrame.quantile`
- - Generic `rolling_apply` moving window function
- - New `drop` method added to `Series`, `DataFrame`, etc. which can drop a set of
- labels from an axis, producing a new object
- - `reindex` methods now sport a `copy` option so that data is not forced to be
-    copied when the resulting object is indexed the same
- - Added `sort_index` methods to Series and Panel. Renamed `DataFrame.sort`
- to `sort_index`. Leaving `DataFrame.sort` for now.
- - Added ``skipna`` option to statistical instance methods on all the data
- structures
- - `pandas.io.data` module providing a consistent interface for reading time
- series data from several different sources
-
-**Improvements to existing features**
-
- * The 2-dimensional `DataFrame` and `DataMatrix` classes have been extensively
- redesigned internally into a single class `DataFrame`, preserving where
- possible their optimal performance characteristics. This should reduce
- confusion from users about which class to use.
-
- * Note that under the hood there is a new essentially "lazy evaluation"
-      scheme with respect to adding columns to DataFrame. During some
- operations, like-typed blocks will be "consolidated" but not before.
-
- * `DataFrame` accessing columns repeatedly is now significantly faster than
- `DataMatrix` used to be in 0.3.0 due to an internal Series caching mechanism
- (which are all views on the underlying data)
- * Column ordering for mixed type data is now completely consistent in
- `DataFrame`. In prior releases, there was inconsistent column ordering in
- `DataMatrix`
- * Improved console / string formatting of DataMatrix with negative numbers
- * Improved tabular data parsing functions, `read_table` and `read_csv`:
-
- * Added `skiprows` and `na_values` arguments to `pandas.io.parsers` functions
- for more flexible IO
- * `parseCSV` / `read_csv` functions and others in `pandas.io.parsers` now can
- take a list of custom NA values, and also a list of rows to skip
-
- * Can slice `DataFrame` and get a view of the data (when homogeneously typed),
- e.g. frame.xs(idx, copy=False) or frame.ix[idx]
- * Many speed optimizations throughout `Series` and `DataFrame`
- * Eager evaluation of groups when calling ``groupby`` functions, so if there is
-    an exception with the grouping function it will be raised immediately versus
- sometime later on when the groups are needed
- * `datetools.WeekOfMonth` offset can be parameterized with `n` different than 1
- or -1.
- * Statistical methods on DataFrame like `mean`, `std`, `var`, `skew` will now
- ignore non-numerical data. Before a not very useful error message was
- generated. A flag `numeric_only` has been added to `DataFrame.sum` and
- `DataFrame.count` to enable this behavior in those methods if so desired
- (disabled by default)
- * `DataFrame.pivot` generalized to enable pivoting multiple columns into a
- `DataFrame` with hierarchical columns
- * `DataFrame` constructor can accept structured / record arrays
- * `Panel` constructor can accept a dict of DataFrame-like objects. Do not
- need to use `from_dict` anymore (`from_dict` is there to stay, though).
-
-**API Changes**
-
- * The `DataMatrix` variable now refers to `DataFrame`, will be removed within
- two releases
- * `WidePanel` is now known as `Panel`. The `WidePanel` variable in the pandas
- namespace now refers to the renamed `Panel` class
- * `LongPanel` and `Panel` / `WidePanel` now no longer have a common
- subclass. `LongPanel` is now a subclass of `DataFrame` having a number of
- additional methods and a hierarchical index instead of the old
- `LongPanelIndex` object, which has been removed. Legacy `LongPanel` pickles
- may not load properly
- * Cython is now required to build `pandas` from a development branch. This was
- done to avoid continuing to check in cythonized C files into source
- control. Builds from released source distributions will not require Cython
- * Cython code has been moved up to a top level `pandas/src` directory. Cython
- extension modules have been renamed and promoted from the `lib` subpackage to
- the top level, i.e.
-
- * `pandas.lib.tseries` -> `pandas._tseries`
- * `pandas.lib.sparse` -> `pandas._sparse`
-
- * `DataFrame` pickling format has changed. Backwards compatibility for legacy
- pickles is provided, but it's recommended to consider PyTables-based
- `HDFStore` for storing data with a longer expected shelf life
- * A `copy` argument has been added to the `DataFrame` constructor to avoid
- unnecessary copying of data. Data is no longer copied by default when passed
- into the constructor
- * Handling of boolean dtype in `DataFrame` has been improved to support storage
- of boolean data with NA / NaN values. Before it was being converted to float64
- so this should not (in theory) cause API breakage
- * To optimize performance, Index objects now only check that their labels are
- unique when uniqueness matters (i.e. when someone goes to perform a
- lookup). This is a potentially dangerous tradeoff, but will lead to much
- better performance in many places (like groupby).
- * Boolean indexing using Series must now have the same indices (labels)
- * Backwards compatibility support for begin/end/nPeriods keyword arguments in
- DateRange class has been removed
- * More intuitive / shorter filling aliases `ffill` (for `pad`) and `bfill` (for
- `backfill`) have been added to the functions that use them: `reindex`,
- `asfreq`, `fillna`.
- * `pandas.core.mixins` code moved to `pandas.core.generic`
- * `buffer` keyword arguments (e.g. `DataFrame.toString`) renamed to `buf` to
- avoid using Python built-in name
- * `DataFrame.rows()` removed (use `DataFrame.index`)
- * Added deprecation warning to `DataFrame.cols()`, to be removed in next release
- * `DataFrame` deprecations and de-camelCasing: `merge`, `asMatrix`,
- `toDataMatrix`, `_firstTimeWithValue`, `_lastTimeWithValue`, `toRecords`,
- `fromRecords`, `tgroupby`, `toString`
- * `pandas.io.parsers` method deprecations
-
- * `parseCSV` is now `read_csv` and keyword arguments have been de-camelCased
- * `parseText` is now `read_table`
- * `parseExcel` is replaced by the `ExcelFile` class and its `parse` method
-
- * `fillMethod` arguments (deprecated in prior release) removed, should be
- replaced with `method`
- * `Series.fill`, `DataFrame.fill`, and `Panel.fill` removed, use `fillna`
- instead
- * `groupby` functions now exclude NA / NaN values from the list of groups. This
- matches R behavior with NAs in factors e.g. with the `tapply` function
- * Removed `parseText`, `parseCSV` and `parseExcel` from pandas namespace
- * `Series.combineFunc` renamed to `Series.combine` and made a bit more general
- with a `fill_value` keyword argument defaulting to NaN
- * Removed `pandas.core.pytools` module. Code has been moved to
- `pandas.core.common`
- * Tacked on `groupName` attribute for groups in GroupBy renamed to `name`
- * Panel/LongPanel `dims` attribute renamed to `shape` to be more conformant
- * Slicing a `Series` returns a view now
- * More Series deprecations / renaming: `toCSV` to `to_csv`, `asOf` to `asof`,
- `merge` to `map`, `applymap` to `apply`, `toDict` to `to_dict`,
- `combineFirst` to `combine_first`. Will print `FutureWarning`.
- * `DataFrame.to_csv` does not write an "index" column label by default
- anymore since the output file can be read back without it. However, there
- is a new ``index_label`` argument. So you can do ``index_label='index'`` to
- emulate the old behavior
- * `datetools.Week` argument renamed from `dayOfWeek` to `weekday`
- * `timeRule` argument in `shift` has been deprecated in favor of using the
- `offset` argument for everything. So you can still pass a time rule string
- to `offset`
- * Added optional `encoding` argument to `read_csv`, `read_table`, `to_csv`,
- `from_csv` to handle unicode in python 2.x
-
-**Bug fixes**
-
- * Column ordering in `pandas.io.parsers.parseCSV` will match CSV in the presence
- of mixed-type data
- * Fixed handling of Excel 2003 dates in `pandas.io.parsers`
- * `DateRange` caching was happening with high resolution `DateOffset` objects,
- e.g. `DateOffset(seconds=1)`. This has been fixed
- * Fixed __truediv__ issue in `DataFrame`
- * Fixed `DataFrame.toCSV` bug preventing IO round trips in some cases
- * Fixed bug in `Series.plot` causing matplotlib to barf in exceptional cases
- * Disabled `Index` objects from being hashable, like ndarrays
- * Added `__ne__` implementation to `Index` so that operations like ts[ts != idx]
- will work
- * Added `__ne__` implementation to `DataFrame`
- * Bug / unintuitive result when calling `fillna` on unordered labels
- * Bug calling `sum` on boolean DataFrame
- * Bug fix when creating a DataFrame from a dict with scalar values
- * Series.{sum, mean, std, ...} now return NA/NaN when the whole Series is NA
- * NumPy 1.4 through 1.6 compatibility fixes
- * Fixed bug in bias correction in `rolling_cov`, was affecting `rolling_corr`
- too
- * R-square value was incorrect in the presence of fixed and time effects in
- the `PanelOLS` classes
- * `HDFStore` can handle duplicates in table format, will take
-
-Thanks
-------
- - Joon Ro
- - Michael Pennington
- - Chris Uga
- - Chris Withers
- - Jeff Reback
- - Ted Square
- - Craig Austin
- - William Ferreira
- - Daniel Fortunov
- - Tony Roberts
- - Martin Felder
- - John Marino
- - Tim McNamara
- - Justin Berka
- - Dieter Vandenbussche
- - Shane Conway
- - Skipper Seabold
- - Chris Jordan-Squire
-
-pandas 0.3.0
-============
-
-Release notes
--------------
-
-**Release date:** February 20, 2011
-
-**New features / modules**
-
- - `corrwith` function to compute column- or row-wise correlations between two
- DataFrame objects
- - Can boolean-index DataFrame objects, e.g. df[df > 2] = 2, px[px > last_px] = 0
- - Added comparison magic methods (__lt__, __gt__, etc.)
- - Flexible explicit arithmetic methods (add, mul, sub, div, etc.)
- - Added `reindex_like` method
- - Added `reindex_like` method to WidePanel
- - Convenience functions for accessing SQL-like databases in `pandas.io.sql`
- module
- - Added (still experimental) HDFStore class for storing pandas data
- structures using HDF5 / PyTables in `pandas.io.pytables` module
- - Added WeekOfMonth date offset
- - `pandas.rpy` (experimental) module created, provide some interfacing /
- conversion between rpy2 and pandas
-
-**Improvements**
-
- - Unit test coverage: 100% line coverage of core data structures
- - Speed enhancement to rolling_{median, max, min}
- - Column ordering between DataFrame and DataMatrix is now consistent: before
- DataFrame would not respect column order
- - Improved {Series, DataFrame}.plot methods to be more flexible (can pass
- matplotlib Axis arguments, plot DataFrame columns in multiple subplots,
- etc.)
-
-**API Changes**
-
- - Exponentially-weighted moment functions in `pandas.stats.moments` have a
- more consistent API and accept a min_periods argument like their regular
- moving counterparts.
- - **fillMethod** argument in Series, DataFrame changed to **method**,
- `FutureWarning` added.
- - **fill** method in Series, DataFrame/DataMatrix, WidePanel renamed to
- **fillna**, `FutureWarning` added to **fill**
- - Renamed **DataFrame.getXS** to **xs**, `FutureWarning` added
- - Removed **cap** and **floor** functions from DataFrame, renamed to
- **clip_upper** and **clip_lower** for consistency with NumPy
-
-**Bug fixes**
-
- - Fixed bug in IndexableSkiplist Cython code that was breaking
- rolling_max function
- - Numerous numpy.int64-related indexing fixes
- - Several NumPy 1.4.0 NaN-handling fixes
- - Bug fixes to pandas.io.parsers.parseCSV
- - Fixed `DateRange` caching issue with unusual date offsets
- - Fixed bug in `DateRange.union`
- - Fixed corner case in `IndexableSkiplist` implementation
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 60218a1d2480b..99d1703b9ca34 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -243,8 +243,6 @@
# extlinks alias
extlinks = {'issue': ('https://github.com/pydata/pandas/issues/%s',
- 'issue '),
- 'pull request': ('https://github.com/pydata/pandas/pulls/%s',
- 'pull request '),
- 'wiki': ('https://github.com/pydata/pandas/pulls/%s',
+ 'GH'),
+ 'wiki': ('https://github.com/pydata/pandas/wiki/%s',
'wiki ')}
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 21a79ffdb85fd..3534cd2b577f4 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -105,7 +105,6 @@ See the package overview for more detail about what's in the library.
.. toctree::
- :hidden:
:maxdepth: 3
whatsnew
@@ -132,4 +131,4 @@ See the package overview for more detail about what's in the library.
related
comparison_with_r
api
-
+ release
diff --git a/doc/source/release.rst b/doc/source/release.rst
new file mode 100644
index 0000000000000..fdff03217c050
--- /dev/null
+++ b/doc/source/release.rst
@@ -0,0 +1,3012 @@
+.. _release:
+
+=============
+Release Notes
+=============
+
+This is the list of changes to pandas between each release. For full details,
+see the commit logs at http://github.com/pydata/pandas
+
+What is it
+----------
+
+pandas is a Python package providing fast, flexible, and expressive data
+structures designed to make working with “relational” or “labeled” data both
+easy and intuitive. It aims to be the fundamental high-level building block for
+doing practical, real world data analysis in Python. Additionally, it has the
+broader goal of becoming the most powerful and flexible open source data
+analysis / manipulation tool available in any language.
+
+Where to get it
+---------------
+
+* Source code: http://github.com/pydata/pandas
+* Binary installers on PyPI: http://pypi.python.org/pypi/pandas
+* Documentation: http://pandas.pydata.org
+
+pandas 0.11.1
+=============
+
+**Release date:** not-yet-released
+
+**New features**
+
+ - ``pd.read_html()`` can now parse HTML strings, files or urls and returns a
+ list of ``DataFrame`` s courtesy of @cpcloud. (:issue:`3477`,
+ :issue:`3605`, :issue:`3606`)
+ - Support for reading Amazon S3 files. (:issue:`3504`)
+ - Added module for reading and writing Stata files: pandas.io.stata (:issue:`1512`)
+ includes ``to_stata`` DataFrame method, and a ``read_stata`` top-level reader
+ - Added support for writing in ``to_csv`` and reading in ``read_csv``,
+ multi-index columns. The ``header`` option in ``read_csv`` now accepts a
+ list of the rows from which to read the index. Added the option,
+ ``tupleize_cols`` to provide compatibility for the pre 0.11.1 behavior of
+ writing and reading multi-index columns via a list of tuples. The default in
+ 0.11.1 is to write lists of tuples and *not* interpret list of tuples as a
+ multi-index column.
+ Note: The default value will change in 0.12 to make the default *to* write and
+ read multi-index columns in the new format. (:issue:`3571`, :issue:`1651`, :issue:`3141`)
+ - Add iterator to ``Series.str`` (:issue:`3638`)
+ - ``pd.set_option()`` now allows N option, value pairs (:issue:`3667`).
+ - Added keyword parameters for different types of scatter_matrix subplots
+ - A ``filter`` method on grouped Series or DataFrames returns a subset of
+ the original (:issue:`3680`, :issue:`919`)
+ - Access to historical Google Finance data in pandas.io.data (:issue:`3814`)
+
+**Improvements to existing features**
+
+ - Fixed various issues with internal pprinting code, the repr() for various objects
+ including TimeStamp and Index now produces valid python code strings and
+ can be used to recreate the object, (:issue:`3038`, :issue:`3379`, :issue:`3251`, :issue:`3460`)
+ - ``convert_objects`` now accepts a ``copy`` parameter (defaults to ``True``)
+ - ``HDFStore``
+
+ - will retain index attributes (freq,tz,name) on recreation (:issue:`3499`)
+ - will warn with a ``AttributeConflictWarning`` if you are attempting to append
+ an index with a different frequency than the existing, or attempting
+ to append an index with a different name than the existing
+ - support datelike columns with a timezone as data_columns (:issue:`2852`)
+ - table writing performance improvements.
+ - support python3 (via ``PyTables 3.0.0``) (:issue:`3750`)
+ - Add modulo operator to Series, DataFrame
+ - Add ``date`` method to DatetimeIndex
+ - Simplified the API and added a describe method to Categorical
+ - ``melt`` now accepts the optional parameters ``var_name`` and ``value_name``
+ to specify custom column names of the returned DataFrame (:issue:`3649`),
+ thanks @hoechenberger
+ - clipboard functions use pyperclip (no dependencies on Windows, alternative
+ dependencies offered for Linux) (:issue:`3837`).
+ - Plotting functions now raise a ``TypeError`` before trying to plot anything
+ if the associated objects have a dtype of ``object`` (:issue:`1818`,
+ :issue:`3572`, :issue:`3911`, :issue:`3912`), but they will try to convert object arrays to
+ numeric arrays if possible so that you can still plot, for example, an
+ object array with floats. This happens before any drawing takes place which
+ eliminates any spurious plots from showing up.
+ - Added Faq section on repr display options, to help users customize their setup.
+ - ``where`` operations that result in block splitting are much faster (:issue:`3733`)
+ - Series and DataFrame hist methods now take a ``figsize`` argument (:issue:`3834`)
+ - DatetimeIndexes no longer try to convert mixed-integer indexes during join
+ operations (:issue:`3877`)
+ - Add ``unit`` keyword to ``Timestamp`` and ``to_datetime`` to enable passing of
+ integers or floats that are in an epoch unit of ``s, ms, us, ns``
+ (e.g. unix timestamps or epoch ``s``, with fractional seconds allowed) (:issue:`3540`)
+ - DataFrame corr method (spearman) is now cythonized.
+
+**API Changes**
+
+ - ``HDFStore``
+
+ - When removing an object, ``remove(key)`` raises
+ ``KeyError`` if the key is not a valid store object.
+ - raise a ``TypeError`` on passing ``where`` or ``columns``
+ to select with a Storer; these are invalid parameters at this time
+ - can now specify an ``encoding`` option to ``append/put``
+ to enable alternate encodings (:issue:`3750`)
+ - enable support for ``iterator/chunksize`` with ``read_hdf``
+ - The repr() for (Multi)Index now obeys display.max_seq_items rather
+ than numpy threshold print options. (:issue:`3426`, :issue:`3466`)
+ - Added mangle_dupe_cols option to read_table/csv, allowing users
+ to control legacy behaviour re dupe cols (A, A.1, A.2 vs A, A ) (:issue:`3468`)
+ Note: The default value will change in 0.12 to the "no mangle" behaviour,
+ If your code relies on this behaviour, explicitly specify mangle_dupe_cols=True
+ in your calls.
+ - Do not allow astypes on ``datetime64[ns]`` except to ``object``, and
+ ``timedelta64[ns]`` to ``object/int`` (:issue:`3425`)
+ - The behavior of ``datetime64`` dtypes has changed with respect to certain
+ so-called reduction operations (:issue:`3726`). The following operations now
+ raise a ``TypeError`` when performed on a ``Series`` and return an *empty*
+ ``Series`` when performed on a ``DataFrame`` similar to performing these
+ operations on, for example, a ``DataFrame`` of ``slice`` objects:
+ - sum, prod, mean, std, var, skew, kurt, corr, and cov
+ - Do not allow datetimelike/timedeltalike creation except with valid types
+ (e.g. cannot pass ``datetime64[ms]``) (:issue:`3423`)
+ - Add ``squeeze`` keyword to ``groupby`` to allow reduction from
+ DataFrame -> Series if groups are unique. Regression from 0.10.1,
+ partial revert on (:issue:`2893`) with (:issue:`3596`)
+ - Raise on ``iloc`` when boolean indexing with a label based indexer mask
+ e.g. a boolean Series, even with integer labels, will raise. Since ``iloc``
+ is purely positional based, the labels on the Series are not alignable (:issue:`3631`)
+ - The ``raise_on_error`` option to plotting methods is obviated by :issue:`3572`,
+ so it is removed. Plots now always raise when data cannot be plotted or the
+ object being plotted has a dtype of ``object``.
+ - ``DataFrame.interpolate()`` is now deprecated. Please use
+ ``DataFrame.fillna()`` and ``DataFrame.replace()`` instead (:issue:`3582`,
+ :issue:`3675`, :issue:`3676`).
+ - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
+ deprecated
+ - ``DataFrame.replace`` 's ``infer_types`` parameter is removed and now
+ performs conversion by default. (:issue:`3907`)
+ - Deprecated display.height, display.width is now only a formatting option
+ does not control triggering of summary, similar to < 0.11.0.
+ - Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column
+ to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (:issue:`3679`)
+ - io API changes
+
+ - added ``pandas.io.api`` for i/o imports
+ - removed ``Excel`` support to ``pandas.io.excel``
+ - added top-level ``pd.read_sql`` and ``to_sql`` DataFrame methods
+ - removed ``clipboard`` support to ``pandas.io.clipboard``
+ - replace top-level and instance methods ``save`` and ``load`` with
+ top-level ``read_pickle`` and ``to_pickle`` instance method, ``save`` and
+ ``load`` will give deprecation warning.
+ - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
+ deprecated
+ - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
+ deprecated
+ - Implement ``__nonzero__`` for ``NDFrame`` objects (:issue:`3691`, :issue:`3696`)
+ - ``as_matrix`` with mixed signed and unsigned dtypes will result in 2 x the lcd of the unsigned
+ as an int, maxing with ``int64``, to avoid precision issues (:issue:`3733`)
+ - ``na_values`` in a list provided to ``read_csv/read_excel`` will match string and numeric versions
+ e.g. ``na_values=['99']`` will match 99 whether the column ends up being int, float, or string (:issue:`3611`)
+ - ``read_html`` now defaults to ``None`` when reading, and falls back on
+ ``bs4`` + ``html5lib`` when lxml fails to parse. a list of parsers to try
+ until success is also valid
+ - more consistency in the to_datetime return types (given string/array of string inputs) (:issue:`3888`)
+
+**Bug Fixes**
+
+ - Fixed an esoteric excel reading bug, xlrd>= 0.9.0 now required for excel
+ support. Should provide python3 support (for reading) which has been
+ lacking. (:issue:`3164`)
+ - Allow unioning of date ranges sharing a timezone (:issue:`3491`)
+ - Fix to_csv issue when having a large number of rows and ``NaT`` in some
+ columns (:issue:`3437`)
+ - ``.loc`` was not raising when passed an integer list (:issue:`3449`)
+ - Unordered time series selection was misbehaving when using label slicing (:issue:`3448`)
+ - Fix sorting in a frame with a list of columns which contains datetime64[ns] dtypes (:issue:`3461`)
+ - DataFrames fetched via FRED now handle '.' as a NaN. (:issue:`3469`)
+ - Fix regression in a DataFrame apply with axis=1, objects were not being converted back
+ to base dtypes correctly (:issue:`3480`)
+ - Fix issue when storing uint dtypes in an HDFStore. (:issue:`3493`)
+ - Non-unique index support clarified (:issue:`3468`)
+
+ - Addressed handling of dupe columns in df.to_csv new and old (:issue:`3454`, :issue:`3457`)
+ - Fix assigning a new index to a duplicate index in a DataFrame would fail (:issue:`3468`)
+ - Fix construction of a DataFrame with a duplicate index
+ - ref_locs support to allow duplicative indices across dtypes,
+ allows iget support to always find the index (even across dtypes) (:issue:`2194`)
+ - applymap on a DataFrame with a non-unique index now works
+ (removed warning) (:issue:`2786`), and fix (:issue:`3230`)
+ - Fix to_csv to handle non-unique columns (:issue:`3495`)
+ - Duplicate indexes with getitem will return items in the correct order (:issue:`3455`, :issue:`3457`)
+ and handle missing elements like unique indices (:issue:`3561`)
+ - Duplicate indexes with an empty DataFrame.from_records will return a correct frame (:issue:`3562`)
+ - Concat to produce a non-unique columns when duplicates are across dtypes is fixed (:issue:`3602`)
+ - Non-unique indexing with a slice via ``loc`` and friends fixed (:issue:`3659`)
+ - Allow insert/delete to non-unique columns (:issue:`3679`)
+ - Extend ``reindex`` to correctly deal with non-unique indices (:issue:`3679`)
+ - ``DataFrame.itertuples()`` now works with frames with duplicate column
+ names (:issue:`3873`)
+
+ - Fixed bug in groupby with empty series referencing a variable before assignment. (:issue:`3510`)
+ - Fixed bug in mixed-frame assignment with aligned series (:issue:`3492`)
+ - Fixed bug in selecting month/quarter/year from a series would not select the time element
+ on the last day (:issue:`3546`)
+ - Fixed a couple of MultiIndex rendering bugs in df.to_html() (:issue:`3547`, :issue:`3553`)
+ - Properly convert np.datetime64 objects in a Series (:issue:`3416`)
+ - Raise a ``TypeError`` on invalid datetime/timedelta operations
+ e.g. add datetimes, multiple timedelta x datetime
+ - Fix ``.diff`` on datelike and timedelta operations (:issue:`3100`)
+ - ``combine_first`` not returning the same dtype in cases where it can (:issue:`3552`)
+ - Fixed bug with ``Panel.transpose`` argument aliases (:issue:`3556`)
+ - Fixed platform bug in ``PeriodIndex.take`` (:issue:`3579`)
+ - Fixed bug in incorrect conversion of datetime64[ns] in ``combine_first`` (:issue:`3593`)
+ - Fixed bug in reset_index with ``NaN`` in a multi-index (:issue:`3586`)
+ - ``fillna`` methods now raise a ``TypeError`` when the ``value`` parameter
+ is a ``list`` or ``tuple``.
+ - Fixed bug where a time-series was being selected in preference to an actual column name
+ in a frame (:issue:`3594`)
+ - Fix modulo and integer division on Series,DataFrames to act similarly to ``float`` dtypes to return
+ ``np.nan`` or ``np.inf`` as appropriate (:issue:`3590`)
+ - Fix incorrect dtype on groupby with ``as_index=False`` (:issue:`3610`)
+ - Fix ``read_csv/read_excel`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]``
+ was failing (:issue:`3611`)
+ - Disable HTML output in qtconsole again. (:issue:`3657`)
+ - Reworked the new repr display logic, which users found confusing. (:issue:`3663`)
+ - Fix indexing issue in ndim >= 3 with ``iloc`` (:issue:`3617`)
+ - Correctly parse date columns with embedded (nan/NaT) into datetime64[ns] dtype in ``read_csv``
+ when ``parse_dates`` is specified (:issue:`3062`)
+ - Fix not consolidating before to_csv (:issue:`3624`)
+ - Fix alignment issue when setitem in a DataFrame with a piece of a DataFrame (:issue:`3626`) or
+ a mixed DataFrame and a Series (:issue:`3668`)
+ - Fix plotting of unordered DatetimeIndex (:issue:`3601`)
+ - ``sql.write_frame`` failing when writing a single column to sqlite (:issue:`3628`),
+ thanks to @stonebig
+ - Fix pivoting with ``nan`` in the index (:issue:`3558`)
+ - Fix running of bs4 tests when it is not installed (:issue:`3605`)
+ - Fix parsing of html table (:issue:`3606`)
+ - ``read_html()`` now only allows a single backend: ``html5lib`` (:issue:`3616`)
+ - ``convert_objects`` with ``convert_dates='coerce'`` was parsing some single-letter strings
+ into today's date
+ - ``DataFrame.from_records`` did not accept empty recarrays (:issue:`3682`)
+ - ``DataFrame.to_csv`` will succeed with the deprecated option ``nanRep``, @tdsmith
+ - ``DataFrame.to_html`` and ``DataFrame.to_latex`` now accept a path for
+ their first argument (:issue:`3702`)
+ - Fix file tokenization error with \r delimiter and quoted fields (:issue:`3453`)
+ - Groupby transform with item-by-item not upcasting correctly (:issue:`3740`)
+ - Incorrectly read a HDFStore multi-index Frame with a column specification (:issue:`3748`)
+ - ``read_html`` now correctly skips tests (:issue:`3741`)
+ - PandasObjects raise TypeError when trying to hash (:issue:`3882`)
+ - Fix incorrect arguments passed to concat that are not list-like (e.g. concat(df1,df2)) (:issue:`3481`)
+ - Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes)
+ in ``read_csv`` (:issue:`3795`)
+ - Fix index name not propagating when using ``loc/ix`` (:issue:`3880`)
+ - Fix groupby when applying a custom function resulting in a returned DataFrame was
+ not converting dtypes (:issue:`3911`)
+ - Fixed a bug where ``DataFrame.replace`` with a compiled regular expression
+ in the ``to_replace`` argument wasn't working (:issue:`3907`)
+ - Fixed ``__truediv__`` in Python 2.7 with ``numexpr`` installed to actually do true division when dividing
+ two integer arrays with at least 10000 cells total (:issue:`3764`)
+ - Indexing with a string with seconds resolution not selecting from a time index (:issue:`3925`)
+
+.. _Gh3616: https://github.com/pydata/pandas/issues/3616
+
+pandas 0.11.0
+=============
+
+**Release date:** 2013-04-22
+
+**New features**
+
+ - New documentation section, ``10 Minutes to Pandas``
+ - New documentation section, ``Cookbook``
+ - Allow mixed dtypes (e.g ``float32/float64/int32/int16/int8``) to coexist in
+ DataFrames and propagate in operations
+ - Add function to pandas.io.data for retrieving stock index components from
+ Yahoo! finance (:issue:`2795`)
+ - Support slicing with time objects (:issue:`2681`)
+ - Added ``.iloc`` attribute, to support strict integer based indexing,
+ analogous to ``.ix`` (:issue:`2922`)
+ - Added ``.loc`` attribute, to support strict label based indexing, analogous
+ to ``.ix`` (:issue:`3053`)
+ - Added ``.iat`` attribute, to support fast scalar access via integers
+ (replaces ``iget_value/iset_value``)
+ - Added ``.at`` attribute, to support fast scalar access via labels (replaces
+ ``get_value/set_value``)
+ - Moved functionality from ``irow,icol,iget_value/iset_value`` to ``.iloc`` indexer
+ (via ``_ixs`` methods in each object)
+ - Added support for expression evaluation using the ``numexpr`` library
+ - Added ``convert=boolean`` to ``take`` routines to translate negative
+ indices to positive, defaults to True
+ - Added to_series() method to indices, to facilitate the creation of indexers
+ (:issue:`3275`)
+
+**Improvements to existing features**
+
+ - Improved performance of df.to_csv() by up to 10x in some cases. (:issue:`3059`)
+ - added ``blocks`` attribute to DataFrames, to return a dict of dtypes to
+ homogeneously dtyped DataFrames
+ - added keyword ``convert_numeric`` to ``convert_objects()`` to try to
+ convert object dtypes to numeric types (default is False)
+ - ``convert_dates`` in ``convert_objects`` can now be ``coerce`` which will
+ return a datetime64[ns] dtype with non-convertibles set as ``NaT``; will
+ preserve an all-nan object (e.g. strings), default is True (to perform
+ soft-conversion)
+ - Series print output now includes the dtype by default
+ - Optimize internal reindexing routines (:issue:`2819`, :issue:`2867`)
+ - ``describe_option()`` now reports the default and current value of options.
+ - Add ``format`` option to ``pandas.to_datetime`` with faster conversion of
+ strings that can be parsed with datetime.strptime
+ - Add ``axes`` property to ``Series`` for compatibility
+ - Add ``xs`` function to ``Series`` for compatibility
+ - Allow setitem in a frame where only mixed numerics are present (e.g. int
+ and float), (:issue:`3037`)
+ - ``HDFStore``
+
+ - Provide dotted attribute access to ``get`` from stores
+ (e.g. store.df == store['df'])
+ - New keywords ``iterator=boolean``, and ``chunksize=number_in_a_chunk``
+ are provided to support iteration on ``select`` and
+ ``select_as_multiple`` (:issue:`3076`)
+ - support ``read_hdf/to_hdf`` API similar to ``read_csv/to_csv`` (:issue:`3222`)
+
+ - Add ``squeeze`` method to possibly remove length 1 dimensions from an
+ object.
+
+ .. ipython:: python
+
+ p = Panel(randn(3,4,4),items=['ItemA','ItemB','ItemC'],
+ major_axis=date_range('20010102',periods=4),
+ minor_axis=['A','B','C','D'])
+ p
+ p.reindex(items=['ItemA']).squeeze()
+ p.reindex(items=['ItemA'],minor=['B']).squeeze()
+
+ - Improvement to Yahoo API access in ``pd.io.data.Options`` (:issue:`2758`)
+ - added option `display.max_seq_items` to control the number of
+ elements printed per sequence pprinting it. (:issue:`2979`)
+ - added option `display.chop_threshold` to control display of small numerical
+ values. (:issue:`2739`)
+ - added option `display.max_info_rows` to prevent verbose_info from being
+ calculated for frames above 1M rows (configurable). (:issue:`2807`, :issue:`2918`)
+ - value_counts() now accepts a "normalize" argument, for normalized
+ histograms. (:issue:`2710`).
+ - DataFrame.from_records now accepts not only dicts but any instance of
+ the collections.Mapping ABC.
+ - Allow selection semantics via a string with a datelike index to work in both
+ Series and DataFrames (:issue:`3070`)
+
+ .. ipython:: python
+
+ idx = date_range("2001-10-1", periods=5, freq='M')
+ ts = Series(np.random.rand(len(idx)),index=idx)
+ ts['2001']
+
+ df = DataFrame(dict(A = ts))
+ df['2001']
+
+ - added option `display.mpl_style` providing a sleeker visual style
+ for plots. Based on https://gist.github.com/huyng/816622 (:issue:`3075`).
+
+
+ - Improved performance across several core functions by taking memory
+ ordering of arrays into account. Courtesy of @stephenwlin (:issue:`3130`)
+ - Improved performance of groupby transform method (:issue:`2121`)
+ - Handle "ragged" CSV files missing trailing delimiters in rows with missing
+ fields when also providing explicit list of column names (so the parser
+ knows how many columns to expect in the result) (:issue:`2981`)
+ - On a mixed DataFrame, allow setting with indexers with ndarray/DataFrame
+ on rhs (:issue:`3216`)
+ - Treat boolean values as integers (values 1 and 0) for numeric
+ operations. (:issue:`2641`)
+ - Add ``time`` method to DatetimeIndex (:issue:`3180`)
+ - Return NA when using Series.str[...] for values that are not long enough
+ (:issue:`3223`)
+ - Display cursor coordinate information in time-series plots (:issue:`1670`)
+ - to_html() now accepts an optional "escape" argument to control reserved
+ HTML character escaping (enabled by default) and escapes ``&``, in addition
+ to ``<`` and ``>``. (:issue:`2919`)
+
+**API Changes**
+
+ - Do not automatically upcast numeric specified dtypes to ``int64`` or
+ ``float64`` (:issue:`622` and :issue:`797`)
+ - DataFrame construction of lists and scalars, with no dtype present, will
+ result in casting to ``int64`` or ``float64``, regardless of platform.
+ This is not an apparent change in the API, but noting it.
+ - Guarantee that ``convert_objects()`` for Series/DataFrame always returns a
+ copy
+ - groupby operations will respect dtypes for numeric float operations
+ (float32/float64); other types will be operated on, and will try to cast
+ back to the input dtype (e.g. if an int is passed, as long as the output
+ doesn't have nans, then an int will be returned)
+ - backfill/pad/take/diff/ohlc will now support ``float32/int16/int8``
+ operations
+ - Block types will upcast as needed in where/masking operations (:issue:`2793`)
+ - Series now automatically will try to set the correct dtype based on passed
+ datetimelike objects (datetime/Timestamp)
+
+ - timedelta64 are returned in appropriate cases (e.g. Series - Series,
+ when both are datetime64)
+ - mixed datetimes and objects (:issue:`2751`) in a constructor will be cast
+ correctly
+ - astype on datetimes to object are now handled (as well as NaT
+ conversions to np.nan)
+ - all timedelta like objects will be correctly assigned to ``timedelta64``
+ with mixed ``NaN`` and/or ``NaT`` allowed
+
+ - arguments to DataFrame.clip were inconsistent to numpy and Series clipping
+ (:issue:`2747`)
+ - util.testing.assert_frame_equal now checks the column and index names (:issue:`2964`)
+ - Constructors will now return a more informative ValueError on failures
+ when invalid shapes are passed
+ - Don't suppress TypeError in GroupBy.agg (:issue:`3238`)
+ - Methods return None when inplace=True (:issue:`1893`)
+ - ``HDFStore``
+
+ - added the method ``select_column`` to select a single column from a table as a Series.
+ - deprecated the ``unique`` method, can be replicated by ``select_column(key,column).unique()``
+ - ``min_itemsize`` parameter will now automatically create data_columns for passed keys
+
+ - Downcast on pivot if possible (:issue:`3283`), adds argument ``downcast`` to ``fillna``
+ - Introduced options `display.height/width` for explicitly specifying terminal
+ height/width in characters. Deprecated display.line_width, now replaced by display.width.
+ These defaults are in effect for scripts as well, so unless disabled, previously
+ very wide output will now be output as "expand_repr" style wrapped output.
+ - Various defaults for options (including display.max_rows) have been revised,
+ after a brief survey concluded they were wrong for everyone. Now at w=80,h=60.
+ - HTML repr output in IPython qtconsole is once again controlled by the option
+ `display.notebook_repr_html`, and on by default.
+
+**Bug Fixes**
+
+ - Fix seg fault on empty data frame when fillna with ``pad`` or ``backfill``
+ (:issue:`2778`)
+ - Single element ndarrays of datetimelike objects are handled
+ (e.g. np.array(datetime(2001,1,1,0,0))), w/o dtype being passed
+ - 0-dim ndarrays with a passed dtype are handled correctly
+ (e.g. np.array(0.,dtype='float32'))
+ - Fix some boolean indexing inconsistencies in Series.__getitem__/__setitem__
+ (:issue:`2776`)
+ - Fix issues with DataFrame and Series constructor with integers that
+ overflow ``int64`` and some mixed typed type lists (:issue:`2845`)
+
+ - ``HDFStore``
+
+ - Fix weird PyTables error when using too many selectors in a where
+ also correctly filter on any number of values in a Term expression
+ (so not using numexpr filtering, but isin filtering)
+ - Internally, change all variables to be private-like (now have leading
+ underscore)
+ - Fixes for query parsing to correctly interpret boolean and != (:issue:`2849`, :issue:`2973`)
+ - Fixes for pathological case on SparseSeries with 0-len array and
+ compression (:issue:`2931`)
+ - Fixes bug with writing rows if part of a block was all-nan (:issue:`3012`)
+ - Exceptions are now ValueError or TypeError as needed
+ - A table will now raise if min_itemsize contains fields which are not queryables
+
+ - Bug showing up in applymap where some object type columns are converted (:issue:`2909`)
+ had an incorrect default in convert_objects
+
+ - TimeDeltas
+
+ - Series ops with a Timestamp on the rhs was throwing an exception (:issue:`2898`)
+ added tests for Series ops with datetimes,timedeltas,Timestamps, and datelike
+ Series on both lhs and rhs
+ - Fixed subtle timedelta64 inference issue on py3 & numpy 1.7.0 (:issue:`3094`)
+ - Fixed some formatting issues on timedelta when negative
+ - Support null checking on timedelta64, representing (and formatting) with NaT
+ - Support setitem with np.nan value, converts to NaT
+ - Support min/max ops in a Dataframe (abs not working, nor do we error on non-supported ops)
+ - Support idxmin/idxmax/abs/max/min in a Series (:issue:`2989`, :issue:`2982`)
+
+ - Bug on in-place putmasking on an ``integer`` series that needs to be converted to
+ ``float`` (:issue:`2746`)
+ - Bug in argsort of ``datetime64[ns]`` Series with ``NaT`` (:issue:`2967`)
+ - Bug in value_counts of ``datetime64[ns]`` Series (:issue:`3002`)
+ - Fixed printing of ``NaT`` in an index
+ - Bug in idxmin/idxmax of ``datetime64[ns]`` Series with ``NaT`` (:issue:`2982`)
+ - Bug in ``icol, take`` with negative indices was producing incorrect return
+ values (see :issue:`2922`, :issue:`2892`), also check for out-of-bounds indices (:issue:`3029`)
+ - Bug in DataFrame column insertion when the column creation fails, existing frame is left in
+ an irrecoverable state (:issue:`3010`)
+ - Bug in DataFrame update, combine_first where non-specified values could cause
+ dtype changes (:issue:`3016`, :issue:`3041`)
+ - Bug in groupby with first/last where dtypes could change (:issue:`3041`, :issue:`2763`)
+ - Formatting of an index that has ``nan`` was inconsistent or wrong (would fill from
+ other values), (:issue:`2850`)
+ - Unstack of a frame with no nans would always cause dtype upcasting (:issue:`2929`)
+ - Fix scalar datetime.datetime parsing bug in read_csv (:issue:`3071`)
+ - Fixed slow printing of large Dataframes, due to inefficient dtype
+ reporting (:issue:`2807`)
+ - Fixed a segfault when using a function as grouper in groupby (:issue:`3035`)
+ - Fix pretty-printing of infinite data structures (closes :issue:`2978`)
+ - Fixed exception when plotting timeseries bearing a timezone (closes :issue:`2877`)
+ - str.contains ignored na argument (:issue:`2806`)
+ - Substitute warning for segfault when grouping with categorical grouper
+ of mismatched length (:issue:`3011`)
+ - Fix exception in SparseSeries.density (:issue:`2083`)
+ - Fix upsampling bug with closed='left' and daily to daily data (:issue:`3020`)
+ - Fixed missing tick bars on scatter_matrix plot (:issue:`3063`)
+ - Fixed bug in Timestamp(d,tz=foo) when d is date() rather then datetime() (:issue:`2993`)
+ - series.plot(kind='bar') now respects pylab color scheme (:issue:`3115`)
+ - Fixed bug in reshape if not passed correct input, now raises TypeError (:issue:`2719`)
+ - Fixed a bug where Series ctor did not respect ordering if OrderedDict passed in (:issue:`3282`)
+ - Fix NameError issue on RESO_US (:issue:`2787`)
+ - Allow selection in an *unordered* timeseries to work similarly
+ to an *ordered* timeseries (:issue:`2437`).
+ - Fix implemented ``.xs`` when called with ``axes=1`` and a level parameter (:issue:`2903`)
+ - Timestamp now supports the class method fromordinal similar to datetimes (:issue:`3042`)
+ - Fix issue with indexing a series with a boolean key and specifying a 1-len list on the rhs (:issue:`2745`)
+ or a list on the rhs (:issue:`3235`)
+ - Fixed bug in groupby apply when kernel generate list of arrays having unequal len (:issue:`1738`)
+ - fixed handling of rolling_corr with center=True which could produce corr>1 (:issue:`3155`)
+ - Fixed issues where indices can be passed as 'index/column' in addition to 0/1 for the axis parameter
+ - PeriodIndex.tolist now boxes to Period (:issue:`3178`)
+ - PeriodIndex.get_loc KeyError now reports Period instead of ordinal (:issue:`3179`)
+ - df.to_records bug when handling MultiIndex (:issue:`3189`)
+ - Fix Series.__getitem__ segfault when index less than -length (:issue:`3168`)
+ - Fix bug when using Timestamp as a date parser (:issue:`2932`)
+ - Fix bug creating date range from Timestamp with time zone and passing same
+ time zone (:issue:`2926`)
+ - Add comparison operators to Period object (:issue:`2781`)
+ - Fix bug when concatenating two Series into a DataFrame when they have the
+ same name (:issue:`2797`)
+ - Fix automatic color cycling when plotting consecutive timeseries
+ without color arguments (:issue:`2816`)
+ - fixed bug in the pickling of PeriodIndex (:issue:`2891`)
+ - Upcast/split blocks when needed in a mixed DataFrame when setitem
+ with an indexer (:issue:`3216`)
+ - Invoking df.applymap on a dataframe with dupe cols now raises a ValueError (:issue:`2786`)
+ - Apply with invalid returned indices raise correct Exception (:issue:`2808`)
+ - Fixed a bug in plotting log-scale bar plots (:issue:`3247`)
+ - df.plot() grid on/off now obeys the mpl default style, just like
+ series.plot(). (:issue:`3233`)
+ - Fixed a bug in the legend of plotting.andrews_curves() (:issue:`3278`)
+ - Produce a series on apply if we only generate a singular series and have
+ a simple index (:issue:`2893`)
+ - Fix Python ascii file parsing when integer falls outside of floating point
+ spacing (:issue:`3258`)
+ - fixed pretty printing of sets (:issue:`3294`)
+ - Panel() and Panel.from_dict() now respects ordering when given OrderedDict (:issue:`3303`)
+ - DataFrame where with a datetimelike incorrectly selecting (:issue:`3311`)
+ - Ensure index casts work even in Int64Index
+ - Fix set_index segfault when passing MultiIndex (:issue:`3308`)
+ - Ensure pickles created in py2 can be read in py3
+ - Insert ellipsis in MultiIndex summary repr (:issue:`3348`)
+ - Groupby will handle mutation among an input groups columns (and fallback
+ to non-fast apply) (:issue:`3380`)
+ - Eliminated unicode errors on FreeBSD when using MPL GTK backend (:issue:`3360`)
+ - Period.strftime should return unicode strings always (:issue:`3363`)
+ - Respect passed read_* chunksize in get_chunk function (:issue:`3406`)
+
+
+pandas 0.10.1
+=============
+
+**Release date:** 2013-01-22
+
+**New features**
+
+ - Add data interface to World Bank WDI pandas.io.wb (:issue:`2592`)
+
+**API Changes**
+
+ - Restored inplace=True behavior returning self (same object) with
+ deprecation warning until 0.11 (:issue:`1893`)
+ - ``HDFStore``
+
+ - refactored HDFStore to deal with non-table stores as objects, will allow future enhancements
+ - removed keyword ``compression`` from ``put`` (replaced by keyword
+ ``complib`` to be consistent across library)
+ - warn `PerformanceWarning` if you are attempting to store types that will be pickled by PyTables
+
+**Improvements to existing features**
+
+ - ``HDFStore``
+
+ - enables storing of multi-index dataframes (closes :issue:`1277`)
+ - support data column indexing and selection, via ``data_columns`` keyword
+ in append
+ - support write chunking to reduce memory footprint, via ``chunksize``
+ keyword to append
+ - support automagic indexing via ``index`` keyword to append
+ - support ``expectedrows`` keyword in append to inform ``PyTables`` about
+ the expected tablesize
+ - support ``start`` and ``stop`` keywords in select to limit the row
+ selection space
+ - added ``get_store`` context manager to automatically import with pandas
+ - added column filtering via ``columns`` keyword in select
+ - added methods append_to_multiple/select_as_multiple/select_as_coordinates
+ to do multiple-table append/selection
+ - added support for datetime64 in columns
+ - added method ``unique`` to select the unique values in an indexable or
+ data column
+ - added method ``copy`` to copy an existing store (and possibly upgrade)
+ - show the shape of the data on disk for non-table stores when printing the
+ store
+ - added ability to read PyTables flavor tables (allows compatibility to
+ other HDF5 systems)
+
+ - Add ``logx`` option to DataFrame/Series.plot (:issue:`2327`, :issue:`2565`)
+ - Support reading gzipped data from file-like object
+ - ``pivot_table`` aggfunc can be anything used in GroupBy.aggregate (:issue:`2643`)
+ - Implement DataFrame merges in case where set cardinalities might overflow
+ 64-bit integer (:issue:`2690`)
+ - Raise exception in C file parser if integer dtype specified and have NA
+ values. (:issue:`2631`)
+ - Attempt to parse ISO8601 format dates when parse_dates=True in read_csv for
+ major performance boost in such cases (:issue:`2698`)
+ - Add methods ``neg`` and ``inv`` to Series
+ - Implement ``kind`` option in ``ExcelFile`` to indicate whether it's an XLS
+ or XLSX file (:issue:`2613`)
+
+**Bug fixes**
+
+ - Fix read_csv/read_table multithreading issues (:issue:`2608`)
+ - ``HDFStore``
+
+ - correctly handle ``nan`` elements in string columns; serialize via the
+ ``nan_rep`` keyword to append
+ - raise correctly on non-implemented column types (unicode/date)
+ - handle correctly ``Term`` passed types (e.g. ``index<1000``, when index
+ is ``Int64``), (closes :issue:`512`)
+ - handle Timestamp correctly in data_columns (closes :issue:`2637`)
+ - contains correctly matches on non-natural names
+ - correctly store ``float32`` dtypes in tables (if not other float types in
+ the same table)
+
+ - Fix DataFrame.info bug with UTF8-encoded columns. (:issue:`2576`)
+ - Fix DatetimeIndex handling of FixedOffset tz (:issue:`2604`)
+ - More robust detection of being in IPython session for wide DataFrame
+ console formatting (:issue:`2585`)
+ - Fix platform issues with ``file:///`` in unit test (:issue:`2564`)
+ - Fix bug and possible segfault when grouping by hierarchical level that
+ contains NA values (:issue:`2616`)
+ - Ensure that MultiIndex tuples can be constructed with NAs (:issue:`2616`)
+ - Fix int64 overflow issue when unstacking MultiIndex with many levels
+ (:issue:`2616`)
+ - Exclude non-numeric data from DataFrame.quantile by default (:issue:`2625`)
+ - Fix a Cython C int64 boxing issue causing read_csv to return incorrect
+ results (:issue:`2599`)
+ - Fix groupby summing performance issue on boolean data (:issue:`2692`)
+ - Don't bork Series containing datetime64 values with to_datetime (:issue:`2699`)
+ - Fix DataFrame.from_records corner case when passed columns, index column,
+ but empty record list (:issue:`2633`)
+ - Fix C parser-tokenizer bug with trailing fields. (:issue:`2668`)
+ - Don't exclude non-numeric data from GroupBy.max/min (:issue:`2700`)
+ - Don't lose time zone when calling DatetimeIndex.drop (:issue:`2621`)
+ - Fix setitem on a Series with a boolean key and a non-scalar as value
+ (:issue:`2686`)
+ - Box datetime64 values in Series.apply/map (:issue:`2627`, :issue:`2689`)
+ - Upconvert datetime + datetime64 values when concatenating frames (:issue:`2624`)
+ - Raise a more helpful error message in merge operations when one DataFrame
+ has duplicate columns (:issue:`2649`)
+ - Fix partial date parsing issue occurring only when code is run at EOM
+ (:issue:`2618`)
+ - Prevent MemoryError when using counting sort in sortlevel with
+ high-cardinality MultiIndex objects (:issue:`2684`)
+ - Fix Period resampling bug when all values fall into a single bin (:issue:`2070`)
+ - Fix buggy interaction with usecols argument in read_csv when there is an
+ implicit first index column (:issue:`2654`)
+
+
+pandas 0.10.0
+=============
+
+**Release date:** 2012-12-17
+
+**New features**
+
+ - Brand new high-performance delimited file parsing engine written in C and
+ Cython. 50% or better performance in many standard use cases with a
+ fraction as much memory usage. (:issue:`407`, :issue:`821`)
+ - Many new file parser (read_csv, read_table) features:
+
+ - Support for on-the-fly gzip or bz2 decompression (`compression` option)
+ - Ability to get back numpy.recarray instead of DataFrame
+ (`as_recarray=True`)
+ - `dtype` option: explicit column dtypes
+ - `usecols` option: specify list of columns to be read from a file. Good
+ for reading very wide files with many irrelevant columns (:issue:`1216` :issue:`926`, :issue:`2465`)
+ - Enhanced unicode decoding support via `encoding` option
+ - `skipinitialspace` dialect option
+ - Can specify strings to be recognized as True (`true_values`) or False
+ (`false_values`)
+ - High-performance `delim_whitespace` option for whitespace-delimited
+ files; a preferred alternative to the '\s+' regular expression delimiter
+ - Option to skip "bad" lines (wrong number of fields) that would otherwise
+ have caused an error in the past (`error_bad_lines` and `warn_bad_lines`
+ options)
+ - Substantially improved performance in the parsing of integers with
+ thousands markers and lines with comments
+ - Ease of European (and other) decimal formats (`decimal` option) (:issue:`584`, :issue:`2466`)
+ - Custom line terminators (e.g. lineterminator='~') (:issue:`2457`)
+ - Handling of no trailing commas in CSV files (:issue:`2333`)
+ - Ability to handle fractional seconds in date_converters (:issue:`2209`)
+ - read_csv allow scalar arg to na_values (:issue:`1944`)
+ - Explicit column dtype specification in read_* functions (:issue:`1858`)
+ - Easier CSV dialect specification (:issue:`1743`)
+ - Improve parser performance when handling special characters (:issue:`1204`)
+
+ - Google Analytics API integration with easy oauth2 workflow (:issue:`2283`)
+ - Add error handling to Series.str.encode/decode (:issue:`2276`)
+ - Add ``where`` and ``mask`` to Series (:issue:`2337`)
+ - Grouped histogram via `by` keyword in Series/DataFrame.hist (:issue:`2186`)
+ - Support optional ``min_periods`` keyword in ``corr`` and ``cov``
+ for both Series and DataFrame (:issue:`2002`)
+ - Add ``duplicated`` and ``drop_duplicates`` functions to Series (:issue:`1923`)
+ - Add docs for ``HDFStore table`` format
+ - 'density' property in `SparseSeries` (:issue:`2384`)
+ - Add ``ffill`` and ``bfill`` convenience functions for forward- and
+ backfilling time series data (:issue:`2284`)
+ - New option configuration system and functions `set_option`, `get_option`,
+ `describe_option`, and `reset_option`. Deprecate `set_printoptions` and
+ `reset_printoptions` (:issue:`2393`).
+ You can also access options as attributes via ``pandas.options.X``
+ - Wide DataFrames can be viewed more easily in the console with new
+ `expand_frame_repr` and `line_width` configuration options. This is on by
+ default now (:issue:`2436`)
+ - Scikits.timeseries-like moving window functions via ``rolling_window`` (:issue:`1270`)
+
+**Experimental Features**
+
+ - Add support for Panel4D, a named 4 Dimensional structure
+ - Add support for ndpanel factory functions, to create custom,
+ domain-specific N-Dimensional containers
+
+**API Changes**
+
+ - The default binning/labeling behavior for ``resample`` has been changed to
+ `closed='left', label='left'` for daily and lower frequencies. This had
+ been a large source of confusion for users. See "what's new" page for more
+ on this. (:issue:`2410`)
+ - Methods with ``inplace`` option now return None instead of the calling
+ (modified) object (:issue:`1893`)
+ - The special case DataFrame - TimeSeries doing column-by-column broadcasting
+ has been deprecated. Users should explicitly do e.g. df.sub(ts, axis=0)
+ instead. This is a legacy hack and can lead to subtle bugs.
+ - inf/-inf are no longer considered as NA by isnull/notnull. To be clear, this
+ is legacy cruft from early pandas. This behavior can be globally re-enabled
+ using the new option ``mode.use_inf_as_null`` (:issue:`2050`, :issue:`1919`)
+ - ``pandas.merge`` will now default to ``sort=False``. For many use cases
+ sorting the join keys is not necessary, and doing it by default is wasteful
+ - Specify ``header=0`` explicitly to replace existing column names in file in
+ read_* functions.
+ - Default column names for header-less parsed files (yielded by read_csv,
+ etc.) are now the integers 0, 1, .... A new argument `prefix` has been
+ added; to get the v0.9.x behavior specify ``prefix='X'`` (:issue:`2034`). This API
+ change was made to make the default column names more consistent with the
+ DataFrame constructor's default column names when none are specified.
+ - DataFrame selection using a boolean frame now preserves input shape
+ - If function passed to Series.apply yields a Series, result will be a
+ DataFrame (:issue:`2316`)
+ - Values like YES/NO/yes/no will not be considered as boolean by default any
+ longer in the file parsers. This can be customized using the new
+ ``true_values`` and ``false_values`` options (:issue:`2360`)
+ - `obj.fillna()` is no longer valid; make `method='pad'` no longer the
+ default option, to be more explicit about what kind of filling to
+ perform. Add `ffill/bfill` convenience functions per above (:issue:`2284`)
+ - `HDFStore.keys()` now returns an absolute path-name for each key
+ - `to_string()` now always returns a unicode string. (:issue:`2224`)
+ - File parsers will not handle NA sentinel values arising from passed
+ converter functions
+
+**Improvements to existing features**
+
+ - Add ``nrows`` option to DataFrame.from_records for iterators (:issue:`1794`)
+ - Unstack/reshape algorithm rewrite to avoid high memory use in cases where
+ the number of observed key-tuples is much smaller than the total possible
+ number that could occur (:issue:`2278`). Also improves performance in most cases.
+ - Support duplicate columns in DataFrame.from_records (:issue:`2179`)
+ - Add ``normalize`` option to Series/DataFrame.asfreq (:issue:`2137`)
+ - SparseSeries and SparseDataFrame construction from empty and scalar
+ values now no longer create dense ndarrays unnecessarily (:issue:`2322`)
+ - ``HDFStore`` now supports hierarchial keys (:issue:`2397`)
+ - Support multiple query selection formats for ``HDFStore tables`` (:issue:`1996`)
+ - Support ``del store['df']`` syntax to delete HDFStores
+ - Add multi-dtype support for ``HDFStore tables``
+ - ``min_itemsize`` parameter can be specified in ``HDFStore table`` creation
+ - Indexing support in ``HDFStore tables`` (:issue:`698`)
+ - Add `line_terminator` option to DataFrame.to_csv (:issue:`2383`)
+ - added implementation of str(x)/unicode(x)/bytes(x) to major pandas data
+ structures, which should do the right thing on both py2.x and py3.x. (:issue:`2224`)
+ - Reduce groupby.apply overhead substantially by low-level manipulation of
+ internal NumPy arrays in DataFrames (:issue:`535`)
+ - Implement ``value_vars`` in ``melt`` and add ``melt`` to pandas namespace
+ (:issue:`2412`)
+ - Added boolean comparison operators to Panel
+ - Enable ``Series.str.strip/lstrip/rstrip`` methods to take an argument (:issue:`2411`)
+ - The DataFrame ctor now respects column ordering when given
+ an OrderedDict (:issue:`2455`)
+ - Assigning DatetimeIndex to Series changes the class to TimeSeries (:issue:`2139`)
+ - Improve performance of .value_counts method on non-integer data (:issue:`2480`)
+ - ``get_level_values`` method for MultiIndex return Index instead of ndarray (:issue:`2449`)
+ - ``convert_to_r_dataframe`` conversion for datetime values (:issue:`2351`)
+ - Allow ``DataFrame.to_csv`` to represent inf and nan differently (:issue:`2026`)
+ - Add ``min_i`` argument to ``nancorr`` to specify minimum required observations (:issue:`2002`)
+ - Add ``inplace`` option to ``sortlevel`` / ``sort`` functions on DataFrame (:issue:`1873`)
+ - Enable DataFrame to accept scalar constructor values like Series (:issue:`1856`)
+ - DataFrame.from_records now takes optional ``size`` parameter (:issue:`1794`)
+ - include iris dataset (:issue:`1709`)
+ - No datetime64 DataFrame column conversion of datetime.datetime with tzinfo (:issue:`1581`)
+ - Micro-optimizations in DataFrame for tracking state of internal consolidation (:issue:`217`)
+ - Format parameter in DataFrame.to_csv (:issue:`1525`)
+ - Partial string slicing for ``DatetimeIndex`` for daily and higher frequencies (:issue:`2306`)
+ - Implement ``col_space`` parameter in ``to_html`` and ``to_string`` in DataFrame (:issue:`1000`)
+ - Override ``Series.tolist`` and box datetime64 types (:issue:`2447`)
+ - Optimize ``unstack`` memory usage by compressing indices (:issue:`2278`)
+ - Fix HTML repr in IPython qtconsole if opening window is small (:issue:`2275`)
+ - Escape more special characters in console output (:issue:`2492`)
+ - df.select now invokes bool on the result of crit(x) (:issue:`2487`)
+
+**Bug fixes**
+
+ - Fix major performance regression in DataFrame.iteritems (:issue:`2273`)
+ - Fixes bug when negative period passed to Series/DataFrame.diff (:issue:`2266`)
+ - Escape tabs in console output to avoid alignment issues (:issue:`2038`)
+ - Properly box datetime64 values when retrieving cross-section from
+ mixed-dtype DataFrame (:issue:`2272`)
+ - Fix concatenation bug leading to :issue:`2057`, :issue:`2257`
+ - Fix regression in Index console formatting (:issue:`2319`)
+ - Box Period data when assigning PeriodIndex to frame column (:issue:`2243`, :issue:`2281`)
+ - Raise exception on calling reset_index on Series with inplace=True (:issue:`2277`)
+ - Enable setting multiple columns in DataFrame with hierarchical columns
+ (:issue:`2295`)
+ - Respect dtype=object in DataFrame constructor (:issue:`2291`)
+ - Fix DatetimeIndex.join bug with tz-aware indexes and how='outer' (:issue:`2317`)
+ - pop(...) and del works with DataFrame with duplicate columns (:issue:`2349`)
+ - Treat empty strings as NA in date parsing (rather than let dateutil do
+ something weird) (:issue:`2263`)
+ - Prevent uint64 -> int64 overflows (:issue:`2355`)
+ - Enable joins between MultiIndex and regular Index (:issue:`2024`)
+ - Fix time zone metadata issue when unioning non-overlapping DatetimeIndex
+ objects (:issue:`2367`)
+ - Raise/handle int64 overflows in parsers (:issue:`2247`)
+ - Deleting of consecutive rows in ``HDFStore tables`` is much faster than before
+ - Appending on a HDFStore would fail if the table was not first created via ``put``
+ - Use `col_space` argument as minimum column width in DataFrame.to_html (:issue:`2328`)
+ - Fix tz-aware DatetimeIndex.to_period (:issue:`2232`)
+ - Fix DataFrame row indexing case with MultiIndex (:issue:`2314`)
+ - Fix to_excel exporting issues with Timestamp objects in index (:issue:`2294`)
+ - Fixes assigning scalars and array to hierarchical column chunk (:issue:`1803`)
+ - Fixed a UnicodeDecodeError with series tidy_repr (:issue:`2225`)
+ - Fixed issues with duplicate keys in an index (:issue:`2347`, :issue:`2380`)
+ - Fixed issues re: Hash randomization, default on starting w/ py3.3 (:issue:`2331`)
+ - Fixed issue with missing attributes after loading a pickled dataframe (:issue:`2431`)
+ - Fix Timestamp formatting with tzoffset time zone in dateutil 2.1 (:issue:`2443`)
+ - Fix GroupBy.apply issue when using BinGrouper to do ts binning (:issue:`2300`)
+ - Fix issues resulting from datetime.datetime columns being converted to
+ datetime64 when calling DataFrame.apply. (:issue:`2374`)
+ - Raise exception when calling to_panel on non uniquely-indexed frame (:issue:`2441`)
+ - Improved detection of console encoding on IPython zmq frontends (:issue:`2458`)
+ - Preserve time zone when .append-ing two time series (:issue:`2260`)
+ - Box timestamps when calling reset_index on time-zone-aware index rather
+ than creating a tz-less datetime64 column (:issue:`2262`)
+ - Enable searching non-string columns in DataFrame.filter(like=...) (:issue:`2467`)
+ - Fixed issue with losing nanosecond precision upon conversion to DatetimeIndex (:issue:`2252`)
+ - Handle timezones in Datetime.normalize (:issue:`2338`)
+ - Fix test case where dtype specification with endianness causes
+ failures on big endian machines (:issue:`2318`)
+ - Fix plotting bug where upsampling causes data to appear shifted in time (:issue:`2448`)
+ - Fix ``read_csv`` failure for UTF-16 with BOM and skiprows (:issue:`2298`)
+ - read_csv with names arg not implicitly setting header=None (:issue:`2459`)
+ - Unrecognized compression mode causes segfault in read_csv (:issue:`2474`)
+ - In read_csv, header=0 and passed names should discard first row (:issue:`2269`)
+ - Correctly route to stdout/stderr in read_table (:issue:`2071`)
+ - Fix exception when Timestamp.to_datetime is called on a Timestamp with tzoffset (:issue:`2471`)
+ - Fixed unintentional conversion of datetime64 to long in groupby.first() (:issue:`2133`)
+ - Union of empty DataFrames now return empty with concatenated index (:issue:`2307`)
+ - DataFrame.sort_index raises more helpful exception if sorting by column
+ with duplicates (:issue:`2488`)
+ - DataFrame.to_string formatters can be list, too (:issue:`2520`)
+ - DataFrame.combine_first will always result in the union of the index and
+ columns, even if one DataFrame is length-zero (:issue:`2525`)
+ - Fix several DataFrame.icol/irow with duplicate indices issues (:issue:`2228`, :issue:`2259`)
+ - Use Series names for column names when using concat with axis=1 (:issue:`2489`)
+ - Raise Exception if start, end, periods all passed to date_range (:issue:`2538`)
+ - Fix Panel resampling issue (:issue:`2537`)
+
+
+
+pandas 0.9.1
+============
+
+**Release date:** 2012-11-14
+
+**New features**
+
+ - Can specify multiple sort orders in DataFrame/Series.sort/sort_index (:issue:`928`)
+ - New `top` and `bottom` options for handling NAs in rank (:issue:`1508`, :issue:`2159`)
+ - Add `where` and `mask` functions to DataFrame (:issue:`2109`, :issue:`2151`)
+ - Add `at_time` and `between_time` functions to DataFrame (:issue:`2149`)
+ - Add flexible `pow` and `rpow` methods to DataFrame (:issue:`2190`)
+
+**API Changes**
+
+ - Upsampling period index "spans" intervals. Example: annual periods
+ upsampled to monthly will span all months in each year
+ - Period.end_time will yield timestamp at last nanosecond in the interval
+ (:issue:`2124`, :issue:`2125`, :issue:`1764`)
+ - File parsers no longer coerce to float or bool for columns that have custom
+ converters specified (:issue:`2184`)
+
+**Improvements to existing features**
+
+ - Time rule inference for week-of-month (e.g. WOM-2FRI) rules (:issue:`2140`)
+ - Improve performance of datetime + business day offset with large number of
+ offset periods
+ - Improve HTML display of DataFrame objects with hierarchical columns
+ - Enable referencing of Excel columns by their column names (:issue:`1936`)
+ - DataFrame.dot can accept ndarrays (:issue:`2042`)
+ - Support negative periods in Panel.shift (:issue:`2164`)
+ - Make .drop(...) work with non-unique indexes (:issue:`2101`)
+ - Improve performance of Series/DataFrame.diff (re: :issue:`2087`)
+ - Support unary ~ (__invert__) in DataFrame (:issue:`2110`)
+ - Turn off pandas-style tick locators and formatters (:issue:`2205`)
+ - DataFrame[DataFrame] uses DataFrame.where to compute masked frame (:issue:`2230`)
+
+**Bug fixes**
+
+ - Fix some duplicate-column DataFrame constructor issues (:issue:`2079`)
+ - Fix bar plot color cycle issues (:issue:`2082`)
+ - Fix off-center grid for stacked bar plots (:issue:`2157`)
+ - Fix plotting bug if inferred frequency is offset with N > 1 (:issue:`2126`)
+ - Implement comparisons on date offsets with fixed delta (:issue:`2078`)
+ - Handle inf/-inf correctly in read_* parser functions (:issue:`2041`)
+ - Fix matplotlib unicode interaction bug
+ - Make WLS r-squared match statsmodels 0.5.0 fixed value
+ - Fix zero-trimming DataFrame formatting bug
+ - Correctly compute/box datetime64 min/max values from Series.min/max (:issue:`2083`)
+ - Fix unstacking edge case with unrepresented groups (:issue:`2100`)
+ - Fix Series.str failures when using pipe pattern '|' (:issue:`2119`)
+ - Fix pretty-printing of dict entries in Series, DataFrame (:issue:`2144`)
+ - Cast other datetime64 values to nanoseconds in DataFrame ctor (:issue:`2095`)
+ - Alias Timestamp.astimezone to tz_convert, so will yield Timestamp (:issue:`2060`)
+ - Fix timedelta64 formatting from Series (:issue:`2165`, :issue:`2146`)
+ - Handle None values gracefully in dict passed to Panel constructor (:issue:`2075`)
+ - Box datetime64 values as Timestamp objects in Series/DataFrame.iget (:issue:`2148`)
+ - Fix Timestamp indexing bug in DatetimeIndex.insert (:issue:`2155`)
+ - Use index name(s) (if any) in DataFrame.to_records (:issue:`2161`)
+ - Don't lose index names in Panel.to_frame/DataFrame.to_panel (:issue:`2163`)
+ - Work around length-0 boolean indexing NumPy bug (:issue:`2096`)
+ - Fix partial integer indexing bug in DataFrame.xs (:issue:`2107`)
+ - Fix variety of cut/qcut string-bin formatting bugs (:issue:`1978`, :issue:`1979`)
+ - Raise Exception when xs view not possible of MultiIndex'd DataFrame (:issue:`2117`)
+ - Fix groupby(...).first() issue with datetime64 (:issue:`2133`)
+ - Better floating point error robustness in some rolling_* functions
+ (:issue:`2114`, :issue:`2527`)
+ - Fix ewma NA handling in the middle of Series (:issue:`2128`)
+ - Fix numerical precision issues in diff with integer data (:issue:`2087`)
+ - Fix bug in MultiIndex.__getitem__ with NA values (:issue:`2008`)
+ - Fix DataFrame.from_records dict-arg bug when passing columns (:issue:`2179`)
+ - Fix Series and DataFrame.diff for integer dtypes (:issue:`2087`, :issue:`2174`)
+ - Fix bug when taking intersection of DatetimeIndex with empty index (:issue:`2129`)
+ - Pass through timezone information when calling DataFrame.align (:issue:`2127`)
+ - Properly sort when joining on datetime64 values (:issue:`2196`)
+ - Fix indexing bug in which False/True were being coerced to 0/1 (:issue:`2199`)
+ - Many unicode formatting fixes (:issue:`2201`)
+ - Fix improper MultiIndex conversion issue when assigning
+ e.g. DataFrame.index (:issue:`2200`)
+ - Fix conversion of mixed-type DataFrame to ndarray with dup columns (:issue:`2236`)
+ - Fix duplicate columns issue (:issue:`2218`, :issue:`2219`)
+ - Fix SparseSeries.__pow__ issue with NA input (:issue:`2220`)
+ - Fix icol with integer sequence failure (:issue:`2228`)
+ - Fixed resampling tz-aware time series issue (:issue:`2245`)
+ - SparseDataFrame.icol was not returning SparseSeries (:issue:`2227`, :issue:`2229`)
+ - Enable ExcelWriter to handle PeriodIndex (:issue:`2240`)
+ - Fix issue constructing DataFrame from empty Series with name (:issue:`2234`)
+ - Use console-width detection in interactive sessions only (:issue:`1610`)
+ - Fix parallel_coordinates legend bug with mpl 1.2.0 (:issue:`2237`)
+ - Make tz_localize work in corner case of empty Series (:issue:`2248`)
+
+
+
+pandas 0.9.0
+============
+
+**Release date:** 10/7/2012
+
+**New features**
+
+ - Add ``str.encode`` and ``str.decode`` to Series (:issue:`1706`)
+ - Add `to_latex` method to DataFrame (:issue:`1735`)
+ - Add convenient expanding window equivalents of all rolling_* ops (:issue:`1785`)
+ - Add Options class to pandas.io.data for fetching options data from Yahoo!
+ Finance (:issue:`1748`, :issue:`1739`)
+ - Recognize and convert more boolean values in file parsing (Yes, No, TRUE,
+ FALSE, variants thereof) (:issue:`1691`, :issue:`1295`)
+ - Add Panel.update method, analogous to DataFrame.update (:issue:`1999`, :issue:`1988`)
+
+**Improvements to existing features**
+
+ - Proper handling of NA values in merge operations (:issue:`1990`)
+ - Add ``flags`` option for ``re.compile`` in some Series.str methods (:issue:`1659`)
+ - Parsing of UTC date strings in read_* functions (:issue:`1693`)
+ - Handle generator input to Series (:issue:`1679`)
+ - Add `na_action='ignore'` to Series.map to quietly propagate NAs (:issue:`1661`)
+ - Add args/kwds options to Series.apply (:issue:`1829`)
+ - Add inplace option to Series/DataFrame.reset_index (:issue:`1797`)
+ - Add ``level`` parameter to ``Series.reset_index``
+ - Add quoting option for DataFrame.to_csv (:issue:`1902`)
+ - Indicate long column value truncation in DataFrame output with ... (:issue:`1854`)
+ - DataFrame.dot will not do data alignment, and also work with Series (:issue:`1915`)
+ - Add ``na`` option for missing data handling in some vectorized string
+ methods (:issue:`1689`)
+ - If index_label=False in DataFrame.to_csv, do not print fields/commas in the
+ text output. Results in easier importing into R (:issue:`1583`)
+ - Can pass tuple/list of axes to DataFrame.dropna to simplify repeated calls
+ (dropping both columns and rows) (:issue:`924`)
+ - Improve DataFrame.to_html output for hierarchically-indexed rows (do not
+ repeat levels) (:issue:`1929`)
+ - TimeSeries.between_time can now select times across midnight (:issue:`1871`)
+ - Enable `skip_footer` parameter in `ExcelFile.parse` (:issue:`1843`)
+
+**API Changes**
+
+ - Change default header names in read_* functions to more Pythonic X0, X1,
+ etc. instead of X.1, X.2. (:issue:`2000`)
+ - Deprecated ``day_of_year`` API removed from PeriodIndex, use ``dayofyear``
+ (:issue:`1723`)
+ - Don't modify NumPy suppress printoption at import time
+ - The internal HDF5 data arrangement for DataFrames has been
+ transposed. Legacy files will still be readable by HDFStore (:issue:`1834`, :issue:`1824`)
+ - Legacy cruft removed: pandas.stats.misc.quantileTS
+ - Use ISO8601 format for Period repr: monthly, daily, and on down (:issue:`1776`)
+ - Empty DataFrame columns are now created as object dtype. This will prevent
+ a class of TypeErrors that was occurring in code where the dtype of a
+ column would depend on the presence of data or not (e.g. a SQL query having
+ results) (:issue:`1783`)
+ - Setting parts of DataFrame/Panel using ix now aligns input Series/DataFrame
+ (:issue:`1630`)
+ - `first` and `last` methods in `GroupBy` no longer drop non-numeric columns
+ (:issue:`1809`)
+ - Resolved inconsistencies in specifying custom NA values in text parser.
+ `na_values` of type dict no longer override default NAs unless
+ `keep_default_na` is set to false explicitly (:issue:`1657`)
+ - Enable `skipfooter` parameter in text parsers as an alias for `skip_footer`
+
+**Bug fixes**
+
+ - Perform arithmetic column-by-column in mixed-type DataFrame to avoid type
+ upcasting issues. Caused downstream DataFrame.diff bug (:issue:`1896`)
+ - Fix matplotlib auto-color assignment when no custom spectrum passed. Also
+ respect passed color keyword argument (:issue:`1711`)
+ - Fix resampling logical error with closed='left' (:issue:`1726`)
+ - Fix critical DatetimeIndex.union bugs (:issue:`1730`, :issue:`1719`, :issue:`1745`, :issue:`1702`, :issue:`1753`)
+ - Fix critical DatetimeIndex.intersection bug with unanchored offsets (:issue:`1708`)
+ - Fix MM-YYYY time series indexing case (:issue:`1672`)
+ - Fix case where Categorical group key was not being passed into index in
+ GroupBy result (:issue:`1701`)
+ - Handle Ellipsis in Series.__getitem__/__setitem__ (:issue:`1721`)
+ - Fix some bugs with handling datetime64 scalars of other units in NumPy 1.6
+ and 1.7 (:issue:`1717`)
+ - Fix performance issue in MultiIndex.format (:issue:`1746`)
+ - Fixed GroupBy bugs interacting with DatetimeIndex asof / map methods (:issue:`1677`)
+ - Handle factors with NAs in pandas.rpy (:issue:`1615`)
+ - Fix statsmodels import in pandas.stats.var (:issue:`1734`)
+ - Fix DataFrame repr/info summary with non-unique columns (:issue:`1700`)
+ - Fix Series.iget_value for non-unique indexes (:issue:`1694`)
+ - Don't lose tzinfo when passing DatetimeIndex as DataFrame column (:issue:`1682`)
+ - Fix tz conversion with time zones that haven't had any DST transitions since
+ first date in the array (:issue:`1673`)
+ - Fix field access with UTC->local conversion on unsorted arrays (:issue:`1756`)
+ - Fix isnull handling of array-like (list) inputs (:issue:`1755`)
+ - Fix regression in handling of Series in Series constructor (:issue:`1671`)
+ - Fix comparison of Int64Index with DatetimeIndex (:issue:`1681`)
+ - Fix min_periods handling in new rolling_max/min at array start (:issue:`1695`)
+ - Fix errors with how='median' and generic NumPy resampling in some cases
+ caused by SeriesBinGrouper (:issue:`1648`, :issue:`1688`)
+ - When grouping by level, exclude unobserved levels (:issue:`1697`)
+ - Don't lose tzinfo in DatetimeIndex when shifting by different offset (:issue:`1683`)
+ - Hack to support storing data with a zero-length axis in HDFStore (:issue:`1707`)
+ - Fix DatetimeIndex tz-aware range generation issue (:issue:`1674`)
+ - Fix method='time' interpolation with intraday data (:issue:`1698`)
+ - Don't plot all-NA DataFrame columns as zeros (:issue:`1696`)
+ - Fix bug in scatter_plot with by option (:issue:`1716`)
+ - Fix performance problem in infer_freq with lots of non-unique stamps (:issue:`1686`)
+ - Fix handling of PeriodIndex as argument to create MultiIndex (:issue:`1705`)
+ - Fix re: unicode MultiIndex level names in Series/DataFrame repr (:issue:`1736`)
+ - Handle PeriodIndex in to_datetime instance method (:issue:`1703`)
+ - Support StaticTzInfo in DatetimeIndex infrastructure (:issue:`1692`)
+ - Allow MultiIndex setops with length-0 other type indexes (:issue:`1727`)
+ - Fix handling of DatetimeIndex in DataFrame.to_records (:issue:`1720`)
+ - Fix handling of general objects in isnull on which bool(...) fails (:issue:`1749`)
+ - Fix .ix indexing with MultiIndex ambiguity (:issue:`1678`)
+ - Fix .ix setting logic error with non-unique MultiIndex (:issue:`1750`)
+ - Basic indexing now works on MultiIndex with > 1000000 elements, regression
+ from earlier version of pandas (:issue:`1757`)
+ - Handle non-float64 dtypes in fast DataFrame.corr/cov code paths (:issue:`1761`)
+ - Fix DatetimeIndex.isin to function properly (:issue:`1763`)
+ - Fix conversion of array of tz-aware datetime.datetime to DatetimeIndex with
+ right time zone (:issue:`1777`)
+ - Fix DST issues with generating anchored date ranges (:issue:`1778`)
+ - Fix issue calling sort on result of Series.unique (:issue:`1807`)
+ - Fix numerical issue leading to square root of negative number in
+ rolling_std (:issue:`1840`)
+ - Let Series.str.split accept no arguments (like str.split) (:issue:`1859`)
+ - Allow user to have dateutil 2.1 installed on a Python 2 system (:issue:`1851`)
+ - Catch ImportError less aggressively in pandas/__init__.py (:issue:`1845`)
+ - Fix pip source installation bug when installing from GitHub (:issue:`1805`)
+ - Fix error when window size > array size in rolling_apply (:issue:`1850`)
+ - Fix pip source installation issues via SSH from GitHub
+ - Fix OLS.summary when column is a tuple (:issue:`1837`)
+ - Fix bug in __doc__ patching when -OO passed to interpreter
+ (:issue:`1792` :issue:`1741` :issue:`1774`)
+ - Fix unicode console encoding issue in IPython notebook (:issue:`1782`, :issue:`1768`)
+ - Fix unicode formatting issue with Series.name (:issue:`1782`)
+ - Fix bug in DataFrame.duplicated with datetime64 columns (:issue:`1833`)
+ - Fix bug in Panel internals resulting in error when doing fillna after
+ truncate not changing size of panel (:issue:`1823`)
+ - Prevent segfault due to MultiIndex not being supported in HDFStore table
+ format (:issue:`1848`)
+ - Fix UnboundLocalError in Panel.__setitem__ and add better error (:issue:`1826`)
+ - Fix to_csv issues with list of string entries. Isnull works on list of
+ strings now too (:issue:`1791`)
+ - Fix Timestamp comparisons with datetime values outside the nanosecond range
+ (1677-2262)
+ - Revert to prior behavior of normalize_date with datetime.date objects
+ (return datetime)
+ - Fix broken interaction between np.nansum and Series.any/all
+ - Fix bug with multiple column date parsers (:issue:`1866`)
+ - DatetimeIndex.union(Int64Index) was broken
+ - Make plot x vs y interface consistent with integer indexing (:issue:`1842`)
+ - set_index inplace modified data even if unique check fails (:issue:`1831`)
+ - Only use Q-OCT/NOV/DEC in quarterly frequency inference (:issue:`1789`)
+ - Upcast to dtype=object when unstacking boolean DataFrame (:issue:`1820`)
+ - Fix float64/float32 merging bug (:issue:`1849`)
+ - Fixes to Period.start_time for non-daily frequencies (:issue:`1857`)
+ - Fix failure when converter used on index_col in read_csv (:issue:`1835`)
+ - Implement PeriodIndex.append so that pandas.concat works correctly (:issue:`1815`)
+ - Avoid Cython out-of-bounds access causing segfault sometimes in pad_2d,
+ backfill_2d
+ - Fix resampling error with intraday times and anchored target time (like
+ AS-DEC) (:issue:`1772`)
+ - Fix .ix indexing bugs with mixed-integer indexes (:issue:`1799`)
+ - Respect passed color keyword argument in Series.plot (:issue:`1890`)
+ - Fix rolling_min/max when the window is larger than the size of the input
+ array. Check other malformed inputs (:issue:`1899`, :issue:`1897`)
+ - Rolling variance / standard deviation with only a single observation in
+ window (:issue:`1884`)
+ - Fix unicode sheet name failure in to_excel (:issue:`1828`)
+ - Override DatetimeIndex.min/max to return Timestamp objects (:issue:`1895`)
+ - Fix column name formatting issue in length-truncated column (:issue:`1906`)
+ - Fix broken handling of copying Index metadata to new instances created by
+ view(...) calls inside the NumPy infrastructure
+ - Support datetime.date again in DateOffset.rollback/rollforward
+ - Raise Exception if set passed to Series constructor (:issue:`1913`)
+ - Add TypeError when appending HDFStore table w/ wrong index type (:issue:`1881`)
+ - Don't raise exception on empty inputs in EW functions (e.g. ewma) (:issue:`1900`)
+ - Make asof work correctly with PeriodIndex (:issue:`1883`)
+ - Fix extlinks in doc build
+ - Fill boolean DataFrame with NaN when calling shift (:issue:`1814`)
+ - Fix setuptools bug causing pip not to Cythonize .pyx files sometimes
+ - Fix negative integer indexing regression in .ix from 0.7.x (:issue:`1888`)
+ - Fix error while retrieving timezone and utc offset from subclasses of
+ datetime.tzinfo without .zone and ._utcoffset attributes (:issue:`1922`)
+ - Fix DataFrame formatting of small, non-zero FP numbers (:issue:`1911`)
+ - Various fixes by upcasting of date -> datetime (:issue:`1395`)
+ - Raise better exception when passing multiple functions with the same name,
+ such as lambdas, to GroupBy.aggregate
+ - Fix DataFrame.apply with axis=1 on a non-unique index (:issue:`1878`)
+ - Proper handling of Index subclasses in pandas.unique (:issue:`1759`)
+ - Set index names in DataFrame.from_records (:issue:`1744`)
+ - Fix time series indexing error with duplicates, under and over hash table
+ size cutoff (:issue:`1821`)
+ - Handle list keys in addition to tuples in DataFrame.xs when
+ partial-indexing a hierarchically-indexed DataFrame (:issue:`1796`)
+ - Support multiple column selection in DataFrame.__getitem__ with duplicate
+ columns (:issue:`1943`)
+ - Fix time zone localization bug causing improper fields (e.g. hours) in time
+ zones that have not had a UTC transition in a long time (:issue:`1946`)
+ - Fix errors when parsing and working with fixed offset timezones
+ (:issue:`1922`, :issue:`1928`)
+ - Fix text parser bug when handling UTC datetime objects generated by
+ dateutil (:issue:`1693`)
+ - Fix plotting bug when 'B' is the inferred frequency but index actually
+ contains weekends (:issue:`1668`, :issue:`1669`)
+ - Fix plot styling bugs (:issue:`1666`, :issue:`1665`, :issue:`1658`)
+ - Fix plotting bug with index/columns with unicode (:issue:`1685`)
+ - Fix DataFrame constructor bug when passed Series with datetime64 dtype
+ in a dict (:issue:`1680`)
+ - Fixed regression in generating DatetimeIndex using timezone aware
+ datetime.datetime (:issue:`1676`)
+ - Fix DataFrame bug when printing concatenated DataFrames with duplicated
+ columns (:issue:`1675`)
+ - Fixed bug when plotting time series with multiple intraday frequencies
+ (:issue:`1732`)
+ - Fix bug in DataFrame.duplicated to enable iterables other than list-types
+ as input argument (:issue:`1773`)
+ - Fix resample bug when passed list of lambdas as `how` argument (:issue:`1808`)
+ - Repr fix for MultiIndex level with all NAs (:issue:`1971`)
+ - Fix PeriodIndex slicing bug when slice start/end are out-of-bounds (:issue:`1977`)
+ - Fix read_table bug when parsing unicode (:issue:`1975`)
+ - Fix BlockManager.iget bug when dealing with non-unique MultiIndex as columns
+ (:issue:`1970`)
+ - Fix reset_index bug if both drop and level are specified (:issue:`1957`)
+ - Work around unsafe NumPy object->int casting with Cython function (:issue:`1987`)
+ - Fix datetime64 formatting bug in DataFrame.to_csv (:issue:`1993`)
+ - Default start date in pandas.io.data to 1/1/2000 as the docs say (:issue:`2011`)
+
+
+
+
+pandas 0.8.1
+============
+
+**Release date:** July 22, 2012
+
+**New features**
+
+ - Add vectorized, NA-friendly string methods to Series (:issue:`1621`, :issue:`620`)
+ - Can pass dict of per-column line styles to DataFrame.plot (:issue:`1559`)
+ - Selective plotting to secondary y-axis on same subplot (:issue:`1640`)
+ - Add new ``bootstrap_plot`` plot function
+ - Add new ``parallel_coordinates`` plot function (:issue:`1488`)
+ - Add ``radviz`` plot function (:issue:`1566`)
+ - Add ``multi_sparse`` option to ``set_printoptions`` to modify display of
+ hierarchical indexes (:issue:`1538`)
+ - Add ``dropna`` method to Panel (:issue:`171`)
+
+**Improvements to existing features**
+
+ - Use moving min/max algorithms from Bottleneck in rolling_min/rolling_max
+ for > 100x speedup. (:issue:`1504`, :issue:`50`)
+ - Add Cython group median method for >15x speedup (:issue:`1358`)
+ - Drastically improve ``to_datetime`` performance on ISO8601 datetime strings
+ (with no time zones) (:issue:`1571`)
+ - Improve single-key groupby performance on large data sets, accelerate use of
+ groupby with a Categorical variable
+ - Add ability to append hierarchical index levels with ``set_index`` and to
+ drop single levels with ``reset_index`` (:issue:`1569`, :issue:`1577`)
+ - Always apply passed functions in ``resample``, even if upsampling (:issue:`1596`)
+ - Avoid unnecessary copies in DataFrame constructor with explicit dtype (:issue:`1572`)
+ - Cleaner DatetimeIndex string representation with 1 or 2 elements (:issue:`1611`)
+ - Improve performance of array-of-Period to PeriodIndex, convert such arrays
+ to PeriodIndex inside Index (:issue:`1215`)
+ - More informative string representation for weekly Period objects (:issue:`1503`)
+ - Accelerate 3-axis multi data selection from homogeneous Panel (:issue:`979`)
+ - Add ``adjust`` option to ewma to disable adjustment factor (:issue:`1584`)
+ - Add new matplotlib converters for high frequency time series plotting (:issue:`1599`)
+ - Handling of tz-aware datetime.datetime objects in to_datetime; raise
+ Exception unless utc=True given (:issue:`1581`)
+
+**Bug fixes**
+
+ - Fix NA handling in DataFrame.to_panel (:issue:`1582`)
+ - Handle TypeError issues inside PyObject_RichCompareBool calls in khash
+ (:issue:`1318`)
+ - Fix resampling bug to lower case daily frequency (:issue:`1588`)
+ - Fix kendall/spearman DataFrame.corr bug with no overlap (:issue:`1595`)
+ - Fix bug in DataFrame.set_index (:issue:`1592`)
+ - Don't ignore axes in boxplot if by specified (:issue:`1565`)
+ - Fix Panel .ix indexing with integers bug (:issue:`1603`)
+ - Fix Partial indexing bugs (years, months, ...) with PeriodIndex (:issue:`1601`)
+ - Fix MultiIndex console formatting issue (:issue:`1606`)
+ - Unordered index with duplicates doesn't yield scalar location for single
+ entry (:issue:`1586`)
+ - Fix resampling of tz-aware time series with "anchored" freq (:issue:`1591`)
+ - Fix DataFrame.rank error on integer data (:issue:`1589`)
+ - Selection of multiple SparseDataFrame columns by list in __getitem__ (:issue:`1585`)
+ - Override Index.tolist for compatibility with MultiIndex (:issue:`1576`)
+ - Fix hierarchical summing bug with MultiIndex of length 1 (:issue:`1568`)
+ - Work around numpy.concatenate use/bug in Series.set_value (:issue:`1561`)
+ - Ensure Series/DataFrame are sorted before resampling (:issue:`1580`)
+ - Fix unhandled IndexError when indexing very large time series (:issue:`1562`)
+ - Fix DatetimeIndex intersection logic error with irregular indexes (:issue:`1551`)
+ - Fix unit test errors on Python 3 (:issue:`1550`)
+ - Fix .ix indexing bugs in duplicate DataFrame index (:issue:`1201`)
+ - Better handle errors with non-existing objects in HDFStore (:issue:`1254`)
+ - Don't copy int64 array data in DatetimeIndex when copy=False (:issue:`1624`)
+ - Fix resampling of conforming periods quarterly to annual (:issue:`1622`)
+ - Don't lose index name on resampling (:issue:`1631`)
+ - Support python-dateutil version 2.1 (:issue:`1637`)
+ - Fix broken scatter_matrix axis labeling, esp. with time series (:issue:`1625`)
+ - Fix cases where extra keywords weren't being passed on to matplotlib from
+ Series.plot (:issue:`1636`)
+ - Fix BusinessMonthBegin logic for dates before 1st bday of month (:issue:`1645`)
+ - Ensure string alias converted (valid in DatetimeIndex.get_loc) in
+ DataFrame.xs / __getitem__ (:issue:`1644`)
+ - Fix use of string alias timestamps with tz-aware time series (:issue:`1647`)
+ - Fix Series.max/min and Series.describe on len-0 series (:issue:`1650`)
+ - Handle None values in dict passed to concat (:issue:`1649`)
+ - Fix Series.interpolate with method='values' and DatetimeIndex (:issue:`1646`)
+ - Fix IndexError in left merges on a DataFrame with 0-length (:issue:`1628`)
+ - Fix DataFrame column width display with UTF-8 encoded characters (:issue:`1620`)
+ - Handle case in pandas.io.data.get_data_yahoo where Yahoo! returns duplicate
+ dates for most recent business day
+ - Avoid downsampling when plotting mixed frequencies on the same subplot (:issue:`1619`)
+ - Fix read_csv bug when reading a single line (:issue:`1553`)
+ - Fix bug in C code causing monthly periods prior to December 1969 to be off (:issue:`1570`)
+
+
+
+pandas 0.8.0
+============
+
+**Release date:** 6/29/2012
+
+**New features**
+
+ - New unified DatetimeIndex class for nanosecond-level timestamp data
+ - New Timestamp datetime.datetime subclass with easy time zone conversions,
+ and support for nanoseconds
+ - New PeriodIndex class for timespans, calendar logic, and Period scalar object
+ - High performance resampling of timestamp and period data. New `resample`
+ method of all pandas data structures
+ - New frequency names plus shortcut string aliases like '15h', '1h30min'
+ - Time series string indexing shorthand (:issue:`222`)
+ - Add week, dayofyear array and other timestamp array-valued field accessor
+ functions to DatetimeIndex
+ - Add GroupBy.prod optimized aggregation function and 'prod' fast time series
+ conversion method (:issue:`1018`)
+ - Implement robust frequency inference function and `inferred_freq` attribute
+ on DatetimeIndex (:issue:`391`)
+ - New ``tz_convert`` and ``tz_localize`` methods in Series / DataFrame
+ - Convert DatetimeIndexes to UTC if time zones are different in join/setops
+ (:issue:`864`)
+ - Add limit argument for forward/backward filling to reindex, fillna,
+ etc. (:issue:`825` and others)
+ - Add support for indexes (dates or otherwise) with duplicates and common
+ sense indexing/selection functionality
+ - Series/DataFrame.update methods, in-place variant of combine_first (:issue:`961`)
+ - Add ``match`` function to API (:issue:`502`)
+ - Add Cython-optimized first, last, min, max, prod functions to GroupBy (:issue:`994`,
+ :issue:`1043`)
+ - Dates can be split across multiple columns (:issue:`1227`, :issue:`1186`)
+ - Add experimental support for converting pandas DataFrame to R data.frame
+ via rpy2 (:issue:`350`, :issue:`1212`)
+ - Can pass list of (name, function) to GroupBy.aggregate to get aggregates in
+ a particular order (:issue:`610`)
+ - Can pass dicts with lists of functions or dicts to GroupBy aggregate to do
+ much more flexible multiple function aggregation (:issue:`642`, :issue:`610`)
+ - New ordered_merge functions for merging DataFrames with ordered
+ data. Also supports group-wise merging for panel data (:issue:`813`)
+ - Add keys() method to DataFrame
+ - Add flexible replace method for potentially replacing values in Series and
+ DataFrame (:issue:`929`, :issue:`1241`)
+ - Add 'kde' plot kind for Series/DataFrame.plot (:issue:`1059`)
+ - More flexible multiple function aggregation with GroupBy
+ - Add pct_change function to Series/DataFrame
+ - Add option to interpolate by Index values in Series.interpolate (:issue:`1206`)
+ - Add ``max_colwidth`` option for DataFrame, defaulting to 50
+ - Conversion of DataFrame through rpy2 to R data.frame (:issue:`1282`)
+ - Add keys() method on DataFrame (:issue:`1240`)
+ - Add new ``match`` function to API (similar to R) (:issue:`502`)
+ - Add dayfirst option to parsers (:issue:`854`)
+ - Add ``method`` argument to ``align`` method for forward/backward filling
+ (:issue:`216`)
+ - Add Panel.transpose method for rearranging axes (:issue:`695`)
+ - Add new ``cut`` function (patterned after R) for discretizing data into
+ equal range-length bins or arbitrary breaks of your choosing (:issue:`415`)
+ - Add new ``qcut`` for cutting with quantiles (:issue:`1378`)
+ - Add ``value_counts`` top level array method (:issue:`1392`)
+ - Added Andrews curves plot type (:issue:`1325`)
+ - Add lag plot (:issue:`1440`)
+ - Add autocorrelation_plot (:issue:`1425`)
+ - Add support for tox and Travis CI (:issue:`1382`)
+ - Add support for Categorical use in GroupBy (:issue:`292`)
+ - Add ``any`` and ``all`` methods to DataFrame (:issue:`1416`)
+ - Add ``secondary_y`` option to Series.plot
+ - Add experimental ``lreshape`` function for reshaping wide to long
+
+**Improvements to existing features**
+
+ - Switch to klib/khash-based hash tables in Index classes for better
+ performance in many cases and lower memory footprint
+ - Shipping some functions from scipy.stats to reduce dependency,
+ e.g. Series.describe and DataFrame.describe (:issue:`1092`)
+ - Can create MultiIndex by passing list of lists or list of arrays to Series,
+ DataFrame constructor, etc. (:issue:`831`)
+ - Can pass arrays in addition to column names to DataFrame.set_index (:issue:`402`)
+ - Improve the speed of "square" reindexing of homogeneous DataFrame objects
+ by significant margin (:issue:`836`)
+ - Handle more dtypes when passed MaskedArrays in DataFrame constructor (:issue:`406`)
+ - Improved performance of join operations on integer keys (:issue:`682`)
+ - Can pass multiple columns to GroupBy object, e.g. grouped[[col1, col2]] to
+ only aggregate a subset of the value columns (:issue:`383`)
+ - Add histogram / kde plot options for scatter_matrix diagonals (:issue:`1237`)
+ - Add inplace option to Series/DataFrame.rename and sort_index,
+ DataFrame.drop_duplicates (:issue:`805`, :issue:`207`)
+ - More helpful error message when nothing passed to Series.reindex (:issue:`1267`)
+ - Can mix array and scalars as dict-value inputs to DataFrame ctor (:issue:`1329`)
+ - Use DataFrame columns' name for legend title in plots
+ - Preserve frequency in DatetimeIndex when possible in boolean indexing
+ operations
+ - Promote datetime.date values in data alignment operations (:issue:`867`)
+ - Add ``order`` method to Index classes (:issue:`1028`)
+ - Avoid hash table creation in large monotonic hash table indexes (:issue:`1160`)
+ - Store time zones in HDFStore (:issue:`1232`)
+ - Enable storage of sparse data structures in HDFStore (:issue:`85`)
+ - Enable Series.asof to work with arrays of timestamp inputs
+ - Cython implementation of DataFrame.corr speeds up by > 100x (:issue:`1349`, :issue:`1354`)
+ - Exclude "nuisance" columns automatically in GroupBy.transform (:issue:`1364`)
+ - Support functions-as-strings in GroupBy.transform (:issue:`1362`)
+ - Use index name as xlabel/ylabel in plots (:issue:`1415`)
+ - Add ``convert_dtype`` option to Series.apply to be able to leave data as
+ dtype=object (:issue:`1414`)
+ - Can specify all index level names in concat (:issue:`1419`)
+ - Add ``dialect`` keyword to parsers for quoting conventions (:issue:`1363`)
+ - Enable DataFrame[bool_DataFrame] += value (:issue:`1366`)
+ - Add ``retries`` argument to ``get_data_yahoo`` to try to prevent Yahoo! API
+ 404s (:issue:`826`)
+ - Improve performance of reshaping by using O(N) categorical sorting
+ - Series names will be used for index of DataFrame if no index passed (:issue:`1494`)
+ - Header argument in DataFrame.to_csv can accept a list of column names to
+ use instead of the object's columns (:issue:`921`)
+ - Add ``raise_conflict`` argument to DataFrame.update (:issue:`1526`)
+ - Support file-like objects in ExcelFile (:issue:`1529`)
+
+**API Changes**
+
+ - Rename `pandas._tseries` to `pandas.lib`
+ - Rename Factor to Categorical and add improvements. Numerous Categorical bug
+ fixes
+ - Frequency name overhaul, WEEKDAY/EOM and rules with @
+ deprecated. get_legacy_offset_name backwards compatibility function added
+ - Raise ValueError in DataFrame.__nonzero__, so "if df" no longer works
+ (:issue:`1073`)
+ - Change BDay (business day) to not normalize dates by default (:issue:`506`)
+ - Remove deprecated DataMatrix name
+ - Default merge suffixes for overlap now have underscores instead of periods
+ to facilitate tab completion, etc. (:issue:`1239`)
+ - Deprecation of offset, time_rule timeRule parameters throughout codebase
+ - Series.append and DataFrame.append no longer check for duplicate indexes
+ by default, add verify_integrity parameter (:issue:`1394`)
+ - Refactor Factor class, old constructor moved to Factor.from_array
+ - Modified internals of MultiIndex to use less memory (no longer represented
+ as array of tuples) internally, speed up construction time and many methods
+ which construct intermediate hierarchical indexes (:issue:`1467`)
+
+**Bug fixes**
+
+ - Fix OverflowError from storing pre-1970 dates in HDFStore by switching to
+ datetime64 (:issue:`179`)
+ - Fix logical error with February leap year end in YearEnd offset
+ - Series([False, nan]) was getting casted to float64 (:issue:`1074`)
+ - Fix binary operations between boolean Series and object Series with
+ booleans and NAs (:issue:`1074`, :issue:`1079`)
+ - Couldn't assign whole array to column in mixed-type DataFrame via .ix
+ (:issue:`1142`)
+ - Fix label slicing issues with float index values (:issue:`1167`)
+ - Fix segfault caused by empty groups passed to groupby (:issue:`1048`)
+ - Fix occasionally misbehaved reindexing in the presence of NaN labels (:issue:`522`)
+ - Fix imprecise logic causing weird Series results from .apply (:issue:`1183`)
+ - Unstack multiple levels in one shot, avoiding empty columns in some
+ cases. Fix pivot table bug (:issue:`1181`)
+ - Fix formatting of MultiIndex on Series/DataFrame when index name coincides
+ with label (:issue:`1217`)
+ - Handle Excel 2003 #N/A as NaN from xlrd (:issue:`1213`, :issue:`1225`)
+ - Fix timestamp locale-related deserialization issues with HDFStore by moving
+ to datetime64 representation (:issue:`1081`, :issue:`809`)
+ - Fix DataFrame.duplicated/drop_duplicates NA value handling (:issue:`557`)
+ - Actually raise exceptions in fast reducer (:issue:`1243`)
+ - Fix various timezone-handling bugs from 0.7.3 (:issue:`969`)
+ - GroupBy on level=0 discarded index name (:issue:`1313`)
+ - Better error message with unmergeable DataFrames (:issue:`1307`)
+ - Series.__repr__ alignment fix with unicode index values (:issue:`1279`)
+ - Better error message if nothing passed to reindex (:issue:`1267`)
+ - More robust NA handling in DataFrame.drop_duplicates (:issue:`557`)
+ - Resolve locale-based and pre-epoch HDF5 timestamp deserialization issues
+ (:issue:`973`, :issue:`1081`, :issue:`179`)
+ - Implement Series.repeat (:issue:`1229`)
+ - Fix indexing with namedtuple and other tuple subclasses (:issue:`1026`)
+ - Fix float64 slicing bug (:issue:`1167`)
+ - Parsing integers with commas (:issue:`796`)
+ - Fix groupby improper data type when group consists of one value (:issue:`1065`)
+ - Fix negative variance possibility in nanvar resulting from floating point
+ error (:issue:`1090`)
+ - Consistently set name on groupby pieces (:issue:`184`)
+ - Treat dict return values as Series in GroupBy.apply (:issue:`823`)
+ - Respect column selection for DataFrame in GroupBy.transform (:issue:`1365`)
+ - Fix MultiIndex partial indexing bug (:issue:`1352`)
+ - Enable assignment of rows in mixed-type DataFrame via .ix (:issue:`1432`)
+ - Reset index mapping when grouping Series in Cython (:issue:`1423`)
+ - Fix outer/inner DataFrame.join with non-unique indexes (:issue:`1421`)
+ - Fix MultiIndex groupby bugs with empty lower levels (:issue:`1401`)
+ - Calling fillna with a Series will have same behavior as with dict (:issue:`1486`)
+ - SparseSeries reduction bug (:issue:`1375`)
+ - Fix unicode serialization issue in HDFStore (:issue:`1361`)
+ - Pass keywords to pyplot.boxplot in DataFrame.boxplot (:issue:`1493`)
+ - Bug fixes in MonthBegin (:issue:`1483`)
+ - Preserve MultiIndex names in drop (:issue:`1513`)
+ - Fix Panel DataFrame slice-assignment bug (:issue:`1533`)
+ - Don't use locals() in read_* functions (:issue:`1547`)
+
+
+
+pandas 0.7.3
+============
+
+**Release date:** April 12, 2012
+
+**New features / modules**
+
+ - Support for non-unique indexes: indexing and selection, many-to-one and
+ many-to-many joins (:issue:`1306`)
+ - Added fixed-width file reader, read_fwf (:issue:`952`)
+ - Add group_keys argument to groupby to not add group names to MultiIndex in
+ result of apply (:issue:`938`)
+ - DataFrame can now accept non-integer label slicing (:issue:`946`). Previously
+ only DataFrame.ix was able to do so.
+ - DataFrame.apply now retains name attributes on Series objects (:issue:`983`)
+ - Numeric DataFrame comparisons with non-numeric values now raise proper
+ TypeError (:issue:`943`). Previously raised "PandasError: DataFrame constructor
+ not properly called!"
+ - Add ``kurt`` methods to Series and DataFrame (:issue:`964`)
+ - Can pass dict of column -> list/set NA values for text parsers (:issue:`754`)
+ - Allow user-specified NA values in text parsers (:issue:`754`)
+ - Parsers checks for openpyxl dependency and raises ImportError if not found
+ (:issue:`1007`)
+ - New factory function to create HDFStore objects that can be used in a with
+ statement so users do not have to explicitly call HDFStore.close (:issue:`1005`)
+ - pivot_table is now more flexible with same parameters as groupby (:issue:`941`)
+ - Added stacked bar plots (:issue:`987`)
+ - scatter_matrix method in pandas/tools/plotting.py (:issue:`935`)
+ - DataFrame.boxplot returns plot results for ex-post styling (:issue:`985`)
+ - Short version number accessible as pandas.version.short_version (:issue:`930`)
+ - Additional documentation in panel.to_frame (:issue:`942`)
+ - More informative Series.apply docstring regarding element-wise apply
+ (:issue:`977`)
+ - Notes on rpy2 installation (:issue:`1006`)
+ - Add rotation and font size options to hist method (:issue:`1012`)
+ - Use exogenous / X variable index in result of OLS.y_predict. Add
+ OLS.predict method (:issue:`1027`, :issue:`1008`)
+
+**API Changes**
+
+ - Calling apply on grouped Series, e.g. describe(), will no longer yield
+ DataFrame by default. Will have to call unstack() to get prior behavior
+ - NA handling in non-numeric comparisons has been tightened up (:issue:`933`, :issue:`953`)
+ - No longer assign dummy names key_0, key_1, etc. to groupby index (:issue:`1291`)
+
+**Bug fixes**
+
+ - Fix logic error when selecting part of a row in a DataFrame with a
+ MultiIndex index (:issue:`1013`)
+ - Series comparison with Series of differing length causes crash (:issue:`1016`).
+ - Fix bug in indexing when selecting section of hierarchically-indexed row
+ (:issue:`1013`)
+ - DataFrame.plot(logy=True) has no effect (:issue:`1011`).
+ - Broken arithmetic operations between SparsePanel-Panel (:issue:`1015`)
+ - Unicode repr issues in MultiIndex with non-ascii characters (:issue:`1010`)
+ - DataFrame.lookup() returns inconsistent results if exact match not present
+ (:issue:`1001`)
+ - DataFrame arithmetic operations not treating None as NA (:issue:`992`)
+ - DataFrameGroupBy.apply returns incorrect result (:issue:`991`)
+ - Series.reshape returns incorrect result for multiple dimensions (:issue:`989`)
+ - Series.std and Series.var ignores ddof parameter (:issue:`934`)
+ - DataFrame.append loses index names (:issue:`980`)
+ - DataFrame.plot(kind='bar') ignores color argument (:issue:`958`)
+ - Inconsistent Index comparison results (:issue:`948`)
+ - Improper int dtype DataFrame construction from data with NaN (:issue:`846`)
+  - Removes default 'result' name in groupby results (:issue:`995`)
+ - DataFrame.from_records no longer mutate input columns (:issue:`975`)
+ - Use Index name when grouping by it (:issue:`1313`)
+
+
+
+pandas 0.7.2
+============
+
+**Release date:** March 16, 2012
+
+**New features / modules**
+
+ - Add additional tie-breaking methods in DataFrame.rank (:issue:`874`)
+ - Add ascending parameter to rank in Series, DataFrame (:issue:`875`)
+ - Add sort_columns parameter to allow unsorted plots (:issue:`918`)
+ - IPython tab completion on GroupBy objects
+
+**API Changes**
+
+ - Series.sum returns 0 instead of NA when called on an empty
+ series. Analogously for a DataFrame whose rows or columns are length 0
+ (:issue:`844`)
+
+**Improvements to existing features**
+
+ - Don't use groups dict in Grouper.size (:issue:`860`)
+ - Use khash for Series.value_counts, add raw function to algorithms.py (:issue:`861`)
+ - Enable column access via attributes on GroupBy (:issue:`882`)
+ - Enable setting existing columns (only) via attributes on DataFrame, Panel
+ (:issue:`883`)
+ - Intercept __builtin__.sum in groupby (:issue:`885`)
+ - Can pass dict to DataFrame.fillna to use different values per column (:issue:`661`)
+ - Can select multiple hierarchical groups by passing list of values in .ix
+ (:issue:`134`)
+ - Add level keyword to ``drop`` for dropping values from a level (:issue:`159`)
+ - Add ``coerce_float`` option on DataFrame.from_records (:issue:`893`)
+ - Raise exception if passed date_parser fails in ``read_csv``
+ - Add ``axis`` option to DataFrame.fillna (:issue:`174`)
+ - Fixes to Panel to make it easier to subclass (:issue:`888`)
+
+**Bug fixes**
+
+ - Fix overflow-related bugs in groupby (:issue:`850`, :issue:`851`)
+ - Fix unhelpful error message in parsers (:issue:`856`)
+ - Better err msg for failed boolean slicing of dataframe (:issue:`859`)
+ - Series.count cannot accept a string (level name) in the level argument (:issue:`869`)
+ - Group index platform int check (:issue:`870`)
+ - concat on axis=1 and ignore_index=True raises TypeError (:issue:`871`)
+ - Further unicode handling issues resolved (:issue:`795`)
+ - Fix failure in multiindex-based access in Panel (:issue:`880`)
+ - Fix DataFrame boolean slice assignment failure (:issue:`881`)
+ - Fix combineAdd NotImplementedError for SparseDataFrame (:issue:`887`)
+ - Fix DataFrame.to_html encoding and columns (:issue:`890`, :issue:`891`, :issue:`909`)
+ - Fix na-filling handling in mixed-type DataFrame (:issue:`910`)
+  - Fix to DataFrame.set_value with non-existent row/col (:issue:`911`)
+ - Fix malformed block in groupby when excluding nuisance columns (:issue:`916`)
+  - Fix inconsistent NA handling in dtype=object arrays (:issue:`925`)
+ - Fix missing center-of-mass computation in ewmcov (:issue:`862`)
+ - Don't raise exception when opening read-only HDF5 file (:issue:`847`)
+ - Fix possible out-of-bounds memory access in 0-length Series (:issue:`917`)
+
+
+
+pandas 0.7.1
+============
+
+**Release date:** February 29, 2012
+
+**New features / modules**
+
+ - Add ``to_clipboard`` function to pandas namespace for writing objects to
+ the system clipboard (:issue:`774`)
+ - Add ``itertuples`` method to DataFrame for iterating through the rows of a
+ dataframe as tuples (:issue:`818`)
+ - Add ability to pass fill_value and method to DataFrame and Series align
+ method (:issue:`806`, :issue:`807`)
+ - Add fill_value option to reindex, align methods (:issue:`784`)
+ - Enable concat to produce DataFrame from Series (:issue:`787`)
+ - Add ``between`` method to Series (:issue:`802`)
+ - Add HTML representation hook to DataFrame for the IPython HTML notebook
+ (:issue:`773`)
+ - Support for reading Excel 2007 XML documents using openpyxl
+
+**Improvements to existing features**
+
+ - Improve performance and memory usage of fillna on DataFrame
+ - Can concatenate a list of Series along axis=1 to obtain a DataFrame (:issue:`787`)
+
+**Bug fixes**
+
+ - Fix memory leak when inserting large number of columns into a single
+ DataFrame (:issue:`790`)
+ - Appending length-0 DataFrame with new columns would not result in those new
+ columns being part of the resulting concatenated DataFrame (:issue:`782`)
+ - Fixed groupby corner case when passing dictionary grouper and as_index is
+ False (:issue:`819`)
+ - Fixed bug whereby bool array sometimes had object dtype (:issue:`820`)
+ - Fix exception thrown on np.diff (:issue:`816`)
+ - Fix to_records where columns are non-strings (:issue:`822`)
+ - Fix Index.intersection where indices have incomparable types (:issue:`811`)
+ - Fix ExcelFile throwing an exception for two-line file (:issue:`837`)
+ - Add clearer error message in csv parser (:issue:`835`)
+ - Fix loss of fractional seconds in HDFStore (:issue:`513`)
+ - Fix DataFrame join where columns have datetimes (:issue:`787`)
+ - Work around numpy performance issue in take (:issue:`817`)
+ - Improve comparison operations for NA-friendliness (:issue:`801`)
+ - Fix indexing operation for floating point values (:issue:`780`, :issue:`798`)
+ - Fix groupby case resulting in malformed dataframe (:issue:`814`)
+ - Fix behavior of reindex of Series dropping name (:issue:`812`)
+  - Improve on redundant groupby computation (:issue:`775`)
+ - Catch possible NA assignment to int/bool series with exception (:issue:`839`)
+
+
+
+pandas 0.7.0
+============
+
+**Release date:** 2/9/2012
+
+**New features / modules**
+
+ - New ``merge`` function for efficiently performing full gamut of database /
+ relational-algebra operations. Refactored existing join methods to use the
+ new infrastructure, resulting in substantial performance gains (:issue:`220`,
+ :issue:`249`, :issue:`267`)
+ - New ``concat`` function for concatenating DataFrame or Panel objects along
+ an axis. Can form union or intersection of the other axes. Improves
+ performance of ``DataFrame.append`` (:issue:`468`, :issue:`479`, :issue:`273`)
+ - Handle differently-indexed output values in ``DataFrame.apply`` (:issue:`498`)
+ - Can pass list of dicts (e.g., a list of shallow JSON objects) to DataFrame
+ constructor (:issue:`526`)
+ - Add ``reorder_levels`` method to Series and DataFrame (:issue:`534`)
+ - Add dict-like ``get`` function to DataFrame and Panel (:issue:`521`)
+ - ``DataFrame.iterrows`` method for efficiently iterating through the rows of
+ a DataFrame
+ - Added ``DataFrame.to_panel`` with code adapted from ``LongPanel.to_long``
+ - ``reindex_axis`` method added to DataFrame
+ - Add ``level`` option to binary arithmetic functions on ``DataFrame`` and
+ ``Series``
+ - Add ``level`` option to the ``reindex`` and ``align`` methods on Series and
+ DataFrame for broadcasting values across a level (:issue:`542`, :issue:`552`, others)
+ - Add attribute-based item access to ``Panel`` and add IPython completion (PR
+ :issue:`554`)
+ - Add ``logy`` option to ``Series.plot`` for log-scaling on the Y axis
+ - Add ``index``, ``header``, and ``justify`` options to
+ ``DataFrame.to_string``. Add option to (:issue:`570`, :issue:`571`)
+ - Can pass multiple DataFrames to ``DataFrame.join`` to join on index (:issue:`115`)
+ - Can pass multiple Panels to ``Panel.join`` (:issue:`115`)
+ - Can pass multiple DataFrames to `DataFrame.append` to concatenate (stack)
+ and multiple Series to ``Series.append`` too
+ - Added ``justify`` argument to ``DataFrame.to_string`` to allow different
+ alignment of column headers
+ - Add ``sort`` option to GroupBy to allow disabling sorting of the group keys
+ for potential speedups (:issue:`595`)
+ - Can pass MaskedArray to Series constructor (:issue:`563`)
+ - Add Panel item access via attributes and IPython completion (:issue:`554`)
+ - Implement ``DataFrame.lookup``, fancy-indexing analogue for retrieving
+ values given a sequence of row and column labels (:issue:`338`)
+ - Add ``verbose`` option to ``read_csv`` and ``read_table`` to show number of
+ NA values inserted in non-numeric columns (:issue:`614`)
+ - Can pass a list of dicts or Series to ``DataFrame.append`` to concatenate
+ multiple rows (:issue:`464`)
+ - Add ``level`` argument to ``DataFrame.xs`` for selecting data from other
+ MultiIndex levels. Can take one or more levels with potentially a tuple of
+ keys for flexible retrieval of data (:issue:`371`, :issue:`629`)
+ - New ``crosstab`` function for easily computing frequency tables (:issue:`170`)
+ - Can pass a list of functions to aggregate with groupby on a DataFrame,
+ yielding an aggregated result with hierarchical columns (:issue:`166`)
+ - Add integer-indexing functions ``iget`` in Series and ``irow`` / ``iget``
+ in DataFrame (:issue:`628`)
+ - Add new ``Series.unique`` function, significantly faster than
+ ``numpy.unique`` (:issue:`658`)
+ - Add new ``cummin`` and ``cummax`` instance methods to ``Series`` and
+ ``DataFrame`` (:issue:`647`)
+ - Add new ``value_range`` function to return min/max of a dataframe (:issue:`288`)
+ - Add ``drop`` parameter to ``reset_index`` method of ``DataFrame`` and added
+ method to ``Series`` as well (:issue:`699`)
+ - Add ``isin`` method to Index objects, works just like ``Series.isin`` (GH
+ :issue:`657`)
+ - Implement array interface on Panel so that ufuncs work (re: :issue:`740`)
+ - Add ``sort`` option to ``DataFrame.join`` (:issue:`731`)
+ - Improved handling of NAs (propagation) in binary operations with
+ dtype=object arrays (:issue:`737`)
+ - Add ``abs`` method to Pandas objects
+ - Added ``algorithms`` module to start collecting central algos
+
+**API Changes**
+
+ - Label-indexing with integer indexes now raises KeyError if a label is not
+ found instead of falling back on location-based indexing (:issue:`700`)
+ - Label-based slicing via ``ix`` or ``[]`` on Series will now only work if
+ exact matches for the labels are found or if the index is monotonic (for
+ range selections)
+ - Label-based slicing and sequences of labels can be passed to ``[]`` on a
+ Series for both getting and setting (:issue:`86`)
+ - `[]` operator (``__getitem__`` and ``__setitem__``) will raise KeyError
+ with integer indexes when an index is not contained in the index. The prior
+ behavior would fall back on position-based indexing if a key was not found
+ in the index which would lead to subtle bugs. This is now consistent with
+ the behavior of ``.ix`` on DataFrame and friends (:issue:`328`)
+ - Rename ``DataFrame.delevel`` to ``DataFrame.reset_index`` and add
+ deprecation warning
+ - `Series.sort` (an in-place operation) called on a Series which is a view on
+ a larger array (e.g. a column in a DataFrame) will generate an Exception to
+ prevent accidentally modifying the data source (:issue:`316`)
+ - Refactor to remove deprecated ``LongPanel`` class (:issue:`552`)
+ - Deprecated ``Panel.to_long``, renamed to ``to_frame``
+ - Deprecated ``colSpace`` argument in ``DataFrame.to_string``, renamed to
+ ``col_space``
+ - Rename ``precision`` to ``accuracy`` in engineering float formatter (GH
+ :issue:`395`)
+ - The default delimiter for ``read_csv`` is comma rather than letting
+ ``csv.Sniffer`` infer it
+ - Rename ``col_or_columns`` argument in ``DataFrame.drop_duplicates`` (GH
+ :issue:`734`)
+
+**Improvements to existing features**
+
+ - Better error message in DataFrame constructor when passed column labels
+ don't match data (:issue:`497`)
+ - Substantially improve performance of multi-GroupBy aggregation when a
+ Python function is passed, reuse ndarray object in Cython (:issue:`496`)
+ - Can store objects indexed by tuples and floats in HDFStore (:issue:`492`)
+ - Don't print length by default in Series.to_string, add `length` option (GH
+ :issue:`489`)
+ - Improve Cython code for multi-groupby to aggregate without having to sort
+ the data (:issue:`93`)
+ - Improve MultiIndex reindexing speed by storing tuples in the MultiIndex,
+ test for backwards unpickling compatibility
+ - Improve column reindexing performance by using specialized Cython take
+ function
+ - Further performance tweaking of Series.__getitem__ for standard use cases
+ - Avoid Index dict creation in some cases (i.e. when getting slices, etc.),
+ regression from prior versions
+ - Friendlier error message in setup.py if NumPy not installed
+ - Use common set of NA-handling operations (sum, mean, etc.) in Panel class
+ also (:issue:`536`)
+ - Default name assignment when calling ``reset_index`` on DataFrame with a
+ regular (non-hierarchical) index (:issue:`476`)
+ - Use Cythonized groupers when possible in Series/DataFrame stat ops with
+ ``level`` parameter passed (:issue:`545`)
+ - Ported skiplist data structure to C to speed up ``rolling_median`` by about
+ 5-10x in most typical use cases (:issue:`374`)
+ - Some performance enhancements in constructing a Panel from a dict of
+ DataFrame objects
+ - Made ``Index._get_duplicates`` a public method by removing the underscore
+ - Prettier printing of floats, and column spacing fix (:issue:`395`, :issue:`571`)
+ - Add ``bold_rows`` option to DataFrame.to_html (:issue:`586`)
+ - Improve the performance of ``DataFrame.sort_index`` by up to 5x or more
+ when sorting by multiple columns
+ - Substantially improve performance of DataFrame and Series constructors when
+ passed a nested dict or dict, respectively (:issue:`540`, :issue:`621`)
+ - Modified setup.py so that pip / setuptools will install dependencies (GH
+ :issue:`507`, various pull requests)
+ - Unstack called on DataFrame with non-MultiIndex will return Series (GH
+ :issue:`477`)
+ - Improve DataFrame.to_string and console formatting to be more consistent in
+ the number of displayed digits (:issue:`395`)
+ - Use bottleneck if available for performing NaN-friendly statistical
+ operations that it implemented (:issue:`91`)
+ - Monkey-patch context to traceback in ``DataFrame.apply`` to indicate which
+ row/column the function application failed on (:issue:`614`)
+ - Improved ability of read_table and read_clipboard to parse
+ console-formatted DataFrames (can read the row of index names, etc.)
+ - Can pass list of group labels (without having to convert to an ndarray
+ yourself) to ``groupby`` in some cases (:issue:`659`)
+ - Use ``kind`` argument to Series.order for selecting different sort kinds
+ (:issue:`668`)
+ - Add option to Series.to_csv to omit the index (:issue:`684`)
+ - Add ``delimiter`` as an alternative to ``sep`` in ``read_csv`` and other
+ parsing functions
+ - Substantially improved performance of groupby on DataFrames with many
+ columns by aggregating blocks of columns all at once (:issue:`745`)
+ - Can pass a file handle or StringIO to Series/DataFrame.to_csv (:issue:`765`)
+ - Can pass sequence of integers to DataFrame.irow(icol) and Series.iget, (GH
+ :issue:`654`)
+ - Prototypes for some vectorized string functions
+ - Add float64 hash table to solve the Series.unique problem with NAs (:issue:`714`)
+ - Memoize objects when reading from file to reduce memory footprint
+ - Can get and set a column of a DataFrame with hierarchical columns
+ containing "empty" ('') lower levels without passing the empty levels (PR
+ :issue:`768`)
+
+**Bug fixes**
+
+ - Raise exception in out-of-bounds indexing of Series instead of
+ seg-faulting, regression from earlier releases (:issue:`495`)
+ - Fix error when joining DataFrames of different dtypes within the same
+ typeclass (e.g. float32 and float64) (:issue:`486`)
+ - Fix bug in Series.min/Series.max on objects like datetime.datetime (GH
+ :issue:`487`)
+ - Preserve index names in Index.union (:issue:`501`)
+ - Fix bug in Index joining causing subclass information (like DateRange type)
+ to be lost in some cases (:issue:`500`)
+ - Accept empty list as input to DataFrame constructor, regression from 0.6.0
+ (:issue:`491`)
+ - Can output DataFrame and Series with ndarray objects in a dtype=object
+ array (:issue:`490`)
+ - Return empty string from Series.to_string when called on empty Series (GH
+ :issue:`488`)
+ - Fix exception passing empty list to DataFrame.from_records
+ - Fix Index.format bug (excluding name field) with datetimes with time info
+ - Fix scalar value access in Series to always return NumPy scalars,
+ regression from prior versions (:issue:`510`)
+ - Handle rows skipped at beginning of file in read_* functions (:issue:`505`)
+ - Handle improper dtype casting in ``set_value`` methods
+ - Unary '-' / __neg__ operator on DataFrame was returning integer values
+ - Unbox 0-dim ndarrays from certain operators like all, any in Series
+ - Fix handling of missing columns (was combine_first-specific) in
+ DataFrame.combine for general case (:issue:`529`)
+ - Fix type inference logic with boolean lists and arrays in DataFrame indexing
+ - Use centered sum of squares in R-square computation if entity_effects=True
+ in panel regression
+ - Handle all NA case in Series.{corr, cov}, was raising exception (:issue:`548`)
+ - Aggregating by multiple levels with ``level`` argument to DataFrame, Series
+ stat method, was broken (:issue:`545`)
+  - Fix Cython bug when converter passed to read_csv produced a numeric array
+ (buffer dtype mismatch when passed to Cython type inference function) (GH
+ :issue:`546`)
+ - Fix exception when setting scalar value using .ix on a DataFrame with a
+ MultiIndex (:issue:`551`)
+ - Fix outer join between two DateRanges with different offsets that returned
+ an invalid DateRange
+ - Cleanup DataFrame.from_records failure where index argument is an integer
+  - Fix DataFrame.from_records failure when passed a dictionary
+ - Fix NA handling in {Series, DataFrame}.rank with non-floating point dtypes
+ - Fix bug related to integer type-checking in .ix-based indexing
+ - Handle non-string index name passed to DataFrame.from_records
+ - DataFrame.insert caused the columns name(s) field to be discarded (:issue:`527`)
+  - Fix erroneous results in monotonic many-to-one left joins
+ - Fix DataFrame.to_string to remove extra column white space (:issue:`571`)
+ - Format floats to default to same number of digits (:issue:`395`)
+ - Added decorator to copy docstring from one function to another (:issue:`449`)
+ - Fix error in monotonic many-to-one left joins
+ - Fix __eq__ comparison between DateOffsets with different relativedelta
+ keywords passed
+ - Fix exception caused by parser converter returning strings (:issue:`583`)
+ - Fix MultiIndex formatting bug with integer names (:issue:`601`)
+ - Fix bug in handling of non-numeric aggregates in Series.groupby (:issue:`612`)
+ - Fix TypeError with tuple subclasses (e.g. namedtuple) in
+ DataFrame.from_records (:issue:`611`)
+ - Catch misreported console size when running IPython within Emacs
+ - Fix minor bug in pivot table margins, loss of index names and length-1
+ 'All' tuple in row labels
+ - Add support for legacy WidePanel objects to be read from HDFStore
+ - Fix out-of-bounds segfault in pad_object and backfill_object methods when
+ either source or target array are empty
+ - Could not create a new column in a DataFrame from a list of tuples
+ - Fix bugs preventing SparseDataFrame and SparseSeries working with groupby
+ (:issue:`666`)
+ - Use sort kind in Series.sort / argsort (:issue:`668`)
+ - Fix DataFrame operations on non-scalar, non-pandas objects (:issue:`672`)
+ - Don't convert DataFrame column to integer type when passing integer to
+ __setitem__ (:issue:`669`)
+ - Fix downstream bug in pivot_table caused by integer level names in
+ MultiIndex (:issue:`678`)
+ - Fix SparseSeries.combine_first when passed a dense Series (:issue:`687`)
+ - Fix performance regression in HDFStore loading when DataFrame or Panel
+ stored in table format with datetimes
+ - Raise Exception in DateRange when offset with n=0 is passed (:issue:`683`)
+ - Fix get/set inconsistency with .ix property and integer location but
+ non-integer index (:issue:`707`)
+ - Use right dropna function for SparseSeries. Return dense Series for NA fill
+ value (:issue:`730`)
+ - Fix Index.format bug causing incorrectly string-formatted Series with
+ datetime indexes (:issue:`726`, :issue:`758`)
+ - Fix errors caused by object dtype arrays passed to ols (:issue:`759`)
+ - Fix error where column names lost when passing list of labels to
+ DataFrame.__getitem__, (:issue:`662`)
+ - Fix error whereby top-level week iterator overwrote week instance
+ - Fix circular reference causing memory leak in sparse array / series /
+ frame, (:issue:`663`)
+ - Fix integer-slicing from integers-as-floats (:issue:`670`)
+ - Fix zero division errors in nanops from object dtype arrays in all NA case
+ (:issue:`676`)
+ - Fix csv encoding when using unicode (:issue:`705`, :issue:`717`, :issue:`738`)
+ - Fix assumption that each object contains every unique block type in concat,
+ (:issue:`708`)
+  - Fix sortedness check of multiindex in to_panel (:issue:`719`, :issue:`720`)
+ - Fix that None was not treated as NA in PyObjectHashtable
+ - Fix hashing dtype because of endianness confusion (:issue:`747`, :issue:`748`)
+ - Fix SparseSeries.dropna to return dense Series in case of NA fill value (GH
+ :issue:`730`)
+ - Use map_infer instead of np.vectorize. handle NA sentinels if converter
+ yields numeric array, (:issue:`753`)
+ - Fixes and improvements to DataFrame.rank (:issue:`742`)
+ - Fix catching AttributeError instead of NameError for bottleneck
+ - Try to cast non-MultiIndex to better dtype when calling reset_index (:issue:`726`
+ :issue:`440`)
+ - Fix #1.QNAN0' float bug on 2.6/win64
+ - Allow subclasses of dicts in DataFrame constructor, with tests
+ - Fix problem whereby set_index destroys column multiindex (:issue:`764`)
+ - Hack around bug in generating DateRange from naive DateOffset (:issue:`770`)
+ - Fix bug in DateRange.intersection causing incorrect results with some
+ overlapping ranges (:issue:`771`)
+
+Thanks
+------
+- Craig Austin
+- Chris Billington
+- Marius Cobzarenco
+- Mario Gamboa-Cavazos
+- Hans-Martin Gaudecker
+- Arthur Gerigk
+- Yaroslav Halchenko
+- Jeff Hammerbacher
+- Matt Harrison
+- Andreas Hilboll
+- Luc Kesters
+- Adam Klein
+- Gregg Lind
+- Solomon Negusse
+- Wouter Overmeire
+- Christian Prinoth
+- Jeff Reback
+- Sam Reckoner
+- Craig Reeson
+- Jan Schulz
+- Skipper Seabold
+- Ted Square
+- Graham Taylor
+- Aman Thakral
+- Chris Uga
+- Dieter Vandenbussche
+- Texas P.
+- Pinxing Ye
+- ... and everyone I forgot
+
+
+
+pandas 0.6.1
+============
+
+**Release date:** 12/13/2011
+
+**API Changes**
+
+ - Rename `names` argument in DataFrame.from_records to `columns`. Add
+ deprecation warning
+ - Boolean get/set operations on Series with boolean Series will reindex
+ instead of requiring that the indexes be exactly equal (:issue:`429`)
+
+**New features / modules**
+
+ - Can pass Series to DataFrame.append with ignore_index=True for appending a
+ single row (:issue:`430`)
+ - Add Spearman and Kendall correlation options to Series.corr and
+ DataFrame.corr (:issue:`428`)
+ - Add new `get_value` and `set_value` methods to Series, DataFrame, and Panel
+ to very low-overhead access to scalar elements. df.get_value(row, column)
+ is about 3x faster than df[column][row] by handling fewer cases (:issue:`437`,
+ :issue:`438`). Add similar methods to sparse data structures for compatibility
+ - Add Qt table widget to sandbox (:issue:`435`)
+ - DataFrame.align can accept Series arguments, add axis keyword (:issue:`461`)
+ - Implement new SparseList and SparseArray data structures. SparseSeries now
+ derives from SparseArray (:issue:`463`)
+ - max_columns / max_rows options in set_printoptions (:issue:`453`)
+ - Implement Series.rank and DataFrame.rank, fast versions of
+ scipy.stats.rankdata (:issue:`428`)
+ - Implement DataFrame.from_items alternate constructor (:issue:`444`)
+ - DataFrame.convert_objects method for inferring better dtypes for object
+ columns (:issue:`302`)
+ - Add rolling_corr_pairwise function for computing Panel of correlation
+ matrices (:issue:`189`)
+ - Add `margins` option to `pivot_table` for computing subgroup aggregates (GH
+ :issue:`114`)
+ - Add `Series.from_csv` function (:issue:`482`)
+
+**Improvements to existing features**
+
+ - Improve memory usage of `DataFrame.describe` (do not copy data
+ unnecessarily) (:issue:`425`)
+ - Use same formatting function for outputting floating point Series to console
+ as in DataFrame (:issue:`420`)
+ - DataFrame.delevel will try to infer better dtype for new columns (:issue:`440`)
+ - Exclude non-numeric types in DataFrame.{corr, cov}
+ - Override Index.astype to enable dtype casting (:issue:`412`)
+ - Use same float formatting function for Series.__repr__ (:issue:`420`)
+ - Use available console width to output DataFrame columns (:issue:`453`)
+ - Accept ndarrays when setting items in Panel (:issue:`452`)
+ - Infer console width when printing __repr__ of DataFrame to console (PR
+ :issue:`453`)
+ - Optimize scalar value lookups in the general case by 25% or more in Series
+ and DataFrame
+ - Can pass DataFrame/DataFrame and DataFrame/Series to
+ rolling_corr/rolling_cov (:issue:`462`)
+ - Fix performance regression in cross-sectional count in DataFrame, affecting
+ DataFrame.dropna speed
+ - Column deletion in DataFrame copies no data (computes views on blocks) (GH
+ :issue:`158`)
+ - MultiIndex.get_level_values can take the level name
+ - More helpful error message when DataFrame.plot fails on one of the columns
+ (:issue:`478`)
+ - Improve performance of DataFrame.{index, columns} attribute lookup
+
+**Bug fixes**
+
+ - Fix O(K^2) memory leak caused by inserting many columns without
+ consolidating, had been present since 0.4.0 (:issue:`467`)
+ - `DataFrame.count` should return Series with zero instead of NA with length-0
+ axis (:issue:`423`)
+ - Fix Yahoo! Finance API usage in pandas.io.data (:issue:`419`, :issue:`427`)
+ - Fix upstream bug causing failure in Series.align with empty Series (:issue:`434`)
+ - Function passed to DataFrame.apply can return a list, as long as it's the
+ right length. Regression from 0.4 (:issue:`432`)
+ - Don't "accidentally" upcast scalar values when indexing using .ix (:issue:`431`)
+ - Fix groupby exception raised with as_index=False and single column selected
+ (:issue:`421`)
+ - Implement DateOffset.__ne__ causing downstream bug (:issue:`456`)
+ - Fix __doc__-related issue when converting py -> pyo with py2exe
+ - Bug fix in left join Cython code with duplicate monotonic labels
+ - Fix bug when unstacking multiple levels described in :issue:`451`
+ - Exclude NA values in dtype=object arrays, regression from 0.5.0 (:issue:`469`)
+ - Use Cython map_infer function in DataFrame.applymap to properly infer
+ output type, handle tuple return values and other things that were breaking
+ (:issue:`465`)
+ - Handle floating point index values in HDFStore (:issue:`454`)
+ - Fixed stale column reference bug (cached Series object) caused by type
+ change / item deletion in DataFrame (:issue:`473`)
+ - Index.get_loc should always raise Exception when there are duplicates
+ - Handle differently-indexed Series input to DataFrame constructor (:issue:`475`)
+ - Omit nuisance columns in multi-groupby with Python function
+ - Buglet in handling of single grouping in general apply
+ - Handle type inference properly when passing list of lists or tuples to
+ DataFrame constructor (:issue:`484`)
+ - Preserve Index / MultiIndex names in GroupBy.apply concatenation step (GH
+ :issue:`481`)
+
+Thanks
+------
+- Ralph Bean
+- Luca Beltrame
+- Marius Cobzarenco
+- Andreas Hilboll
+- Jev Kuznetsov
+- Adam Lichtenstein
+- Wouter Overmeire
+- Fernando Perez
+- Nathan Pinger
+- Christian Prinoth
+- Alex Reyfman
+- Joon Ro
+- Chang She
+- Ted Square
+- Chris Uga
+- Dieter Vandenbussche
+
+
+
+pandas 0.6.0
+============
+
+**Release date:** 11/25/2011
+
+**API Changes**
+
+ - Arithmetic methods like `sum` will attempt to sum dtype=object values by
+ default instead of excluding them (:issue:`382`)
+
+**New features / modules**
+
+ - Add `melt` function to `pandas.core.reshape`
+ - Add `level` parameter to group by level in Series and DataFrame
+ descriptive statistics (:issue:`313`)
+  - Add `head` and `tail` methods to Series, analogous to DataFrame (PR
+    :issue:`296`)
+ - Add `Series.isin` function which checks if each value is contained in a
+ passed sequence (:issue:`289`)
+ - Add `float_format` option to `Series.to_string`
+ - Add `skip_footer` (:issue:`291`) and `converters` (:issue:`343`) options to
+ `read_csv` and `read_table`
+ - Add proper, tested weighted least squares to standard and panel OLS (GH
+ :issue:`303`)
+ - Add `drop_duplicates` and `duplicated` functions for removing duplicate
+ DataFrame rows and checking for duplicate rows, respectively (:issue:`319`)
+ - Implement logical (boolean) operators ``&``, ``|``, ``^`` on DataFrame
+ (:issue:`347`)
+ - Add `Series.mad`, mean absolute deviation, matching DataFrame
+ - Add `QuarterEnd` DateOffset (:issue:`321`)
+ - Add matrix multiplication function `dot` to DataFrame (:issue:`65`)
+ - Add `orient` option to `Panel.from_dict` to ease creation of mixed-type
+ Panels (:issue:`359`, :issue:`301`)
+ - Add `DataFrame.from_dict` with similar `orient` option
+ - Can now pass list of tuples or list of lists to `DataFrame.from_records`
+ for fast conversion to DataFrame (:issue:`357`)
+ - Can pass multiple levels to groupby, e.g. `df.groupby(level=[0, 1])` (GH
+ :issue:`103`)
+ - Can sort by multiple columns in `DataFrame.sort_index` (:issue:`92`, :issue:`362`)
+ - Add fast `get_value` and `put_value` methods to DataFrame and
+ micro-performance tweaks (:issue:`360`)
+ - Add `cov` instance methods to Series and DataFrame (:issue:`194`, :issue:`362`)
+ - Add bar plot option to `DataFrame.plot` (:issue:`348`)
+ - Add `idxmin` and `idxmax` functions to Series and DataFrame for computing
+ index labels achieving maximum and minimum values (:issue:`286`)
+ - Add `read_clipboard` function for parsing DataFrame from OS clipboard,
+ should work across platforms (:issue:`300`)
+ - Add `nunique` function to Series for counting unique elements (:issue:`297`)
+ - DataFrame constructor will use Series name if no columns passed (:issue:`373`)
+ - Support regular expressions and longer delimiters in read_table/read_csv,
+ but does not handle quoted strings yet (:issue:`364`)
+ - Add `DataFrame.to_html` for formatting DataFrame to HTML (:issue:`387`)
+ - MaskedArray can be passed to DataFrame constructor and masked values will be
+ converted to NaN (:issue:`396`)
+ - Add `DataFrame.boxplot` function (:issue:`368`, others)
+ - Can pass extra args, kwds to DataFrame.apply (:issue:`376`)
+
+**Improvements to existing features**
+
+ - Raise more helpful exception if date parsing fails in DateRange (:issue:`298`)
+ - Vastly improved performance of GroupBy on axes with a MultiIndex (:issue:`299`)
+ - Print level names in hierarchical index in Series repr (:issue:`305`)
+ - Return DataFrame when performing GroupBy on selected column and
+ as_index=False (:issue:`308`)
+ - Can pass vector to `on` argument in `DataFrame.join` (:issue:`312`)
+ - Don't show Series name if it's None in the repr, also omit length for short
+ Series (:issue:`317`)
+ - Show legend by default in `DataFrame.plot`, add `legend` boolean flag (GH
+ :issue:`324`)
+ - Significantly improved performance of `Series.order`, which also makes
+ np.unique called on a Series faster (:issue:`327`)
+ - Faster cythonized count by level in Series and DataFrame (:issue:`341`)
+ - Raise exception if dateutil 2.0 installed on Python 2.x runtime (:issue:`346`)
+ - Significant GroupBy performance enhancement with multiple keys with many
+ "empty" combinations
+ - New Cython vectorized function `map_infer` speeds up `Series.apply` and
+ `Series.map` significantly when passed elementwise Python function,
+ motivated by :issue:`355`
+ - Cythonized `cache_readonly`, resulting in substantial micro-performance
+ enhancements throughout the codebase (:issue:`361`)
+ - Special Cython matrix iterator for applying arbitrary reduction operations
+ with 3-5x better performance than `np.apply_along_axis` (:issue:`309`)
+ - Add `raw` option to `DataFrame.apply` for getting better performance when
+ the passed function only requires an ndarray (:issue:`309`)
+ - Improve performance of `MultiIndex.from_tuples`
+ - Can pass multiple levels to `stack` and `unstack` (:issue:`370`)
+ - Can pass multiple values columns to `pivot_table` (:issue:`381`)
+ - Can call `DataFrame.delevel` with standard Index with name set (:issue:`393`)
+ - Use Series name in GroupBy for result index (:issue:`363`)
+ - Refactor Series/DataFrame stat methods to use common set of NaN-friendly
+ function
+ - Handle NumPy scalar integers at C level in Cython conversion routines
+
+**Bug fixes**
+
+ - Fix bug in `DataFrame.to_csv` when writing a DataFrame with an index
+ name (:issue:`290`)
+ - DataFrame should clear its Series caches on consolidation, was causing
+ "stale" Series to be returned in some corner cases (:issue:`304`)
+ - DataFrame constructor failed if a column had a list of tuples (:issue:`293`)
+ - Ensure that `Series.apply` always returns a Series and implement
+ `Series.round` (:issue:`314`)
+ - Support boolean columns in Cythonized groupby functions (:issue:`315`)
+ - `DataFrame.describe` should not fail if there are no numeric columns,
+ instead return categorical describe (:issue:`323`)
+ - Fixed bug which could cause columns to be printed in wrong order in
+ `DataFrame.to_string` if specific list of columns passed (:issue:`325`)
+ - Fix legend plotting failure if DataFrame columns are integers (:issue:`326`)
+ - Shift start date back by one month for Yahoo! Finance API in pandas.io.data
+ (:issue:`329`)
+ - Fix `DataFrame.join` failure on unconsolidated inputs (:issue:`331`)
+ - DataFrame.min/max will no longer fail on mixed-type DataFrame (:issue:`337`)
+ - Fix `read_csv` / `read_table` failure when passing list to index_col that is
+ not in ascending order (:issue:`349`)
+ - Fix failure passing Int64Index to Index.union when both are monotonic
+ - Fix error when passing SparseSeries to (dense) DataFrame constructor
+ - Added missing bang at top of setup.py (:issue:`352`)
+ - Change `is_monotonic` on MultiIndex so it properly compares the tuples
+ - Fix MultiIndex outer join logic (:issue:`351`)
+ - Set index name attribute with single-key groupby (:issue:`358`)
+ - Bug fix in reflexive binary addition in Series and DataFrame for
+ non-commutative operations (like string concatenation) (:issue:`353`)
+ - setupegg.py will invoke Cython (:issue:`192`)
+ - Fix block consolidation bug after inserting column into MultiIndex (:issue:`366`)
+ - Fix bug in join operations between Index and Int64Index (:issue:`367`)
+ - Handle min_periods=0 case in moving window functions (:issue:`365`)
+ - Fixed corner cases in DataFrame.apply/pivot with empty DataFrame (:issue:`378`)
+ - Fixed repr exception when Series name is a tuple
+ - Always return DateRange from `asfreq` (:issue:`390`)
+  - Pass level names to `swaplevel` (:issue:`379`)
+ - Don't lose index names in `MultiIndex.droplevel` (:issue:`394`)
+ - Infer more proper return type in `DataFrame.apply` when no columns or rows
+ depending on whether the passed function is a reduction (:issue:`389`)
+ - Always return NA/NaN from Series.min/max and DataFrame.min/max when all of a
+ row/column/values are NA (:issue:`384`)
+ - Enable partial setting with .ix / advanced indexing (:issue:`397`)
+ - Handle mixed-type DataFrames correctly in unstack, do not lose type
+ information (:issue:`403`)
+ - Fix integer name formatting bug in Index.format and in Series.__repr__
+ - Handle label types other than string passed to groupby (:issue:`405`)
+ - Fix bug in .ix-based indexing with partial retrieval when a label is not
+ contained in a level
+ - Index name was not being pickled (:issue:`408`)
+ - Level name should be passed to result index in GroupBy.apply (:issue:`416`)
+
+Thanks
+------
+
+- Craig Austin
+- Marius Cobzarenco
+- Joel Cross
+- Jeff Hammerbacher
+- Adam Klein
+- Thomas Kluyver
+- Jev Kuznetsov
+- Kieran O'Mahony
+- Wouter Overmeire
+- Nathan Pinger
+- Christian Prinoth
+- Skipper Seabold
+- Chang She
+- Ted Square
+- Aman Thakral
+- Chris Uga
+- Dieter Vandenbussche
+- carljv
+- rsamson
+
+
+
+pandas 0.5.0
+============
+
+**Release date:** 10/24/2011
+
+This release of pandas includes a number of API changes (see below) and cleanup
+of deprecated APIs from pre-0.4.0 releases. There are also bug fixes, new
+features, numerous significant performance enhancements, and includes a new
+IPython completer hook to enable tab completion of DataFrame columns accesses
+as attributes (a new feature).
+
+In addition to the changes listed here from 0.4.3 to 0.5.0, the minor releases
+0.4.1, 0.4.2, and 0.4.3 brought some significant new functionality and
+performance improvements that are worth taking a look at.
+
+Thanks to all for bug reports, contributed patches and generally providing
+feedback on the library.
+
+**API Changes**
+
+ - `read_table`, `read_csv`, and `ExcelFile.parse` default arguments for
+ `index_col` is now None. To use one or more of the columns as the resulting
+ DataFrame's index, these must be explicitly specified now
+ - Parsing functions like `read_csv` no longer parse dates by default (GH
+ :issue:`225`)
+ - Removed `weights` option in panel regression which was not doing anything
+ principled (:issue:`155`)
+ - Changed `buffer` argument name in `Series.to_string` to `buf`
+ - `Series.to_string` and `DataFrame.to_string` now return strings by default
+ instead of printing to sys.stdout
+ - Deprecated `nanRep` argument in various `to_string` and `to_csv` functions
+ in favor of `na_rep`. Will be removed in 0.6 (:issue:`275`)
+ - Renamed `delimiter` to `sep` in `DataFrame.from_csv` for consistency
+ - Changed order of `Series.clip` arguments to match those of `numpy.clip` and
+ added (unimplemented) `out` argument so `numpy.clip` can be called on a
+ Series (:issue:`272`)
+ - Series functions renamed (and thus deprecated) in 0.4 series have been
+ removed:
+
+ * `asOf`, use `asof`
+ * `toDict`, use `to_dict`
+ * `toString`, use `to_string`
+ * `toCSV`, use `to_csv`
+ * `merge`, use `map`
+ * `applymap`, use `apply`
+ * `combineFirst`, use `combine_first`
+ * `_firstTimeWithValue` use `first_valid_index`
+ * `_lastTimeWithValue` use `last_valid_index`
+
+ - DataFrame functions renamed / deprecated in 0.4 series have been removed:
+
+ * `asMatrix` method, use `as_matrix` or `values` attribute
+ * `combineFirst`, use `combine_first`
+ * `getXS`, use `xs`
+ * `merge`, use `join`
+ * `fromRecords`, use `from_records`
+ * `fromcsv`, use `from_csv`
+ * `toRecords`, use `to_records`
+ * `toDict`, use `to_dict`
+ * `toString`, use `to_string`
+ * `toCSV`, use `to_csv`
+ * `_firstTimeWithValue` use `first_valid_index`
+ * `_lastTimeWithValue` use `last_valid_index`
+ * `toDataMatrix` is no longer needed
+ * `rows()` method, use `index` attribute
+ * `cols()` method, use `columns` attribute
+ * `dropEmptyRows()`, use `dropna(how='all')`
+ * `dropIncompleteRows()`, use `dropna()`
+ * `tapply(f)`, use `apply(f, axis=1)`
+ * `tgroupby(keyfunc, aggfunc)`, use `groupby` with `axis=1`
+
+ - Other outstanding deprecations have been removed:
+
+ * `indexField` argument in `DataFrame.from_records`
+ * `missingAtEnd` argument in `Series.order`. Use `na_last` instead
+ * `Series.fromValue` classmethod, use regular `Series` constructor instead
+ * Functions `parseCSV`, `parseText`, and `parseExcel` methods in
+ `pandas.io.parsers` have been removed
+ * `Index.asOfDate` function
+ * `Panel.getMinorXS` (use `minor_xs`) and `Panel.getMajorXS` (use
+ `major_xs`)
+ * `Panel.toWide`, use `Panel.to_wide` instead
+
+**New features / modules**
+
+ - Added `DataFrame.align` method with standard join options
+ - Added `parse_dates` option to `read_csv` and `read_table` methods to
+ optionally try to parse dates in the index columns
+ - Add `nrows`, `chunksize`, and `iterator` arguments to `read_csv` and
+ `read_table`. The last two return a new `TextParser` class capable of
+ lazily iterating through chunks of a flat file (:issue:`242`)
+ - Added ability to join on multiple columns in `DataFrame.join` (:issue:`214`)
+ - Added private `_get_duplicates` function to `Index` for identifying
+ duplicate values more easily
+ - Added column attribute access to DataFrame, e.g. df.A equivalent to df['A']
+ if 'A' is a column in the DataFrame (:issue:`213`)
+ - Added IPython tab completion hook for DataFrame columns. (:issue:`233`, :issue:`230`)
+ - Implement `Series.describe` for Series containing objects (:issue:`241`)
+ - Add inner join option to `DataFrame.join` when joining on key(s) (:issue:`248`)
+ - Can select set of DataFrame columns by passing a list to `__getitem__` (GH
+ :issue:`253`)
+ - Can use & and | to intersection / union Index objects, respectively (GH
+ :issue:`261`)
+ - Added `pivot_table` convenience function to pandas namespace (:issue:`234`)
+ - Implemented `Panel.rename_axis` function (:issue:`243`)
+ - DataFrame will show index level names in console output
+ - Implemented `Panel.take`
+ - Add `set_eng_float_format` function for setting alternate DataFrame
+ floating point string formatting
+ - Add convenience `set_index` function for creating a DataFrame index from
+ its existing columns
+
+**Improvements to existing features**
+
+ - Major performance improvements in file parsing functions `read_csv` and
+ `read_table`
+ - Added Cython function for converting tuples to ndarray very fast. Speeds up
+ many MultiIndex-related operations
+ - File parsing functions like `read_csv` and `read_table` will explicitly
+ check if a parsed index has duplicates and raise a more helpful exception
+ rather than deferring the check until later
+ - Refactored merging / joining code into a tidy class and disabled unnecessary
+ computations in the float/object case, thus getting about 10% better
+ performance (:issue:`211`)
+ - Improved speed of `DataFrame.xs` on mixed-type DataFrame objects by about
+ 5x, regression from 0.3.0 (:issue:`215`)
+ - With new `DataFrame.align` method, speeding up binary operations between
+ differently-indexed DataFrame objects by 10-25%.
+ - Significantly sped up conversion of nested dict into DataFrame (:issue:`212`)
+ - Can pass hierarchical index level name to `groupby` instead of the level
+ number if desired (:issue:`223`)
+ - Add support for different delimiters in `DataFrame.to_csv` (:issue:`244`)
+ - Add more helpful error message when importing pandas post-installation from
+ the source directory (:issue:`250`)
+ - Significantly speed up DataFrame `__repr__` and `count` on large mixed-type
+ DataFrame objects
+ - Better handling of pyx file dependencies in Cython module build (:issue:`271`)
+
+**Bug fixes**
+
+ - `read_csv` / `read_table` fixes
+
+ - Be less aggressive about converting float->int in cases of floating point
+ representations of integers like 1.0, 2.0, etc.
+    - "True"/"False" will now get correctly converted to boolean
+ - Index name attribute will get set when specifying an index column
+ - Passing column names should force `header=None` (:issue:`257`)
+ - Don't modify passed column names when `index_col` is not None
+ (:issue:`258`)
+ - Can sniff CSV separator in zip file (since seek is not supported, was
+ failing before)
+
+ - Worked around matplotlib "bug" in which series[:, np.newaxis] fails. Should
+ be reported upstream to matplotlib (:issue:`224`)
+ - DataFrame.iteritems was not returning Series with the name attribute
+ set. Also neither was DataFrame._series
+ - Can store datetime.date objects in HDFStore (:issue:`231`)
+ - Index and Series names are now stored in HDFStore
+ - Fixed problem in which data would get upcasted to object dtype in
+ GroupBy.apply operations (:issue:`237`)
+ - Fixed outer join bug with empty DataFrame (:issue:`238`)
+ - Can create empty Panel (:issue:`239`)
+ - Fix join on single key when passing list with 1 entry (:issue:`246`)
+ - Don't raise Exception on plotting DataFrame with an all-NA column (:issue:`251`,
+ :issue:`254`)
+ - Bug min/max errors when called on integer DataFrames (:issue:`241`)
+ - `DataFrame.iteritems` and `DataFrame._series` not assigning name attribute
+ - Panel.__repr__ raised exception on length-0 major/minor axes
+ - `DataFrame.join` on key with empty DataFrame produced incorrect columns
+ - Implemented `MultiIndex.diff` (:issue:`260`)
+ - `Int64Index.take` and `MultiIndex.take` lost name field, fix downstream
+ issue :issue:`262`
+ - Can pass list of tuples to `Series` (:issue:`270`)
+ - Can pass level name to `DataFrame.stack`
+ - Support set operations between MultiIndex and Index
+ - Fix many corner cases in MultiIndex set operations
+ - Fix MultiIndex-handling bug with GroupBy.apply when returned groups are not
+ indexed the same
+ - Fix corner case bugs in DataFrame.apply
+ - Setting DataFrame index did not cause Series cache to get cleared
+ - Various int32 -> int64 platform-specific issues
+ - Don't be too aggressive converting to integer when parsing file with
+ MultiIndex (:issue:`285`)
+ - Fix bug when slicing Series with negative indices before beginning
+
+Thanks
+------
+
+- Thomas Kluyver
+- Daniel Fortunov
+- Aman Thakral
+- Luca Beltrame
+- Wouter Overmeire
+
+
+
+pandas 0.4.3
+============
+
+Release notes
+-------------
+
+**Release date:** 10/9/2011
+
+This is largely a bugfix release from 0.4.2 but also includes a handful of new
+and enhanced features. Also, pandas can now be installed and used on Python 3
+(thanks Thomas Kluyver!).
+
+**New features / modules**
+
+ - Python 3 support using 2to3 (:issue:`200`, Thomas Kluyver)
+ - Add `name` attribute to `Series` and added relevant logic and tests. Name
+ now prints as part of `Series.__repr__`
+ - Add `name` attribute to standard Index so that stacking / unstacking does
+ not discard names and so that indexed DataFrame objects can be reliably
+ round-tripped to flat files, pickle, HDF5, etc.
+ - Add `isnull` and `notnull` as instance methods on Series (:issue:`209`, :issue:`203`)
+
+**Improvements to existing features**
+
+ - Skip xlrd-related unit tests if not installed
+ - `Index.append` and `MultiIndex.append` can accept a list of Index objects to
+ concatenate together
+ - Altered binary operations on differently-indexed SparseSeries objects to use
+ the integer-based (dense) alignment logic which is faster with a larger
+ number of blocks (:issue:`205`)
+ - Refactored `Series.__repr__` to be a bit more clean and consistent
+
+**API Changes**
+
+ - `Series.describe` and `DataFrame.describe` now bring the 25% and 75%
+ quartiles instead of the 10% and 90% deciles. The other outputs have not
+ changed
+ - `Series.toString` will print deprecation warning, has been de-camelCased to
+ `to_string`
+
+**Bug fixes**
+
+ - Fix broken interaction between `Index` and `Int64Index` when calling
+ intersection. Implement `Int64Index.intersection`
+ - `MultiIndex.sortlevel` discarded the level names (:issue:`202`)
+ - Fix bugs in groupby, join, and append due to improper concatenation of
+ `MultiIndex` objects (:issue:`201`)
+ - Fix regression from 0.4.1, `isnull` and `notnull` ceased to work on other
+ kinds of Python scalar objects like `datetime.datetime`
+ - Raise more helpful exception when attempting to write empty DataFrame or
+ LongPanel to `HDFStore` (:issue:`204`)
+ - Use stdlib csv module to properly escape strings with commas in
+ `DataFrame.to_csv` (:issue:`206`, Thomas Kluyver)
+ - Fix Python ndarray access in Cython code for sparse blocked index integrity
+ check
+ - Fix bug writing Series to CSV in Python 3 (:issue:`209`)
+ - Miscellaneous Python 3 bugfixes
+
+Thanks
+------
+
+ - Thomas Kluyver
+ - rsamson
+
+
+
+pandas 0.4.2
+============
+
+Release notes
+-------------
+
+**Release date:** 10/3/2011
+
+This is a performance optimization release with several bug fixes. The new
+Int64Index and new merging / joining Cython code and related Python
+infrastructure are the main new additions
+
+**New features / modules**
+
+ - Added fast `Int64Index` type with specialized join, union,
+ intersection. Will result in significant performance enhancements for
+ int64-based time series (e.g. using NumPy's datetime64 one day) and also
+ faster operations on DataFrame objects storing record array-like data.
+ - Refactored `Index` classes to have a `join` method and associated data
+ alignment routines throughout the codebase to be able to leverage optimized
+ joining / merging routines.
+ - Added `Series.align` method for aligning two series with choice of join
+ method
+ - Wrote faster Cython data alignment / merging routines resulting in
+ substantial speed increases
+ - Added `is_monotonic` property to `Index` classes with associated Cython
+ code to evaluate the monotonicity of the `Index` values
+ - Add method `get_level_values` to `MultiIndex`
+ - Implemented shallow copy of `BlockManager` object in `DataFrame` internals
+
+**Improvements to existing features**
+
+ - Improved performance of `isnull` and `notnull`, a regression from v0.3.0
+ (:issue:`187`)
+ - Wrote templating / code generation script to auto-generate Cython code for
+ various functions which need to be available for the 4 major data types
+ used in pandas (float64, bool, object, int64)
+ - Refactored code related to `DataFrame.join` so that intermediate aligned
+ copies of the data in each `DataFrame` argument do not need to be
+ created. Substantial performance increases result (:issue:`176`)
+ - Substantially improved performance of generic `Index.intersection` and
+ `Index.union`
+ - Improved performance of `DateRange.union` with overlapping ranges and
+ non-cacheable offsets (like Minute). Implemented analogous fast
+ `DateRange.intersection` for overlapping ranges.
+ - Implemented `BlockManager.take` resulting in significantly faster `take`
+ performance on mixed-type `DataFrame` objects (:issue:`104`)
+ - Improved performance of `Series.sort_index`
+ - Significant groupby performance enhancement: removed unnecessary integrity
+ checks in DataFrame internals that were slowing down slicing operations to
+ retrieve groups
+ - Added informative Exception when passing dict to DataFrame groupby
+ aggregation with axis != 0
+
+**API Changes**
+
+None
+
+**Bug fixes**
+
+ - Fixed minor unhandled exception in Cython code implementing fast groupby
+ aggregation operations
+ - Fixed bug in unstacking code manifesting with more than 3 hierarchical
+ levels
+ - Throw exception when step specified in label-based slice (:issue:`185`)
+ - Fix isnull to correctly work with np.float32. Fix upstream bug described in
+ :issue:`182`
+ - Finish implementation of as_index=False in groupby for DataFrame
+ aggregation (:issue:`181`)
+ - Raise SkipTest for pre-epoch HDFStore failure. Real fix will be sorted out
+ via datetime64 dtype
+
+Thanks
+------
+
+- Uri Laserson
+- Scott Sinclair
+
+
+
+pandas 0.4.1
+============
+
+Release notes
+-------------
+
+**Release date:** 9/25/2011
+
+This is primarily a bug fix release but includes some new features and
+improvements
+
+**New features / modules**
+
+ - Added new `DataFrame` methods `get_dtype_counts` and property `dtypes`
+ - Setting of values using ``.ix`` indexing attribute in mixed-type DataFrame
+ objects has been implemented (fixes :issue:`135`)
+ - `read_csv` can read multiple columns into a `MultiIndex`. DataFrame's
+ `to_csv` method will properly write out a `MultiIndex` which can be read
+ back (:issue:`151`, thanks to Skipper Seabold)
+ - Wrote fast time series merging / joining methods in Cython. Will be
+ integrated later into DataFrame.join and related functions
+ - Added `ignore_index` option to `DataFrame.append` for combining unindexed
+ records stored in a DataFrame
+
+**Improvements to existing features**
+
+ - Some speed enhancements with internal Index type-checking function
+ - `DataFrame.rename` has a new `copy` parameter which can rename a DataFrame
+ in place
+ - Enable unstacking by level name (:issue:`142`)
+ - Enable sortlevel to work by level name (:issue:`141`)
+ - `read_csv` can automatically "sniff" other kinds of delimiters using
+ `csv.Sniffer` (:issue:`146`)
+ - Improved speed of unit test suite by about 40%
+ - Exception will not be raised calling `HDFStore.remove` on non-existent node
+ with where clause
+ - Optimized `_ensure_index` function resulting in performance savings in
+ type-checking Index objects
+
+**API Changes**
+
+None
+
+**Bug fixes**
+
+ - Fixed DataFrame constructor bug causing downstream problems (e.g. .copy()
+ failing) when passing a Series as the values along with a column name and
+ index
+ - Fixed single-key groupby on DataFrame with as_index=False (:issue:`160`)
+ - `Series.shift` was failing on integer Series (:issue:`154`)
+ - `unstack` methods were producing incorrect output in the case of duplicate
+ hierarchical labels. An exception will now be raised (:issue:`147`)
+ - Calling `count` with level argument caused reduceat failure or segfault in
+ earlier NumPy (:issue:`169`)
+ - Fixed `DataFrame.corrwith` to automatically exclude non-numeric data (GH
+ :issue:`144`)
+ - Unicode handling bug fixes in `DataFrame.to_string` (:issue:`138`)
+ - Excluding OLS degenerate unit test case that was causing platform specific
+ failure (:issue:`149`)
+ - Skip blosc-dependent unit tests for PyTables < 2.2 (:issue:`137`)
+ - Calling `copy` on `DateRange` did not copy over attributes to the new object
+ (:issue:`168`)
+ - Fix bug in `HDFStore` in which Panel data could be appended to a Table with
+ different item order, thus resulting in an incorrect result read back
+
+Thanks
+------
+- Yaroslav Halchenko
+- Jeff Reback
+- Skipper Seabold
+- Dan Lovell
+- Nick Pentreath
+
+
+
+pandas 0.4.0
+============
+
+Release notes
+-------------
+
+**Release date:** 9/12/2011
+
+**New features / modules**
+
+ - `pandas.core.sparse` module: "Sparse" (mostly-NA, or some other fill value)
+ versions of `Series`, `DataFrame`, and `Panel`. For low-density data, this
+ will result in significant performance boosts, and smaller memory
+ footprint. Added `to_sparse` methods to `Series`, `DataFrame`, and
+ `Panel`. See online documentation for more on these
+ - Fancy indexing operator on Series / DataFrame, e.g. via .ix operator. Both
+ getting and setting of values is supported; however, setting values will only
+ currently work on homogeneously-typed DataFrame objects. Things like:
+
+ * series.ix[[d1, d2, d3]]
+ * frame.ix[5:10, ['C', 'B', 'A']], frame.ix[5:10, 'A':'C']
+ * frame.ix[date1:date2]
+
+ - Significantly enhanced `groupby` functionality
+
+ * Can groupby multiple keys, e.g. df.groupby(['key1', 'key2']). Iteration with
+    multiple groupings produces a flattened tuple
+ * "Nuisance" columns (non-aggregatable) will automatically be excluded from
+ DataFrame aggregation operations
+ * Added automatic "dispatching to Series / DataFrame methods to more easily
+ invoke methods on groups. e.g. s.groupby(crit).std() will work even though
+ `std` is not implemented on the `GroupBy` class
+
+ - Hierarchical / multi-level indexing
+
+ * New the `MultiIndex` class. Integrated `MultiIndex` into `Series` and
+    `DataFrame` fancy indexing, slicing, __getitem__ and __setitem__,
+ reindexing, etc. Added `level` keyword argument to `groupby` to enable
+ grouping by a level of a `MultiIndex`
+
+ - New data reshaping functions: `stack` and `unstack` on DataFrame and Series
+
+ * Integrate with MultiIndex to enable sophisticated reshaping of data
+
+ - `Index` objects (labels for axes) are now capable of holding tuples
+ - `Series.describe`, `DataFrame.describe`: produces an R-like table of summary
+ statistics about each data column
+ - `DataFrame.quantile`, `Series.quantile` for computing sample quantiles of data
+ across requested axis
+ - Added general `DataFrame.dropna` method to replace `dropIncompleteRows` and
+ `dropEmptyRows`, deprecated those.
+ - `Series` arithmetic methods with optional fill_value for missing data,
+ e.g. a.add(b, fill_value=0). If a location is missing for both it will still
+ be missing in the result though.
+ - fill_value option has been added to `DataFrame`.{add, mul, sub, div} methods
+ similar to `Series`
+ - Boolean indexing with `DataFrame` objects: data[data > 0.1] = 0.1 or
+    data[data > other] = 1.
+ - `pytz` / tzinfo support in `DateRange`
+
+ * `tz_localize`, `tz_normalize`, and `tz_validate` methods added
+
+ - Added `ExcelFile` class to `pandas.io.parsers` for parsing multiple sheets out
+ of a single Excel 2003 document
+ - `GroupBy` aggregations can now optionally *broadcast*, e.g. produce an object
+ of the same size with the aggregated value propagated
+ - Added `select` function in all data structures: reindex axis based on
+ arbitrary criterion (function returning boolean value),
+ e.g. frame.select(lambda x: 'foo' in x, axis=1)
+ - `DataFrame.consolidate` method, API function relating to redesigned internals
+ - `DataFrame.insert` method for inserting column at a specified location rather
+ than the default __setitem__ behavior (which puts it at the end)
+ - `HDFStore` class in `pandas.io.pytables` has been largely rewritten using
+ patches from Jeff Reback from others. It now supports mixed-type `DataFrame`
+ and `Series` data and can store `Panel` objects. It also has the option to
+ query `DataFrame` and `Panel` data. Loading data from legacy `HDFStore`
+ files is supported explicitly in the code
+ - Added `set_printoptions` method to modify appearance of DataFrame tabular
+ output
+ - `rolling_quantile` functions; a moving version of `Series.quantile` /
+ `DataFrame.quantile`
+ - Generic `rolling_apply` moving window function
+ - New `drop` method added to `Series`, `DataFrame`, etc. which can drop a set of
+ labels from an axis, producing a new object
+ - `reindex` methods now sport a `copy` option so that data is not forced to be
+    copied when the resulting object is indexed the same
+ - Added `sort_index` methods to Series and Panel. Renamed `DataFrame.sort`
+ to `sort_index`. Leaving `DataFrame.sort` for now.
+ - Added ``skipna`` option to statistical instance methods on all the data
+ structures
+ - `pandas.io.data` module providing a consistent interface for reading time
+ series data from several different sources
+
+**Improvements to existing features**
+
+ * The 2-dimensional `DataFrame` and `DataMatrix` classes have been extensively
+ redesigned internally into a single class `DataFrame`, preserving where
+ possible their optimal performance characteristics. This should reduce
+ confusion from users about which class to use.
+
+ * Note that under the hood there is a new essentially "lazy evaluation"
+    scheme with respect to adding columns to DataFrame. During some
+ operations, like-typed blocks will be "consolidated" but not before.
+
+ * `DataFrame` accessing columns repeatedly is now significantly faster than
+ `DataMatrix` used to be in 0.3.0 due to an internal Series caching mechanism
+ (which are all views on the underlying data)
+ * Column ordering for mixed type data is now completely consistent in
+ `DataFrame`. In prior releases, there was inconsistent column ordering in
+ `DataMatrix`
+ * Improved console / string formatting of DataMatrix with negative numbers
+ * Improved tabular data parsing functions, `read_table` and `read_csv`:
+
+ * Added `skiprows` and `na_values` arguments to `pandas.io.parsers` functions
+ for more flexible IO
+ * `parseCSV` / `read_csv` functions and others in `pandas.io.parsers` now can
+ take a list of custom NA values, and also a list of rows to skip
+
+ * Can slice `DataFrame` and get a view of the data (when homogeneously typed),
+ e.g. frame.xs(idx, copy=False) or frame.ix[idx]
+ * Many speed optimizations throughout `Series` and `DataFrame`
+ * Eager evaluation of groups when calling ``groupby`` functions, so if there is
+    an exception with the grouping function it will be raised immediately versus
+ sometime later on when the groups are needed
+ * `datetools.WeekOfMonth` offset can be parameterized with `n` different than 1
+ or -1.
+ * Statistical methods on DataFrame like `mean`, `std`, `var`, `skew` will now
+ ignore non-numerical data. Before a not very useful error message was
+ generated. A flag `numeric_only` has been added to `DataFrame.sum` and
+ `DataFrame.count` to enable this behavior in those methods if so desired
+ (disabled by default)
+ * `DataFrame.pivot` generalized to enable pivoting multiple columns into a
+ `DataFrame` with hierarchical columns
+ * `DataFrame` constructor can accept structured / record arrays
+ * `Panel` constructor can accept a dict of DataFrame-like objects. Do not
+ need to use `from_dict` anymore (`from_dict` is there to stay, though).
+
+**API Changes**
+
+ * The `DataMatrix` variable now refers to `DataFrame`, will be removed within
+ two releases
+ * `WidePanel` is now known as `Panel`. The `WidePanel` variable in the pandas
+ namespace now refers to the renamed `Panel` class
+ * `LongPanel` and `Panel` / `WidePanel` now no longer have a common
+ subclass. `LongPanel` is now a subclass of `DataFrame` having a number of
+ additional methods and a hierarchical index instead of the old
+ `LongPanelIndex` object, which has been removed. Legacy `LongPanel` pickles
+ may not load properly
+ * Cython is now required to build `pandas` from a development branch. This was
+ done to avoid continuing to check in cythonized C files into source
+ control. Builds from released source distributions will not require Cython
+ * Cython code has been moved up to a top level `pandas/src` directory. Cython
+ extension modules have been renamed and promoted from the `lib` subpackage to
+ the top level, i.e.
+
+ * `pandas.lib.tseries` -> `pandas._tseries`
+ * `pandas.lib.sparse` -> `pandas._sparse`
+
+ * `DataFrame` pickling format has changed. Backwards compatibility for legacy
+ pickles is provided, but it's recommended to consider PyTables-based
+ `HDFStore` for storing data with a longer expected shelf life
+ * A `copy` argument has been added to the `DataFrame` constructor to avoid
+ unnecessary copying of data. Data is no longer copied by default when passed
+ into the constructor
+ * Handling of boolean dtype in `DataFrame` has been improved to support storage
+ of boolean data with NA / NaN values. Before it was being converted to float64
+ so this should not (in theory) cause API breakage
+ * To optimize performance, Index objects now only check that their labels are
+ unique when uniqueness matters (i.e. when someone goes to perform a
+ lookup). This is a potentially dangerous tradeoff, but will lead to much
+ better performance in many places (like groupby).
+ * Boolean indexing using Series must now have the same indices (labels)
+ * Backwards compatibility support for begin/end/nPeriods keyword arguments in
+ DateRange class has been removed
+ * More intuitive / shorter filling aliases `ffill` (for `pad`) and `bfill` (for
+ `backfill`) have been added to the functions that use them: `reindex`,
+ `asfreq`, `fillna`.
+ * `pandas.core.mixins` code moved to `pandas.core.generic`
+ * `buffer` keyword arguments (e.g. `DataFrame.toString`) renamed to `buf` to
+ avoid using Python built-in name
+ * `DataFrame.rows()` removed (use `DataFrame.index`)
+ * Added deprecation warning to `DataFrame.cols()`, to be removed in next release
+ * `DataFrame` deprecations and de-camelCasing: `merge`, `asMatrix`,
+ `toDataMatrix`, `_firstTimeWithValue`, `_lastTimeWithValue`, `toRecords`,
+ `fromRecords`, `tgroupby`, `toString`
+ * `pandas.io.parsers` method deprecations
+
+ * `parseCSV` is now `read_csv` and keyword arguments have been de-camelCased
+ * `parseText` is now `read_table`
+ * `parseExcel` is replaced by the `ExcelFile` class and its `parse` method
+
+ * `fillMethod` arguments (deprecated in prior release) removed, should be
+ replaced with `method`
+ * `Series.fill`, `DataFrame.fill`, and `Panel.fill` removed, use `fillna`
+ instead
+ * `groupby` functions now exclude NA / NaN values from the list of groups. This
+ matches R behavior with NAs in factors e.g. with the `tapply` function
+ * Removed `parseText`, `parseCSV` and `parseExcel` from pandas namespace
+ * `Series.combineFunc` renamed to `Series.combine` and made a bit more general
+ with a `fill_value` keyword argument defaulting to NaN
+ * Removed `pandas.core.pytools` module. Code has been moved to
+ `pandas.core.common`
+ * Tacked on `groupName` attribute for groups in GroupBy renamed to `name`
+ * Panel/LongPanel `dims` attribute renamed to `shape` to be more conformant
+ * Slicing a `Series` returns a view now
+ * More Series deprecations / renaming: `toCSV` to `to_csv`, `asOf` to `asof`,
+ `merge` to `map`, `applymap` to `apply`, `toDict` to `to_dict`,
+ `combineFirst` to `combine_first`. Will print `FutureWarning`.
+ * `DataFrame.to_csv` does not write an "index" column label by default
+ anymore since the output file can be read back without it. However, there
+ is a new ``index_label`` argument. So you can do ``index_label='index'`` to
+ emulate the old behavior
+ * `datetools.Week` argument renamed from `dayOfWeek` to `weekday`
+ * `timeRule` argument in `shift` has been deprecated in favor of using the
+ `offset` argument for everything. So you can still pass a time rule string
+ to `offset`
+ * Added optional `encoding` argument to `read_csv`, `read_table`, `to_csv`,
+ `from_csv` to handle unicode in python 2.x
+
+**Bug fixes**
+
+ * Column ordering in `pandas.io.parsers.parseCSV` will match CSV in the presence
+ of mixed-type data
+ * Fixed handling of Excel 2003 dates in `pandas.io.parsers`
+ * `DateRange` caching was happening with high resolution `DateOffset` objects,
+ e.g. `DateOffset(seconds=1)`. This has been fixed
+ * Fixed __truediv__ issue in `DataFrame`
+ * Fixed `DataFrame.toCSV` bug preventing IO round trips in some cases
+ * Fixed bug in `Series.plot` causing matplotlib to barf in exceptional cases
+ * Disabled `Index` objects from being hashable, like ndarrays
+ * Added `__ne__` implementation to `Index` so that operations like ts[ts != idx]
+ will work
+ * Added `__ne__` implementation to `DataFrame`
+ * Bug / unintuitive result when calling `fillna` on unordered labels
+ * Bug calling `sum` on boolean DataFrame
+ * Bug fix when creating a DataFrame from a dict with scalar values
+ * Series.{sum, mean, std, ...} now return NA/NaN when the whole Series is NA
+ * NumPy 1.4 through 1.6 compatibility fixes
+ * Fixed bug in bias correction in `rolling_cov`, was affecting `rolling_corr`
+ too
+ * R-square value was incorrect in the presence of fixed and time effects in
+ the `PanelOLS` classes
+ * `HDFStore` can handle duplicates in table format, will take
+
+Thanks
+------
+ - Joon Ro
+ - Michael Pennington
+ - Chris Uga
+ - Chris Withers
+ - Jeff Reback
+ - Ted Square
+ - Craig Austin
+ - William Ferreira
+ - Daniel Fortunov
+ - Tony Roberts
+ - Martin Felder
+ - John Marino
+ - Tim McNamara
+ - Justin Berka
+ - Dieter Vandenbussche
+ - Shane Conway
+ - Skipper Seabold
+ - Chris Jordan-Squire
+
+pandas 0.3.0
+============
+
+Release notes
+-------------
+
+**Release date:** February 20, 2011
+
+**New features / modules**
+
+ - `corrwith` function to compute column- or row-wise correlations between two
+ DataFrame objects
+ - Can boolean-index DataFrame objects, e.g. df[df > 2] = 2, px[px > last_px] = 0
+ - Added comparison magic methods (__lt__, __gt__, etc.)
+ - Flexible explicit arithmetic methods (add, mul, sub, div, etc.)
+ - Added `reindex_like` method
+ - Added `reindex_like` method to WidePanel
+ - Convenience functions for accessing SQL-like databases in `pandas.io.sql`
+ module
+ - Added (still experimental) HDFStore class for storing pandas data
+ structures using HDF5 / PyTables in `pandas.io.pytables` module
+ - Added WeekOfMonth date offset
+ - `pandas.rpy` (experimental) module created, provide some interfacing /
+ conversion between rpy2 and pandas
+
+**Improvements**
+
+ - Unit test coverage: 100% line coverage of core data structures
+ - Speed enhancement to rolling_{median, max, min}
+ - Column ordering between DataFrame and DataMatrix is now consistent: before
+ DataFrame would not respect column order
+ - Improved {Series, DataFrame}.plot methods to be more flexible (can pass
+ matplotlib Axis arguments, plot DataFrame columns in multiple subplots,
+ etc.)
+
+**API Changes**
+
+ - Exponentially-weighted moment functions in `pandas.stats.moments` have a
+ more consistent API and accept a min_periods argument like their regular
+ moving counterparts.
+ - **fillMethod** argument in Series, DataFrame changed to **method**,
+ `FutureWarning` added.
+ - **fill** method in Series, DataFrame/DataMatrix, WidePanel renamed to
+ **fillna**, `FutureWarning` added to **fill**
+ - Renamed **DataFrame.getXS** to **xs**, `FutureWarning` added
+ - Removed **cap** and **floor** functions from DataFrame, renamed to
+ **clip_upper** and **clip_lower** for consistency with NumPy
+
+**Bug fixes**
+
+ - Fixed bug in IndexableSkiplist Cython code that was breaking rolling_max
+ function
+ - Numerous numpy.int64-related indexing fixes
+ - Several NumPy 1.4.0 NaN-handling fixes
+ - Bug fixes to pandas.io.parsers.parseCSV
+ - Fixed `DateRange` caching issue with unusual date offsets
+ - Fixed bug in `DateRange.union`
+ - Fixed corner case in `IndexableSkiplist` implementation
diff --git a/doc/source/v0.10.0.txt b/doc/source/v0.10.0.txt
index 51075a61bec4d..d0c0ecc148239 100644
--- a/doc/source/v0.10.0.txt
+++ b/doc/source/v0.10.0.txt
@@ -159,7 +159,7 @@ Convenience methods ``ffill`` and ``bfill`` have been added:
s
s.apply(f)
-- New API functions for working with pandas options (GH2097_):
+- New API functions for working with pandas options (:issue:`2097`):
- ``get_option`` / ``set_option`` - get/set the value of an option. Partial
names are accepted. - ``reset_option`` - reset one or more options to
@@ -174,7 +174,7 @@ Convenience methods ``ffill`` and ``bfill`` have been added:
get_option("display.max_rows")
-- to_string() methods now always return unicode strings (GH2224_).
+- to_string() methods now always return unicode strings (:issue:`2224`).
New features
~~~~~~~~~~~~
@@ -297,22 +297,22 @@ Updated PyTables Support
- performance improvments on table writing
- support for arbitrarily indexed dimensions
-- ``SparseSeries`` now has a ``density`` property (GH2384_)
+- ``SparseSeries`` now has a ``density`` property (:issue:`2384`)
- enable ``Series.str.strip/lstrip/rstrip`` methods to take an input argument
- to strip arbitrary characters (GH2411_)
+ to strip arbitrary characters (:issue:`2411`)
- implement ``value_vars`` in ``melt`` to limit values to certain columns
- and add ``melt`` to pandas namespace (GH2412_)
+ and add ``melt`` to pandas namespace (:issue:`2412`)
**Bug Fixes**
-- added ``Term`` method of specifying where conditions (GH1996_).
+- added ``Term`` method of specifying where conditions (:issue:`1996`).
- ``del store['df']`` now call ``store.remove('df')`` for store deletion
- deleting of consecutive rows is much faster than before
- ``min_itemsize`` parameter can be specified in table creation to force a
minimum size for indexing columns (the previous implementation would set the
column size based on the first append)
- indexing support via ``create_table_index`` (requires PyTables >= 2.3)
- (GH698_).
+ (:issue:`698`).
- appending on a store would fail if the table was not first created via ``put``
- fixed issue with missing attributes after loading a pickled dataframe (GH2431)
- minor change to select and remove: require a table ONLY if where is also
@@ -350,16 +350,7 @@ Adding experimental support for Panel4D and factory functions to create n-dimens
-See the `full release notes
-<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
+See the :ref:`full release notes
+<release>` or issue tracker
on GitHub for a complete list.
-.. _GH698: https://github.com/pydata/pandas/issues/698
-.. _GH1996: https://github.com/pydata/pandas/issues/1996
-.. _GH2316: https://github.com/pydata/pandas/issues/2316
-.. _GH2097: https://github.com/pydata/pandas/issues/2097
-.. _GH2224: https://github.com/pydata/pandas/issues/2224
-.. _GH2431: https://github.com/pydata/pandas/issues/2431
-.. _GH2412: https://github.com/pydata/pandas/issues/2412
-.. _GH2411: https://github.com/pydata/pandas/issues/2411
-.. _GH2384: https://github.com/pydata/pandas/issues/2384
diff --git a/doc/source/v0.10.1.txt b/doc/source/v0.10.1.txt
index dafa4300af0e3..0d92e359c2a4a 100644
--- a/doc/source/v0.10.1.txt
+++ b/doc/source/v0.10.1.txt
@@ -15,14 +15,14 @@ API changes
- Functions taking an ``inplace`` option return the calling object as before. A
deprecation message has been added
-- Groupby aggregations Max/Min no longer exclude non-numeric data (GH2700_)
+- Groupby aggregations Max/Min no longer exclude non-numeric data (:issue:`2700`)
- Resampling an empty DataFrame now returns an empty DataFrame instead of
- raising an exception (GH2640_)
+ raising an exception (:issue:`2640`)
- The file reader will now raise an exception when NA values are found in an
explicitly specified integer column instead of converting the column to float
- (GH2631_)
+ (:issue:`2631`)
- DatetimeIndex.unique now returns a DatetimeIndex with the same name and
-- timezone instead of an array (GH2563_)
+- timezone instead of an array (:issue:`2563`)
New features
~~~~~~~~~~~~
@@ -164,76 +164,49 @@ combined result, by using ``where`` on a selector table.
- ``Select`` now supports passing ``start`` and ``stop`` to provide selection
space limiting in selection.
-- Greatly improved ISO8601 (e.g., yyyy-mm-dd) date parsing for file parsers (GH2698_)
+- Greatly improved ISO8601 (e.g., yyyy-mm-dd) date parsing for file parsers (:issue:`2698`)
- Allow ``DataFrame.merge`` to handle combinatorial sizes too large for 64-bit
- integer (GH2690_)
-- Series now has unary negation (-series) and inversion (~series) operators (GH2686_)
-- DataFrame.plot now includes a ``logx`` parameter to change the x-axis to log scale (GH2327_)
-- Series arithmetic operators can now handle constant and ndarray input (GH2574_)
-- ExcelFile now takes a ``kind`` argument to specify the file type (GH2613_)
-- A faster implementation for Series.str methods (GH2602_)
+ integer (:issue:`2690`)
+- Series now has unary negation (-series) and inversion (~series) operators (:issue:`2686`)
+- DataFrame.plot now includes a ``logx`` parameter to change the x-axis to log scale (:issue:`2327`)
+- Series arithmetic operators can now handle constant and ndarray input (:issue:`2574`)
+- ExcelFile now takes a ``kind`` argument to specify the file type (:issue:`2613`)
+- A faster implementation for Series.str methods (:issue:`2602`)
**Bug Fixes**
- ``HDFStore`` tables can now store ``float32`` types correctly (cannot be
mixed with ``float64`` however)
-- Fixed Google Analytics prefix when specifying request segment (GH2713_).
+- Fixed Google Analytics prefix when specifying request segment (:issue:`2713`).
- Function to reset Google Analytics token store so users can recover from
- improperly setup client secrets (GH2687_).
-- Fixed groupby bug resulting in segfault when passing in MultiIndex (GH2706_)
+ improperly setup client secrets (:issue:`2687`).
+- Fixed groupby bug resulting in segfault when passing in MultiIndex (:issue:`2706`)
- Fixed bug where passing a Series with datetime64 values into `to_datetime`
- results in bogus output values (GH2699_)
+ results in bogus output values (:issue:`2699`)
- Fixed bug in ``pattern in HDFStore`` expressions when pattern is not a valid
- regex (GH2694_)
-- Fixed performance issues while aggregating boolean data (GH2692_)
+ regex (:issue:`2694`)
+- Fixed performance issues while aggregating boolean data (:issue:`2692`)
- When given a boolean mask key and a Series of new values, Series __setitem__
- will now align the incoming values with the original Series (GH2686_)
+ will now align the incoming values with the original Series (:issue:`2686`)
- Fixed MemoryError caused by performing counting sort on sorting MultiIndex
- levels with a very large number of combinatorial values (GH2684_)
+ levels with a very large number of combinatorial values (:issue:`2684`)
- Fixed bug that causes plotting to fail when the index is a DatetimeIndex with
- a fixed-offset timezone (GH2683_)
+ a fixed-offset timezone (:issue:`2683`)
- Corrected businessday subtraction logic when the offset is more than 5 bdays
- and the starting date is on a weekend (GH2680_)
+ and the starting date is on a weekend (:issue:`2680`)
- Fixed C file parser behavior when the file has more columns than data
- (GH2668_)
+ (:issue:`2668`)
- Fixed file reader bug that misaligned columns with data in the presence of an
implicit column and a specified `usecols` value
- DataFrames with numerical or datetime indices are now sorted prior to
- plotting (GH2609_)
+ plotting (:issue:`2609`)
- Fixed DataFrame.from_records error when passed columns, index, but empty
- records (GH2633_)
-- Several bug fixed for Series operations when dtype is datetime64 (GH2689_,
- GH2629_, GH2626_)
+ records (:issue:`2633`)
+- Several bug fixed for Series operations when dtype is datetime64 (:issue:`2689`,
+ :issue:`2629`, :issue:`2626`)
-See the `full release notes
-<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
+See the :ref:`full release notes
+<release>` or issue tracker
on GitHub for a complete list.
-.. _GH2706: https://github.com/pydata/pandas/issues/2706
-.. _GH2700: https://github.com/pydata/pandas/issues/2700
-.. _GH2699: https://github.com/pydata/pandas/issues/2699
-.. _GH2698: https://github.com/pydata/pandas/issues/2698
-.. _GH2694: https://github.com/pydata/pandas/issues/2694
-.. _GH2692: https://github.com/pydata/pandas/issues/2692
-.. _GH2690: https://github.com/pydata/pandas/issues/2690
-.. _GH2713: https://github.com/pydata/pandas/issues/2713
-.. _GH2689: https://github.com/pydata/pandas/issues/2689
-.. _GH2686: https://github.com/pydata/pandas/issues/2686
-.. _GH2684: https://github.com/pydata/pandas/issues/2684
-.. _GH2683: https://github.com/pydata/pandas/issues/2683
-.. _GH2680: https://github.com/pydata/pandas/issues/2680
-.. _GH2668: https://github.com/pydata/pandas/issues/2668
-.. _GH2640: https://github.com/pydata/pandas/issues/2640
-.. _GH2609: https://github.com/pydata/pandas/issues/2609
-.. _GH2327: https://github.com/pydata/pandas/issues/2327
-.. _GH2574: https://github.com/pydata/pandas/issues/2574
-.. _GH2609: https://github.com/pydata/pandas/issues/2609
-.. _GH2631: https://github.com/pydata/pandas/issues/2631
-.. _GH2633: https://github.com/pydata/pandas/issues/2633
-.. _GH2629: https://github.com/pydata/pandas/issues/2629
-.. _GH2626: https://github.com/pydata/pandas/issues/2626
-.. _GH2613: https://github.com/pydata/pandas/issues/2613
-.. _GH2602: https://github.com/pydata/pandas/issues/2602
-.. _GH2687: https://github.com/pydata/pandas/issues/2687
-.. _GH2563: https://github.com/pydata/pandas/issues/2563
diff --git a/doc/source/v0.11.0.txt b/doc/source/v0.11.0.txt
index b6b35fddab974..6b7fac0fc12dc 100644
--- a/doc/source/v0.11.0.txt
+++ b/doc/source/v0.11.0.txt
@@ -129,7 +129,7 @@ Dtype Gotchas
Starting in 0.11.0, construction of DataFrame/Series will use default dtypes of ``int64`` and ``float64``,
*regardless of platform*. This is not an apparent change from earlier versions of pandas. If you specify
-dtypes, they *WILL* be respected, however (GH2837_)
+dtypes, they *WILL* be respected, however (:issue:`2837`)
The following will all result in ``int64`` dtypes
@@ -176,7 +176,7 @@ Datetimes Conversion
Datetime64[ns] columns in a DataFrame (or a Series) allow the use of ``np.nan`` to indicate a nan value,
in addition to the traditional ``NaT``, or not-a-time. This allows convenient nan setting in a generic way.
Furthermore ``datetime64[ns]`` columns are created by default, when passed datetimelike objects (*this change was introduced in 0.10.1*)
-(GH2809_, GH2810_)
+(:issue:`2809`, :issue:`2810`)
.. ipython:: python
@@ -210,7 +210,7 @@ API changes
~~~~~~~~~~~
- Added to_series() method to indicies, to facilitate the creation of indexers
- (GH3275_)
+ (:issue:`3275`)
- ``HDFStore``
@@ -221,7 +221,7 @@ API changes
Enhancements
~~~~~~~~~~~~
- - Improved performance of df.to_csv() by up to 10x in some cases. (GH3059_)
+ - Improved performance of df.to_csv() by up to 10x in some cases. (:issue:`3059`)
- Numexpr is now a :ref:`Recommended Dependencies <install.recommended_dependencies>`, to accelerate certain
types of numerical and boolean operations
@@ -248,11 +248,11 @@ Enhancements
- provide dotted attribute access to ``get`` from stores, e.g. ``store.df == store['df']``
- new keywords ``iterator=boolean``, and ``chunksize=number_in_a_chunk`` are
- provided to support iteration on ``select`` and ``select_as_multiple`` (GH3076_)
+ provided to support iteration on ``select`` and ``select_as_multiple`` (:issue:`3076`)
- - You can now select timestamps from an *unordered* timeseries similarly to an *ordered* timeseries (GH2437_)
+ - You can now select timestamps from an *unordered* timeseries similarly to an *ordered* timeseries (:issue:`2437`)
- - You can now select with a string from a DataFrame with a datelike index, in a similar way to a Series (GH3070_)
+ - You can now select with a string from a DataFrame with a datelike index, in a similar way to a Series (:issue:`3070`)
.. ipython:: python
@@ -291,59 +291,36 @@ Enhancements
``above_below``. This allows the user to specify if they would like to
only return forward looking data for options near the current stock
price. This just obtains the data from Options.get_near_stock_price
- instead of Options.get_xxx_data() (GH2758_).
+ instead of Options.get_xxx_data() (:issue:`2758`).
- Cursor coordinate information is now displayed in time-series plots.
- added option `display.max_seq_items` to control the number of
- elements printed per sequence pprinting it. (GH2979_)
+ elements printed per sequence pprinting it. (:issue:`2979`)
- added option `display.chop_threshold` to control display of small numerical
- values. (GH2739_)
+ values. (:issue:`2739`)
- added option `display.max_info_rows` to prevent verbose_info from being
- calculated for frames above 1M rows (configurable). (GH2807_, GH2918_)
+ calculated for frames above 1M rows (configurable). (:issue:`2807`, :issue:`2918`)
- value_counts() now accepts a "normalize" argument, for normalized
- histograms. (GH2710_).
+ histograms. (:issue:`2710`).
- DataFrame.from_records now accepts not only dicts but any instance of
the collections.Mapping ABC.
- added option `display.mpl_style` providing a sleeker visual style
- for plots. Based on https://gist.github.com/huyng/816622 (GH3075_).
+ for plots. Based on https://gist.github.com/huyng/816622 (:issue:`3075`).
- Treat boolean values as integers (values 1 and 0) for numeric
- operations. (GH2641_)
+ operations. (:issue:`2641`)
- to_html() now accepts an optional "escape" argument to control reserved
HTML character escaping (enabled by default) and escapes ``&``, in addition
- to ``<`` and ``>``. (GH2919_)
+ to ``<`` and ``>``. (:issue:`2919`)
-See the `full release notes
-<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
+See the :ref:`full release notes
+<release>` or issue tracker
on GitHub for a complete list.
-.. _GH2437: https://github.com/pydata/pandas/issues/2437
-.. _GH2809: https://github.com/pydata/pandas/issues/2809
-.. _GH2810: https://github.com/pydata/pandas/issues/2810
-.. _GH2837: https://github.com/pydata/pandas/issues/2837
-.. _GH2898: https://github.com/pydata/pandas/issues/2898
-.. _GH3035: https://github.com/pydata/pandas/issues/3035
-.. _GH2978: https://github.com/pydata/pandas/issues/2978
-.. _GH2877: https://github.com/pydata/pandas/issues/2877
-.. _GH2739: https://github.com/pydata/pandas/issues/2739
-.. _GH2710: https://github.com/pydata/pandas/issues/2710
-.. _GH2806: https://github.com/pydata/pandas/issues/2806
-.. _GH2807: https://github.com/pydata/pandas/issues/2807
-.. _GH2918: https://github.com/pydata/pandas/issues/2918
-.. _GH2758: https://github.com/pydata/pandas/issues/2758
-.. _GH3275: https://github.com/pydata/pandas/issues/3275
-.. _GH2979: https://github.com/pydata/pandas/issues/2979
-.. _GH3011: https://github.com/pydata/pandas/issues/3011
-.. _GH3076: https://github.com/pydata/pandas/issues/3076
-.. _GH3059: https://github.com/pydata/pandas/issues/3059
-.. _GH3070: https://github.com/pydata/pandas/issues/3070
-.. _GH3075: https://github.com/pydata/pandas/issues/3075
-.. _GH2641: https://github.com/pydata/pandas/issues/2641
-.. _GH2919: https://github.com/pydata/pandas/issues/2919
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 97f236166be45..76c439afc452c 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -39,7 +39,7 @@ API changes
- Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return
- ``np.nan`` or ``np.inf`` as appropriate (GH3590_). This correct a numpy bug that treats ``integer``
+ ``np.nan`` or ``np.inf`` as appropriate (:issue:`3590`). This correct a numpy bug that treats ``integer``
and ``float`` dtypes differently.
.. ipython:: python
@@ -53,8 +53,8 @@ API changes
- Add ``squeeze`` keyword to ``groupby`` to allow reduction from
DataFrame -> Series if groups are unique. This is a Regression from 0.10.1.
We are reverting back to the prior behavior. This means groupby will return the
- same shaped objects whether the groups are unique or not. Revert this issue (GH2893_)
- with (GH3596_).
+ same shaped objects whether the groups are unique or not. Revert this issue (:issue:`2893`)
+ with (:issue:`3596`).
.. ipython:: python
@@ -71,7 +71,7 @@ API changes
- Raise on ``iloc`` when boolean indexing with a label based indexer mask
e.g. a boolean Series, even with integer labels, will raise. Since ``iloc``
- is purely positional based, the labels on the Series are not alignable (GH3631_)
+ is purely positional based, the labels on the Series are not alignable (:issue:`3631`)
This case is rarely used, and there are plently of alternatives. This preserves the
``iloc`` API to be *purely* positional based.
@@ -97,18 +97,18 @@ API changes
plot something.
- ``DataFrame.interpolate()`` is now deprecated. Please use
- ``DataFrame.fillna()`` and ``DataFrame.replace()`` instead. (GH3582_,
- GH3675_, GH3676_)
+ ``DataFrame.fillna()`` and ``DataFrame.replace()`` instead. (:issue:`3582`,
+ :issue:`3675`, :issue:`3676`)
- the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
deprecated
- ``DataFrame.replace`` 's ``infer_types`` parameter is removed and now
- performs conversion by default. (GH3907_)
+ performs conversion by default. (:issue:`3907`)
- Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column
- to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_)
- - Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_)
+ to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (:issue:`3679`)
+ - Implement ``__nonzero__`` for ``NDFrame`` objects (:issue:`3691`, :issue:`3696`)
- IO api
@@ -136,13 +136,13 @@ API changes
read_frame(....)
- ``DataFrame.to_html`` and ``DataFrame.to_latex`` now accept a path for
- their first argument (GH3702_)
+ their first argument (:issue:`3702`)
- Do not allow astypes on ``datetime64[ns]`` except to ``object``, and
- ``timedelta64[ns]`` to ``object/int`` (GH3425_)
+ ``timedelta64[ns]`` to ``object/int`` (:issue:`3425`)
- The behavior of ``datetime64`` dtypes has changed with respect to certain
- so-called reduction operations (GH3726_). The following operations now
+ so-called reduction operations (:issue:`3726`). The following operations now
raise a ``TypeError`` when perfomed on a ``Series`` and return an *empty*
``Series`` when performed on a ``DataFrame`` similar to performing these
operations on, for example, a ``DataFrame`` of ``slice`` objects:
@@ -157,7 +157,7 @@ I/O Enhancements
~~~~~~~~~~~~~~~~
- ``pd.read_html()`` can now parse HTML strings, files or urls and return
- DataFrames, courtesy of @cpcloud. (GH3477_, GH3605_, GH3606_, GH3616_).
+ DataFrames, courtesy of @cpcloud. (:issue:`3477`, :issue:`3605`, :issue:`3606`, :issue:`3616`).
It works with a *single* parser backend: BeautifulSoup4 + html5lib :ref:`See the docs<io.html>`
You can use ``pd.read_html()`` to read the output from ``DataFrame.to_html()`` like so
@@ -174,14 +174,14 @@ I/O Enhancements
``DataFrame.to_html()`` are not inverses.
- ``pd.read_html()`` no longer performs hard conversion of date strings
- (GH3656_).
+ (:issue:`3656`).
.. warning::
You may have to install an older version of BeautifulSoup4,
:ref:`See the installation docs<install.optional_dependencies>`
- - Added module for reading and writing Stata files: ``pandas.io.stata`` (GH1512_)
+ - Added module for reading and writing Stata files: ``pandas.io.stata`` (:issue:`1512`)
accessable via ``read_stata`` top-level function for reading,
and ``to_stata`` DataFrame method for writing, :ref:`See the docs<io.stata>`
@@ -202,7 +202,7 @@ I/O Enhancements
Note: The default behavior in 0.11.1 remains unchanged, but starting with 0.12,
the default *to* write and read multi-index columns will be in the new
- format. (GH3571_, GH1651_, GH3141_)
+ format. (:issue:`3571`, :issue:`1651`, :issue:`3141`)
- If an ``index_col`` is not specified (e.g. you don't have an index, or wrote it
with ``df.to_csv(..., index=False``), then any ``names`` on the columns index will
@@ -268,7 +268,7 @@ Other Enhancements
- ``pd.melt()`` now accepts the optional parameters ``var_name`` and ``value_name``
to specify custom column names of the returned DataFrame.
- - ``pd.set_option()`` now allows N option, value pairs (GH3667_).
+ - ``pd.set_option()`` now allows N option, value pairs (:issue:`3667`).
Let's say that we had an option ``'a.b'`` and another option ``'b.c'``.
We can set them at the same time:
@@ -315,16 +315,16 @@ Other Enhancements
dff.groupby('B').filter(lambda x: len(x) > 2, dropna=False)
- - Series and DataFrame hist methods now take a ``figsize`` argument (GH3834_)
+ - Series and DataFrame hist methods now take a ``figsize`` argument (:issue:`3834`)
- DatetimeIndexes no longer try to convert mixed-integer indexes during join
- operations (GH3877_)
+ operations (:issue:`3877`)
Bug Fixes
~~~~~~~~~
- Plotting functions now raise a ``TypeError`` before trying to plot anything
- if the associated objects have have a dtype of ``object`` (GH1818_,
- GH3572_, GH3911_, GH3912_), but they will try to convert object arrays to
+ if the associated objects have have a dtype of ``object`` (:issue:`1818`,
+ :issue:`3572`, :issue:`3911`, :issue:`3912`), but they will try to convert object arrays to
numeric arrays if possible so that you can still plot, for example, an
object array with floats. This happens before any drawing takes place which
elimnates any spurious plots from showing up.
@@ -332,7 +332,7 @@ Bug Fixes
- ``fillna`` methods now raise a ``TypeError`` if the ``value`` parameter is
a list or tuple.
- - ``Series.str`` now supports iteration (GH3638_). You can iterate over the
+ - ``Series.str`` now supports iteration (:issue:`3638`). You can iterate over the
individual elements of each string in the ``Series``. Each iteration yields
yields a ``Series`` with either a single character at each index of the
original ``Series`` or ``NaN``. For example,
@@ -356,90 +356,37 @@ Bug Fixes
- ``HDFStore``
- - will retain index attributes (freq,tz,name) on recreation (GH3499_)
+ - will retain index attributes (freq,tz,name) on recreation (:issue:`3499`)
- will warn with a ``AttributeConflictWarning`` if you are attempting to append
an index with a different frequency than the existing, or attempting
to append an index with a different name than the existing
- - support datelike columns with a timezone as data_columns (GH2852_)
+ - support datelike columns with a timezone as data_columns (:issue:`2852`)
- - Non-unique index support clarified (GH3468_).
+ - Non-unique index support clarified (:issue:`3468`).
- - Fix assigning a new index to a duplicate index in a DataFrame would fail (GH3468_)
+ - Fix assigning a new index to a duplicate index in a DataFrame would fail (:issue:`3468`)
- Fix construction of a DataFrame with a duplicate index
- ref_locs support to allow duplicative indices across dtypes,
- allows iget support to always find the index (even across dtypes) (GH2194_)
+ allows iget support to always find the index (even across dtypes) (:issue:`2194`)
- applymap on a DataFrame with a non-unique index now works
- (removed warning) (GH2786_), and fix (GH3230_)
- - Fix to_csv to handle non-unique columns (GH3495_)
- - Duplicate indexes with getitem will return items in the correct order (GH3455_, GH3457_)
- and handle missing elements like unique indices (GH3561_)
- - Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_)
- - Concat to produce a non-unique columns when duplicates are across dtypes is fixed (GH3602_)
- - Allow insert/delete to non-unique columns (GH3679_)
- - Non-unique indexing with a slice via ``loc`` and friends fixed (GH3659_)
- - Allow insert/delete to non-unique columns (GH3679_)
- - Extend ``reindex`` to correctly deal with non-unique indices (GH3679_)
+ (removed warning) (:issue:`2786`), and fix (:issue:`3230`)
+ - Fix to_csv to handle non-unique columns (:issue:`3495`)
+ - Duplicate indexes with getitem will return items in the correct order (:issue:`3455`, :issue:`3457`)
+ and handle missing elements like unique indices (:issue:`3561`)
+ - Duplicate indexes with and empty DataFrame.from_records will return a correct frame (:issue:`3562`)
+ - Concat to produce a non-unique columns when duplicates are across dtypes is fixed (:issue:`3602`)
+ - Allow insert/delete to non-unique columns (:issue:`3679`)
+ - Non-unique indexing with a slice via ``loc`` and friends fixed (:issue:`3659`)
+ - Allow insert/delete to non-unique columns (:issue:`3679`)
+ - Extend ``reindex`` to correctly deal with non-unique indices (:issue:`3679`)
- ``DataFrame.itertuples()`` now works with frames with duplicate column
- names (GH3873_)
+ names (:issue:`3873`)
- - ``DataFrame.from_records`` did not accept empty recarrays (GH3682_)
- - ``read_html`` now correctly skips tests (GH3741_)
+ - ``DataFrame.from_records`` did not accept empty recarrays (:issue:`3682`)
+ - ``read_html`` now correctly skips tests (:issue:`3741`)
- Fixed a bug where ``DataFrame.replace`` with a compiled regular expression
- in the ``to_replace`` argument wasn't working (GH3907_)
+ in the ``to_replace`` argument wasn't working (:issue:`3907`)
-See the `full release notes
-<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
+See the :ref:`full release notes
+<release>` or issue tracker
on GitHub for a complete list.
-
-.. _GH3468: https://github.com/pydata/pandas/issues/3468
-.. _GH2194: https://github.com/pydata/pandas/issues/2194
-.. _GH2786: https://github.com/pydata/pandas/issues/2786
-.. _GH3230: https://github.com/pydata/pandas/issues/3230
-.. _GH3495: https://github.com/pydata/pandas/issues/3495
-.. _GH3455: https://github.com/pydata/pandas/issues/3455
-.. _GH3457: https://github.com/pydata/pandas/issues/3457
-.. _GH3561: https://github.com/pydata/pandas/issues/3561
-.. _GH3562: https://github.com/pydata/pandas/issues/3562
-.. _GH3602: https://github.com/pydata/pandas/issues/3602
-.. _GH2437: https://github.com/pydata/pandas/issues/2437
-.. _GH2852: https://github.com/pydata/pandas/issues/2852
-.. _GH3477: https://github.com/pydata/pandas/issues/3477
-.. _GH3492: https://github.com/pydata/pandas/issues/3492
-.. _GH3499: https://github.com/pydata/pandas/issues/3499
-.. _GH2893: https://github.com/pydata/pandas/issues/2893
-.. _GH3596: https://github.com/pydata/pandas/issues/3596
-.. _GH3590: https://github.com/pydata/pandas/issues/3590
-.. _GH3435: https://github.com/pydata/pandas/issues/3435
-.. _GH1512: https://github.com/pydata/pandas/issues/1512
-.. _GH2285: https://github.com/pydata/pandas/issues/2285
-.. _GH3631: https://github.com/pydata/pandas/issues/3631
-.. _GH3571: https://github.com/pydata/pandas/issues/3571
-.. _GH1651: https://github.com/pydata/pandas/issues/1651
-.. _GH3141: https://github.com/pydata/pandas/issues/3141
-.. _GH3638: https://github.com/pydata/pandas/issues/3638
-.. _GH3616: https://github.com/pydata/pandas/issues/3616
-.. _GH3605: https://github.com/pydata/pandas/issues/3605
-.. _GH3606: https://github.com/pydata/pandas/issues/3606
-.. _GH3656: https://github.com/pydata/pandas/issues/3656
-.. _GH1818: https://github.com/pydata/pandas/issues/1818
-.. _GH3572: https://github.com/pydata/pandas/issues/3572
-.. _GH3582: https://github.com/pydata/pandas/issues/3582
-.. _GH3676: https://github.com/pydata/pandas/issues/3676
-.. _GH3675: https://github.com/pydata/pandas/issues/3675
-.. _GH3682: https://github.com/pydata/pandas/issues/3682
-.. _GH3679: https://github.com/pydata/pandas/issues/3679
-.. _GH3702: https://github.com/pydata/pandas/issues/3702
-.. _GH3691: https://github.com/pydata/pandas/issues/3691
-.. _GH3696: https://github.com/pydata/pandas/issues/3696
-.. _GH3667: https://github.com/pydata/pandas/issues/3667
-.. _GH3741: https://github.com/pydata/pandas/issues/3741
-.. _GH3726: https://github.com/pydata/pandas/issues/3726
-.. _GH3425: https://github.com/pydata/pandas/issues/3425
-.. _GH3834: https://github.com/pydata/pandas/issues/3834
-.. _GH3873: https://github.com/pydata/pandas/issues/3873
-.. _GH3877: https://github.com/pydata/pandas/issues/3877
-.. _GH3659: https://github.com/pydata/pandas/issues/3659
-.. _GH3679: https://github.com/pydata/pandas/issues/3679
-.. _GH3907: https://github.com/pydata/pandas/issues/3907
-.. _GH3911: https://github.com/pydata/pandas/issues/3911
-.. _GH3912: https://github.com/pydata/pandas/issues/3912
diff --git a/doc/source/v0.4.x.txt b/doc/source/v0.4.x.txt
index 19293887089ba..249dec5fd647b 100644
--- a/doc/source/v0.4.x.txt
+++ b/doc/source/v0.4.x.txt
@@ -6,48 +6,48 @@ v.0.4.3 through v0.4.1 (September 25 - October 9, 2011)
New Features
~~~~~~~~~~~~
-- Added Python 3 support using 2to3 (PR200_)
+- Added Python 3 support using 2to3 (:issue:`200`)
- :ref:`Added <dsintro.name_attribute>` ``name`` attribute to ``Series``, now
prints as part of ``Series.__repr__``
- :ref:`Added <missing.isnull>` instance methods ``isnull`` and ``notnull`` to
- Series (PR209_, GH203_)
+ Series (:issue:`209`, :issue:`203`)
- :ref:`Added <basics.align>` ``Series.align`` method for aligning two series
with choice of join method (ENH56_)
- :ref:`Added <indexing.get_level_values>` method ``get_level_values`` to
- ``MultiIndex`` (IS188_)
+ ``MultiIndex`` (:issue:`188`)
- :ref:`Set <indexing.mixed_type_setting>` values in mixed-type
- ``DataFrame`` objects via ``.ix`` indexing attribute (GH135_)
+ ``DataFrame`` objects via ``.ix`` indexing attribute (:issue:`135`)
- Added new ``DataFrame`` :ref:`methods <basics.dtypes>`
``get_dtype_counts`` and property ``dtypes`` (ENHdc_)
- Added :ref:`ignore_index <merging.ignore_index>` option to
``DataFrame.append`` to stack DataFrames (ENH1b_)
- ``read_csv`` tries to :ref:`sniff <io.sniff>` delimiters using
- ``csv.Sniffer`` (PR146_)
+ ``csv.Sniffer`` (:issue:`146`)
- ``read_csv`` can :ref:`read <io.csv_multiindex>` multiple columns into a
``MultiIndex``; DataFrame's ``to_csv`` method writes out a corresponding
- ``MultiIndex`` (PR151_)
+ ``MultiIndex`` (:issue:`151`)
- ``DataFrame.rename`` has a new ``copy`` parameter to :ref:`rename
<basics.rename>` a DataFrame in place (ENHed_)
-- :ref:`Enable <reshaping.unstack_by_name>` unstacking by name (PR142_)
-- :ref:`Enable <indexing.sortlevel_byname>` ``sortlevel`` to work by level (PR141_)
+- :ref:`Enable <reshaping.unstack_by_name>` unstacking by name (:issue:`142`)
+- :ref:`Enable <indexing.sortlevel_byname>` ``sortlevel`` to work by level (:issue:`141`)
Performance Enhancements
~~~~~~~~~~~~~~~~~~~~~~~~
- Altered binary operations on differently-indexed SparseSeries objects
to use the integer-based (dense) alignment logic which is faster with a
- larger number of blocks (GH205_)
+ larger number of blocks (:issue:`205`)
- Wrote faster Cython data alignment / merging routines resulting in
substantial speed increases
- Improved performance of ``isnull`` and ``notnull``, a regression from v0.3.0
- (GH187_)
+ (:issue:`187`)
- Refactored code related to ``DataFrame.join`` so that intermediate aligned
copies of the data in each ``DataFrame`` argument do not need to be created.
- Substantial performance increases result (GH176_)
+ Substantial performance increases result (:issue:`176`)
- Substantially improved performance of generic ``Index.intersection`` and
``Index.union``
- Implemented ``BlockManager.take`` resulting in significantly faster ``take``
- performance on mixed-type ``DataFrame`` objects (GH104_)
+ performance on mixed-type ``DataFrame`` objects (:issue:`104`)
- Improved performance of ``Series.sort_index``
- Significant groupby performance enhancement: removed unnecessary integrity
checks in DataFrame internals that were slowing down slicing operations to
@@ -57,21 +57,8 @@ Performance Enhancements
- Wrote fast time series merging / joining methods in Cython. Will be
integrated later into DataFrame.join and related functions
-.. _PR146: https://github.com/pydata/pandas/pull/146
.. _ENH1b: https://github.com/pydata/pandas/commit/1ba56251f0013ff7cd8834e9486cef2b10098371
.. _ENHdc: https://github.com/pydata/pandas/commit/dca3c5c5a6a3769ee01465baca04cfdfa66a4f76
-.. _GH135: https://github.com/pydata/pandas/issues/135
-.. _PR151: https://github.com/pydata/pandas/pull/151
.. _ENHed: https://github.com/pydata/pandas/commit/edd9f1945fc010a57fa0ae3b3444d1fffe592591
-.. _PR142: https://github.com/pydata/pandas/pull/142
-.. _PR141: https://github.com/pydata/pandas/pull/141
-.. _IS188: https://github.com/pydata/pandas/issues/188
.. _ENH56: https://github.com/pydata/pandas/commit/56e0c9ffafac79ce262b55a6a13e1b10a88fbe93
-.. _GH187: https://github.com/pydata/pandas/issues/187
-.. _GH176: https://github.com/pydata/pandas/issues/176
-.. _GH104: https://github.com/pydata/pandas/issues/104
-.. _GH205: https://github.com/pydata/pandas/issues/205
-.. _PR209: https://github.com/pydata/pandas/pull/209
-.. _GH203: https://github.com/pydata/pandas/issues/203
-.. _PR200: https://github.com/pydata/pandas/pull/200
diff --git a/doc/source/v0.5.0.txt b/doc/source/v0.5.0.txt
index 017d10d4c9b8c..d0550fd5ef8f3 100644
--- a/doc/source/v0.5.0.txt
+++ b/doc/source/v0.5.0.txt
@@ -9,23 +9,23 @@ New Features
- :ref:`Added <basics.df_join>` ``DataFrame.align`` method with standard join options
- :ref:`Added <io.parse_dates>` ``parse_dates`` option to ``read_csv`` and ``read_table`` methods to optionally try to parse dates in the index columns
-- :ref:`Added <io.parse_dates>` ``nrows``, ``chunksize``, and ``iterator`` arguments to ``read_csv`` and ``read_table``. The last two return a new ``TextParser`` class capable of lazily iterating through chunks of a flat file (GH242_)
-- :ref:`Added <merging.multikey_join>` ability to join on multiple columns in ``DataFrame.join`` (GH214_)
+- :ref:`Added <io.parse_dates>` ``nrows``, ``chunksize``, and ``iterator`` arguments to ``read_csv`` and ``read_table``. The last two return a new ``TextParser`` class capable of lazily iterating through chunks of a flat file (:issue:`242`)
+- :ref:`Added <merging.multikey_join>` ability to join on multiple columns in ``DataFrame.join`` (:issue:`214`)
- Added private ``_get_duplicates`` function to ``Index`` for identifying duplicate values more easily (ENH5c_)
- :ref:`Added <indexing.df_cols>` column attribute access to DataFrame.
-- :ref:`Added <indexing.df_cols>` Python tab completion hook for DataFrame columns. (PR233_, GH230_)
-- :ref:`Implemented <basics.describe>` ``Series.describe`` for Series containing objects (PR241_)
-- :ref:`Added <merging.df_inner_join>` inner join option to ``DataFrame.join`` when joining on key(s) (GH248_)
-- :ref:`Implemented <indexing.df_cols>` selecting DataFrame columns by passing a list to ``__getitem__`` (GH253_)
-- :ref:`Implemented <indexing.set_ops>` & and | to intersect / union Index objects, respectively (GH261_)
-- :ref:`Added<reshaping.pivot>` ``pivot_table`` convenience function to pandas namespace (GH234_)
-- :ref:`Implemented <basics.rename_axis>` ``Panel.rename_axis`` function (GH243_)
-- DataFrame will show index level names in console output (PR334_)
+- :ref:`Added <indexing.df_cols>` Python tab completion hook for DataFrame columns. (:issue:`233`, :issue:`230`)
+- :ref:`Implemented <basics.describe>` ``Series.describe`` for Series containing objects (:issue:`241`)
+- :ref:`Added <merging.df_inner_join>` inner join option to ``DataFrame.join`` when joining on key(s) (:issue:`248`)
+- :ref:`Implemented <indexing.df_cols>` selecting DataFrame columns by passing a list to ``__getitem__`` (:issue:`253`)
+- :ref:`Implemented <indexing.set_ops>` & and | to intersect / union Index objects, respectively (:issue:`261`)
+- :ref:`Added<reshaping.pivot>` ``pivot_table`` convenience function to pandas namespace (:issue:`234`)
+- :ref:`Implemented <basics.rename_axis>` ``Panel.rename_axis`` function (:issue:`243`)
+- DataFrame will show index level names in console output (:issue:`334`)
- :ref:`Implemented <indexing.take>` ``Panel.take``
- :ref:`Added<basics.console_output>` ``set_eng_float_format`` for alternate DataFrame floating point string formatting (ENH61_)
- :ref:`Added <indexing.set_index>` convenience ``set_index`` function for creating a DataFrame index from its existing columns
-- :ref:`Implemented <groupby.multiindex>` ``groupby`` hierarchical index level name (GH223_)
-- :ref:`Added <io.store_in_csv>` support for different delimiters in ``DataFrame.to_csv`` (PR244_)
+- :ref:`Implemented <groupby.multiindex>` ``groupby`` hierarchical index level name (:issue:`223`)
+- :ref:`Added <io.store_in_csv>` support for different delimiters in ``DataFrame.to_csv`` (:issue:`244`)
- TODO: DOCS ABOUT TAKE METHODS
Performance Enhancements
@@ -33,28 +33,11 @@ Performance Enhancements
- VBENCH Major performance improvements in file parsing functions ``read_csv`` and ``read_table``
- VBENCH Added Cython function for converting tuples to ndarray very fast. Speeds up many MultiIndex-related operations
-- VBENCH Refactored merging / joining code into a tidy class and disabled unnecessary computations in the float/object case, thus getting about 10% better performance (GH211_)
-- VBENCH Improved speed of ``DataFrame.xs`` on mixed-type DataFrame objects by about 5x, regression from 0.3.0 (GH215_)
+- VBENCH Refactored merging / joining code into a tidy class and disabled unnecessary computations in the float/object case, thus getting about 10% better performance (:issue:`211`)
+- VBENCH Improved speed of ``DataFrame.xs`` on mixed-type DataFrame objects by about 5x, regression from 0.3.0 (:issue:`215`)
- VBENCH With new ``DataFrame.align`` method, speeding up binary operations between differently-indexed DataFrame objects by 10-25%.
-- VBENCH Significantly sped up conversion of nested dict into DataFrame (GH212_)
+- VBENCH Significantly sped up conversion of nested dict into DataFrame (:issue:`212`)
- VBENCH Significantly speed up DataFrame ``__repr__`` and ``count`` on large mixed-type DataFrame objects
-.. _GH214: https://github.com/pydata/pandas/issues/214
-.. _GH248: https://github.com/pydata/pandas/issues/248
-.. _GH253: https://github.com/pydata/pandas/issues/253
-.. _GH261: https://github.com/pydata/pandas/issues/261
-.. _GH234: https://github.com/pydata/pandas/issues/234
-.. _GH243: https://github.com/pydata/pandas/issues/243
-.. _GH223: https://github.com/pydata/pandas/issues/223
-.. _PR244: https://github.com/pydata/pandas/pull/244
-.. _PR233: https://github.com/pydata/pandas/pull/233
-.. _GH230: https://github.com/pydata/pandas/issues/230
-.. _PR241: https://github.com/pydata/pandas/pull/241
-.. _GH242: https://github.com/pydata/pandas/issues/242
-.. _GH212: https://github.com/pydata/pandas/issues/212
-.. _GH211: https://github.com/pydata/pandas/issues/211
-.. _GH215: https://github.com/pydata/pandas/issues/215
-.. _GH213: https://github.com/pydata/pandas/issues/213
.. _ENH61: https://github.com/pydata/pandas/commit/6141961
-.. _PR334: https://github.com/pydata/pandas/pull/334
.. _ENH5c: https://github.com/pydata/pandas/commit/5ca6ff5d822ee4ddef1ec0d87b6d83d8b4bbd3eb
diff --git a/doc/source/v0.6.0.txt b/doc/source/v0.6.0.txt
index e72aec601221e..55a67a75e0fd1 100644
--- a/doc/source/v0.6.0.txt
+++ b/doc/source/v0.6.0.txt
@@ -6,95 +6,51 @@ v.0.6.0 (November 25, 2011)
New Features
~~~~~~~~~~~~
- :ref:`Added <reshaping.melt>` ``melt`` function to ``pandas.core.reshape``
-- :ref:`Added <groupby.multiindex>` ``level`` parameter to group by level in Series and DataFrame descriptive statistics (PR313_)
-- :ref:`Added <basics.head_tail>` ``head`` and ``tail`` methods to Series, analogous to to DataFrame (PR296_)
-- :ref:`Added <indexing.boolean>` ``Series.isin`` function which checks if each value is contained in a passed sequence (GH289_)
+- :ref:`Added <groupby.multiindex>` ``level`` parameter to group by level in Series and DataFrame descriptive statistics (:issue:`313`)
+- :ref:`Added <basics.head_tail>` ``head`` and ``tail`` methods to Series, analogous to DataFrame (:issue:`296`)
+- :ref:`Added <indexing.boolean>` ``Series.isin`` function which checks if each value is contained in a passed sequence (:issue:`289`)
- :ref:`Added <io.formatting>` ``float_format`` option to ``Series.to_string``
-- :ref:`Added <io.parse_dates>` ``skip_footer`` (GH291_) and ``converters`` (GH343_) options to ``read_csv`` and ``read_table``
-- :ref:`Added <indexing.duplicate>` ``drop_duplicates`` and ``duplicated`` functions for removing duplicate DataFrame rows and checking for duplicate rows, respectively (GH319_)
-- :ref:`Implemented <dsintro.boolean>` operators '&', '|', '^', '-' on DataFrame (GH347_)
+- :ref:`Added <io.parse_dates>` ``skip_footer`` (:issue:`291`) and ``converters`` (:issue:`343`) options to ``read_csv`` and ``read_table``
+- :ref:`Added <indexing.duplicate>` ``drop_duplicates`` and ``duplicated`` functions for removing duplicate DataFrame rows and checking for duplicate rows, respectively (:issue:`319`)
+- :ref:`Implemented <dsintro.boolean>` operators '&', '|', '^', '-' on DataFrame (:issue:`347`)
- :ref:`Added <basics.stats>` ``Series.mad``, mean absolute deviation
-- :ref:`Added <timeseries.offsets>` ``QuarterEnd`` DateOffset (PR321_)
-- :ref:`Added <dsintro.numpy_interop>` ``dot`` to DataFrame (GH65_)
-- :ref:`Added <basics.panel>` ``orient`` option to ``Panel.from_dict`` (GH359_, GH301_)
+- :ref:`Added <timeseries.offsets>` ``QuarterEnd`` DateOffset (:issue:`321`)
+- :ref:`Added <dsintro.numpy_interop>` ``dot`` to DataFrame (:issue:`65`)
+- :ref:`Added <basics.panel>` ``orient`` option to ``Panel.from_dict`` (:issue:`359`, :issue:`301`)
- :ref:`Added <basics.dataframe.from_dict>` ``orient`` option to ``DataFrame.from_dict``
-- :ref:`Added <basics.dataframe.from_records>` passing list of tuples or list of lists to ``DataFrame.from_records`` (GH357_)
-- :ref:`Added <groupby.multiindex>` multiple levels to groupby (GH103_)
-- :ref:`Allow <basics.sorting>` multiple columns in ``by`` argument of ``DataFrame.sort_index`` (GH92_, PR362_)
-- :ref:`Added <indexing.basics.get_value>` fast ``get_value`` and ``put_value`` methods to DataFrame (GH360_)
-- :ref:`Added <computation.covariance>` ``cov`` instance methods to Series and DataFrame (GH194_, PR362_)
-- :ref:`Added <visualization.barplot>` ``kind='bar'`` option to ``DataFrame.plot`` (PR348_)
-- :ref:`Added <basics.idxmin>` ``idxmin`` and ``idxmax`` to Series and DataFrame (PR286_)
-- :ref:`Added <io.clipboard>` ``read_clipboard`` function to parse DataFrame from clipboard (GH300_)
-- :ref:`Added <basics.stats>` ``nunique`` function to Series for counting unique elements (GH297_)
-- :ref:`Made <basics.dataframe>` DataFrame constructor use Series name if no columns passed (GH373_)
-- :ref:`Support <io.parse_dates>` regular expressions in read_table/read_csv (GH364_)
-- :ref:`Added <io.html>` ``DataFrame.to_html`` for writing DataFrame to HTML (PR387_)
-- :ref:`Added <basics.dataframe>` support for MaskedArray data in DataFrame, masked values converted to NaN (PR396_)
-- :ref:`Added <visualization.box>` ``DataFrame.boxplot`` function (GH368_)
-- :ref:`Can <basics.apply>` pass extra args, kwds to DataFrame.apply (GH376_)
-- :ref:`Implement <merging.multikey_join>` ``DataFrame.join`` with vector ``on`` argument (GH312_)
-- :ref:`Added <visualization.basic>` ``legend`` boolean flag to ``DataFrame.plot`` (GH324_)
-- :ref:`Can <reshaping.stacking>` pass multiple levels to ``stack`` and ``unstack`` (GH370_)
-- :ref:`Can <reshaping.pivot>` pass multiple values columns to ``pivot_table`` (GH381_)
-- :ref:`Use <groupby.multiindex>` Series name in GroupBy for result index (GH363_)
-- :ref:`Added <basics.apply>` ``raw`` option to ``DataFrame.apply`` for performance if only need ndarray (GH309_)
-- Added proper, tested weighted least squares to standard and panel OLS (GH303_)
+- :ref:`Added <basics.dataframe.from_records>` passing list of tuples or list of lists to ``DataFrame.from_records`` (:issue:`357`)
+- :ref:`Added <groupby.multiindex>` multiple levels to groupby (:issue:`103`)
+- :ref:`Allow <basics.sorting>` multiple columns in ``by`` argument of ``DataFrame.sort_index`` (:issue:`92`, :issue:`362`)
+- :ref:`Added <indexing.basics.get_value>` fast ``get_value`` and ``put_value`` methods to DataFrame (:issue:`360`)
+- :ref:`Added <computation.covariance>` ``cov`` instance methods to Series and DataFrame (:issue:`194`, :issue:`362`)
+- :ref:`Added <visualization.barplot>` ``kind='bar'`` option to ``DataFrame.plot`` (:issue:`348`)
+- :ref:`Added <basics.idxmin>` ``idxmin`` and ``idxmax`` to Series and DataFrame (:issue:`286`)
+- :ref:`Added <io.clipboard>` ``read_clipboard`` function to parse DataFrame from clipboard (:issue:`300`)
+- :ref:`Added <basics.stats>` ``nunique`` function to Series for counting unique elements (:issue:`297`)
+- :ref:`Made <basics.dataframe>` DataFrame constructor use Series name if no columns passed (:issue:`373`)
+- :ref:`Support <io.parse_dates>` regular expressions in read_table/read_csv (:issue:`364`)
+- :ref:`Added <io.html>` ``DataFrame.to_html`` for writing DataFrame to HTML (:issue:`387`)
+- :ref:`Added <basics.dataframe>` support for MaskedArray data in DataFrame, masked values converted to NaN (:issue:`396`)
+- :ref:`Added <visualization.box>` ``DataFrame.boxplot`` function (:issue:`368`)
+- :ref:`Can <basics.apply>` pass extra args, kwds to DataFrame.apply (:issue:`376`)
+- :ref:`Implement <merging.multikey_join>` ``DataFrame.join`` with vector ``on`` argument (:issue:`312`)
+- :ref:`Added <visualization.basic>` ``legend`` boolean flag to ``DataFrame.plot`` (:issue:`324`)
+- :ref:`Can <reshaping.stacking>` pass multiple levels to ``stack`` and ``unstack`` (:issue:`370`)
+- :ref:`Can <reshaping.pivot>` pass multiple values columns to ``pivot_table`` (:issue:`381`)
+- :ref:`Use <groupby.multiindex>` Series name in GroupBy for result index (:issue:`363`)
+- :ref:`Added <basics.apply>` ``raw`` option to ``DataFrame.apply`` for performance if only need ndarray (:issue:`309`)
+- Added proper, tested weighted least squares to standard and panel OLS (:issue:`303`)
Performance Enhancements
~~~~~~~~~~~~~~~~~~~~~~~~
-- VBENCH Cythonized ``cache_readonly``, resulting in substantial micro-performance enhancements throughout the codebase (GH361_)
-- VBENCH Special Cython matrix iterator for applying arbitrary reduction operations with 3-5x better performance than `np.apply_along_axis` (GH309_)
+- VBENCH Cythonized ``cache_readonly``, resulting in substantial micro-performance enhancements throughout the codebase (:issue:`361`)
+- VBENCH Special Cython matrix iterator for applying arbitrary reduction operations with 3-5x better performance than `np.apply_along_axis` (:issue:`309`)
- VBENCH Improved performance of ``MultiIndex.from_tuples``
- VBENCH Special Cython matrix iterator for applying arbitrary reduction operations
- VBENCH + DOCUMENT Add ``raw`` option to ``DataFrame.apply`` for getting better performance when
-- VBENCH Faster cythonized count by level in Series and DataFrame (GH341_)
+- VBENCH Faster cythonized count by level in Series and DataFrame (:issue:`341`)
- VBENCH? Significant GroupBy performance enhancement with multiple keys with many "empty" combinations
-- VBENCH New Cython vectorized function ``map_infer`` speeds up ``Series.apply`` and ``Series.map`` significantly when passed elementwise Python function, motivated by (PR355_)
-- VBENCH Significantly improved performance of ``Series.order``, which also makes np.unique called on a Series faster (GH327_)
-- VBENCH Vastly improved performance of GroupBy on axes with a MultiIndex (GH299_)
+- VBENCH New Cython vectorized function ``map_infer`` speeds up ``Series.apply`` and ``Series.map`` significantly when passed elementwise Python function, motivated by (:issue:`355`)
+- VBENCH Significantly improved performance of ``Series.order``, which also makes np.unique called on a Series faster (:issue:`327`)
+- VBENCH Vastly improved performance of GroupBy on axes with a MultiIndex (:issue:`299`)
-.. _GH65: https://github.com/pydata/pandas/issues/65
-.. _GH92: https://github.com/pydata/pandas/issues/92
-.. _GH103: https://github.com/pydata/pandas/issues/103
-.. _GH194: https://github.com/pydata/pandas/issues/194
-.. _GH289: https://github.com/pydata/pandas/issues/289
-.. _GH291: https://github.com/pydata/pandas/issues/291
-.. _GH297: https://github.com/pydata/pandas/issues/297
-.. _GH299: https://github.com/pydata/pandas/issues/299
-.. _GH300: https://github.com/pydata/pandas/issues/300
-.. _GH301: https://github.com/pydata/pandas/issues/301
-.. _GH303: https://github.com/pydata/pandas/issues/303
-.. _GH305: https://github.com/pydata/pandas/issues/305
-.. _GH308: https://github.com/pydata/pandas/issues/308
-.. _GH309: https://github.com/pydata/pandas/issues/309
-.. _GH312: https://github.com/pydata/pandas/issues/312
-.. _GH319: https://github.com/pydata/pandas/issues/319
-.. _GH324: https://github.com/pydata/pandas/issues/324
-.. _GH327: https://github.com/pydata/pandas/issues/327
-.. _GH341: https://github.com/pydata/pandas/issues/341
-.. _GH343: https://github.com/pydata/pandas/issues/343
-.. _GH347: https://github.com/pydata/pandas/issues/347
-.. _GH357: https://github.com/pydata/pandas/issues/357
-.. _GH359: https://github.com/pydata/pandas/issues/359
-.. _GH360: https://github.com/pydata/pandas/issues/360
-.. _GH361: https://github.com/pydata/pandas/issues/361
-.. _GH363: https://github.com/pydata/pandas/issues/363
-.. _GH364: https://github.com/pydata/pandas/issues/364
-.. _GH368: https://github.com/pydata/pandas/issues/368
-.. _GH370: https://github.com/pydata/pandas/issues/370
-.. _GH373: https://github.com/pydata/pandas/issues/373
-.. _GH376: https://github.com/pydata/pandas/issues/376
-.. _GH381: https://github.com/pydata/pandas/issues/381
-.. _GH382: https://github.com/pydata/pandas/issues/382
-.. _GH393: https://github.com/pydata/pandas/issues/393
-.. _PR286: https://github.com/pydata/pandas/pull/286
-.. _PR296: https://github.com/pydata/pandas/pull/296
-.. _PR313: https://github.com/pydata/pandas/pull/313
-.. _PR321: https://github.com/pydata/pandas/pull/321
-.. _PR348: https://github.com/pydata/pandas/pull/348
-.. _PR355: https://github.com/pydata/pandas/pull/355
-.. _PR362: https://github.com/pydata/pandas/pull/362
-.. _PR386: https://github.com/pydata/pandas/pull/386
-.. _PR387: https://github.com/pydata/pandas/pull/387
-.. _PR396: https://github.com/pydata/pandas/pull/396
diff --git a/doc/source/v0.6.1.txt b/doc/source/v0.6.1.txt
index 7b0588884c5b2..7e593d07f7f2b 100644
--- a/doc/source/v0.6.1.txt
+++ b/doc/source/v0.6.1.txt
@@ -8,28 +8,28 @@ New features
~~~~~~~~~~~~
- Can :ref:`append single rows <merging.append.row>` (as Series) to a DataFrame
- Add Spearman and Kendall rank :ref:`correlation <computation.correlation>`
- options to Series.corr and DataFrame.corr (GH428_)
+ options to Series.corr and DataFrame.corr (:issue:`428`)
- :ref:`Added <indexing.basics.get_value>` ``get_value`` and ``set_value`` methods to
Series, DataFrame, and Panel for very low-overhead access (>2x faster in many
- cases) to scalar elements (GH437_, GH438_). ``set_value`` is capable of
+ cases) to scalar elements (:issue:`437`, :issue:`438`). ``set_value`` is capable of
producing an enlarged object.
-- Add PyQt table widget to sandbox (PR435_)
+- Add PyQt table widget to sandbox (:issue:`435`)
- DataFrame.align can :ref:`accept Series arguments <basics.align.frame.series>`
- and an :ref:`axis option <basics.df_join>` (GH461_)
+ and an :ref:`axis option <basics.df_join>` (:issue:`461`)
- Implement new :ref:`SparseArray <sparse.array>` and :ref:`SparseList <sparse.list>`
- data structures. SparseSeries now derives from SparseArray (GH463_)
-- :ref:`Better console printing options <basics.console_output>` (PR453_)
+ data structures. SparseSeries now derives from SparseArray (:issue:`463`)
+- :ref:`Better console printing options <basics.console_output>` (:issue:`453`)
- Implement fast :ref:`data ranking <computation.ranking>` for Series and
- DataFrame, fast versions of scipy.stats.rankdata (GH428_)
+ DataFrame, fast versions of scipy.stats.rankdata (:issue:`428`)
- Implement :ref:`DataFrame.from_items <basics.dataframe.from_items>` alternate
- constructor (GH444_)
+ constructor (:issue:`444`)
- DataFrame.convert_objects method for :ref:`inferring better dtypes <basics.cast>`
- for object columns (GH302_)
+ for object columns (:issue:`302`)
- Add :ref:`rolling_corr_pairwise <stats.moments.corr_pairwise>` function for
- computing Panel of correlation matrices (GH189_)
+ computing Panel of correlation matrices (:issue:`189`)
- Add :ref:`margins <reshaping.pivot.margins>` option to :ref:`pivot_table
- <reshaping.pivot>` for computing subgroup aggregates (GH114_)
-- Add ``Series.from_csv`` function (PR482_)
+ <reshaping.pivot>` for computing subgroup aggregates (:issue:`114`)
+- Add ``Series.from_csv`` function (:issue:`482`)
- :ref:`Can pass <stats.moments.binary>` DataFrame/DataFrame and
DataFrame/Series to rolling_corr/rolling_cov (GH #462)
- MultiIndex.get_level_values can :ref:`accept the level name <indexing.get_level_values>`
@@ -48,15 +48,3 @@ Performance improvements
- Column deletion in DataFrame copies no data (computes views on blocks) (GH
#158)
-.. _GH114: https://github.com/pydata/pandas/issues/114
-.. _GH189: https://github.com/pydata/pandas/issues/302
-.. _GH302: https://github.com/pydata/pandas/issues/302
-.. _GH428: https://github.com/pydata/pandas/issues/428
-.. _GH437: https://github.com/pydata/pandas/issues/437
-.. _GH438: https://github.com/pydata/pandas/issues/438
-.. _GH444: https://github.com/pydata/pandas/issues/444
-.. _GH461: https://github.com/pydata/pandas/issues/461
-.. _GH463: https://github.com/pydata/pandas/issues/463
-.. _PR435: https://github.com/pydata/pandas/pull/435
-.. _PR453: https://github.com/pydata/pandas/pull/453
-.. _PR482: https://github.com/pydata/pandas/pull/482
diff --git a/doc/source/v0.7.0.txt b/doc/source/v0.7.0.txt
index 6ff748f142d15..bf7acd3820db0 100644
--- a/doc/source/v0.7.0.txt
+++ b/doc/source/v0.7.0.txt
@@ -9,24 +9,24 @@ New features
- New unified :ref:`merge function <merging.join>` for efficiently performing
full gamut of database / relational-algebra operations. Refactored existing
join methods to use the new infrastructure, resulting in substantial
- performance gains (GH220_, GH249_, GH267_)
+ performance gains (:issue:`220`, :issue:`249`, :issue:`267`)
- New :ref:`unified concatenation function <merging.concat>` for concatenating
Series, DataFrame or Panel objects along an axis. Can form union or
intersection of the other axes. Improves performance of ``Series.append`` and
- ``DataFrame.append`` (GH468_, GH479_, GH273_)
+ ``DataFrame.append`` (:issue:`468`, :issue:`479`, :issue:`273`)
- :ref:`Can <merging.concatenation>` pass multiple DataFrames to
`DataFrame.append` to concatenate (stack) and multiple Series to
``Series.append`` too
- :ref:`Can<basics.dataframe.from_list_of_dicts>` pass list of dicts (e.g., a
- list of JSON objects) to DataFrame constructor (GH526_)
+ list of JSON objects) to DataFrame constructor (:issue:`526`)
- You can now :ref:`set multiple columns <indexing.columns.multiple>` in a
- DataFrame via ``__getitem__``, useful for transformation (GH342_)
+ DataFrame via ``__getitem__``, useful for transformation (:issue:`342`)
-- Handle differently-indexed output values in ``DataFrame.apply`` (GH498_)
+- Handle differently-indexed output values in ``DataFrame.apply`` (:issue:`498`)
.. ipython:: python
@@ -34,10 +34,10 @@ New features
df.apply(lambda x: x.describe())
- :ref:`Add<indexing.reorderlevels>` ``reorder_levels`` method to Series and
- DataFrame (PR534_)
+ DataFrame (:issue:`534`)
- :ref:`Add<indexing.dictionarylike>` dict-like ``get`` function to DataFrame
- and Panel (PR521_)
+ and Panel (:issue:`521`)
- :ref:`Add<basics.iterrows>` ``DataFrame.iterrows`` method for efficiently
iterating through the rows of a DataFrame
@@ -52,10 +52,10 @@ New features
- :ref:`Add <indexing.advanced_reindex>` ``level`` option to the ``reindex``
and ``align`` methods on Series and DataFrame for broadcasting values across
- a level (GH542_, PR552_, others)
+ a level (:issue:`542`, :issue:`552`, others)
- :ref:`Add <dsintro.panel_item_selection>` attribute-based item access to
- ``Panel`` and add IPython completion (PR563_)
+ ``Panel`` and add IPython completion (:issue:`563`)
- :ref:`Add <visualization.basic>` ``logy`` option to ``Series.plot`` for
log-scaling on the Y axis
@@ -64,38 +64,38 @@ New features
``DataFrame.to_string``
- :ref:`Can <merging.multiple_join>` pass multiple DataFrames to
- ``DataFrame.join`` to join on index (GH115_)
+ ``DataFrame.join`` to join on index (:issue:`115`)
- :ref:`Can <merging.multiple_join>` pass multiple Panels to ``Panel.join``
- (GH115_)
+ (:issue:`115`)
- :ref:`Added <io.formatting>` ``justify`` argument to ``DataFrame.to_string``
to allow different alignment of column headers
- :ref:`Add <groupby.attributes>` ``sort`` option to GroupBy to allow disabling
- sorting of the group keys for potential speedups (GH595_)
+ sorting of the group keys for potential speedups (:issue:`595`)
- :ref:`Can <basics.dataframe.from_series>` pass MaskedArray to Series
- constructor (PR563_)
+ constructor (:issue:`563`)
- :ref:`Add <dsintro.panel_item_selection>` Panel item access via attributes
- and IPython completion (GH554_)
+ and IPython completion (:issue:`554`)
- Implement ``DataFrame.lookup``, fancy-indexing analogue for retrieving values
- given a sequence of row and column labels (GH338_)
+ given a sequence of row and column labels (:issue:`338`)
- Can pass a :ref:`list of functions <groupby.aggregate.multifunc>` to
aggregate with groupby on a DataFrame, yielding an aggregated result with
- hierarchical columns (GH166_)
+ hierarchical columns (:issue:`166`)
- Can call ``cummin`` and ``cummax`` on Series and DataFrame to get cumulative
- minimum and maximum, respectively (GH647_)
+ minimum and maximum, respectively (:issue:`647`)
- ``value_range`` added as utility function to get min and max of a dataframe
- (GH288_)
+ (:issue:`288`)
- Added ``encoding`` argument to ``read_csv``, ``read_table``, ``to_csv`` and
- ``from_csv`` for non-ascii text (GH717_)
+ ``from_csv`` for non-ascii text (:issue:`717`)
- :ref:`Added <basics.stats>` ``abs`` method to pandas objects
@@ -231,28 +231,28 @@ Other API Changes
- If ``Series.sort`` is called on a column of a DataFrame, an exception will
now be raised. Before it was possible to accidentally mutate a DataFrame's
column by doing ``df[col].sort()`` instead of the side-effect free method
- ``df[col].order()`` (GH316_)
+ ``df[col].order()`` (:issue:`316`)
- Miscellaneous renames and deprecations which will (harmlessly) raise
``FutureWarning``
-- ``drop`` added as an optional parameter to ``DataFrame.reset_index`` (GH699_)
+- ``drop`` added as an optional parameter to ``DataFrame.reset_index`` (:issue:`699`)
Performance improvements
~~~~~~~~~~~~~~~~~~~~~~~~
- :ref:`Cythonized GroupBy aggregations <groupby.aggregate.cython>` no longer
- presort the data, thus achieving a significant speedup (GH93_). GroupBy
+ presort the data, thus achieving a significant speedup (:issue:`93`). GroupBy
aggregations with Python functions significantly sped up by clever
- manipulation of the ndarray data type in Cython (GH496_).
+ manipulation of the ndarray data type in Cython (:issue:`496`).
- Better error message in DataFrame constructor when passed column labels
- don't match data (GH497_)
+ don't match data (:issue:`497`)
- Substantially improve performance of multi-GroupBy aggregation when a
- Python function is passed, reuse ndarray object in Cython (GH496_)
-- Can store objects indexed by tuples and floats in HDFStore (GH492_)
-- Don't print length by default in Series.to_string, add `length` option (GH489_)
+ Python function is passed, reuse ndarray object in Cython (:issue:`496`)
+- Can store objects indexed by tuples and floats in HDFStore (:issue:`492`)
+- Don't print length by default in Series.to_string, add `length` option (:issue:`489`)
- Improve Cython code for multi-groupby to aggregate without having to sort
- the data (GH93_)
+ the data (:issue:`93`)
- Improve MultiIndex reindexing speed by storing tuples in the MultiIndex,
test for backwards unpickling compatibility
- Improve column reindexing performance by using specialized Cython take
@@ -262,47 +262,11 @@ Performance improvements
regression from prior versions
- Friendlier error message in setup.py if NumPy not installed
- Use common set of NA-handling operations (sum, mean, etc.) in Panel class
- also (GH536_)
+ also (:issue:`536`)
- Default name assignment when calling ``reset_index`` on DataFrame with a
- regular (non-hierarchical) index (GH476_)
+ regular (non-hierarchical) index (:issue:`476`)
- Use Cythonized groupers when possible in Series/DataFrame stat ops with
- ``level`` parameter passed (GH545_)
+ ``level`` parameter passed (:issue:`545`)
- Ported skiplist data structure to C to speed up ``rolling_median`` by about
- 5-10x in most typical use cases (GH374_)
-
-.. _GH115: https://github.com/pydata/pandas/issues/115
-.. _GH166: https://github.com/pydata/pandas/issues/166
-.. _GH220: https://github.com/pydata/pandas/issues/220
-.. _GH288: https://github.com/pydata/pandas/issues/288
-.. _GH249: https://github.com/pydata/pandas/issues/249
-.. _GH267: https://github.com/pydata/pandas/issues/267
-.. _GH273: https://github.com/pydata/pandas/issues/273
-.. _GH316: https://github.com/pydata/pandas/issues/316
-.. _GH338: https://github.com/pydata/pandas/issues/338
-.. _GH342: https://github.com/pydata/pandas/issues/342
-.. _GH374: https://github.com/pydata/pandas/issues/374
-.. _GH439: https://github.com/pydata/pandas/issues/439
-.. _GH468: https://github.com/pydata/pandas/issues/468
-.. _GH476: https://github.com/pydata/pandas/issues/476
-.. _GH479: https://github.com/pydata/pandas/issues/479
-.. _GH489: https://github.com/pydata/pandas/issues/489
-.. _GH492: https://github.com/pydata/pandas/issues/492
-.. _GH496: https://github.com/pydata/pandas/issues/496
-.. _GH497: https://github.com/pydata/pandas/issues/497
-.. _GH498: https://github.com/pydata/pandas/issues/498
-.. _GH526: https://github.com/pydata/pandas/issues/526
-.. _GH536: https://github.com/pydata/pandas/issues/536
-.. _GH542: https://github.com/pydata/pandas/issues/542
-.. _GH545: https://github.com/pydata/pandas/issues/545
-.. _GH554: https://github.com/pydata/pandas/issues/554
-.. _GH595: https://github.com/pydata/pandas/issues/595
-.. _GH647: https://github.com/pydata/pandas/issues/647
-.. _GH699: https://github.com/pydata/pandas/issues/699
-.. _GH717: https://github.com/pydata/pandas/issues/717
-.. _GH93: https://github.com/pydata/pandas/issues/93
-.. _GH93: https://github.com/pydata/pandas/issues/93
-.. _PR521: https://github.com/pydata/pandas/pull/521
-.. _PR534: https://github.com/pydata/pandas/pull/534
-.. _PR552: https://github.com/pydata/pandas/pull/552
-.. _PR554: https://github.com/pydata/pandas/pull/554
-.. _PR563: https://github.com/pydata/pandas/pull/563
+ 5-10x in most typical use cases (:issue:`374`)
+
diff --git a/doc/source/v0.7.1.txt b/doc/source/v0.7.1.txt
index 181751eb1c4b0..bc12cb8d200cd 100644
--- a/doc/source/v0.7.1.txt
+++ b/doc/source/v0.7.1.txt
@@ -10,30 +10,21 @@ New features
~~~~~~~~~~~~
- Add ``to_clipboard`` function to pandas namespace for writing objects to
- the system clipboard (GH774_)
+ the system clipboard (:issue:`774`)
- Add ``itertuples`` method to DataFrame for iterating through the rows of a
- dataframe as tuples (GH818_)
+ dataframe as tuples (:issue:`818`)
- Add ability to pass fill_value and method to DataFrame and Series align
- method (GH806_, GH807_)
- - Add fill_value option to reindex, align methods (GH784_)
- - Enable concat to produce DataFrame from Series (GH787_)
- - Add ``between`` method to Series (GH802_)
+ method (:issue:`806`, :issue:`807`)
+ - Add fill_value option to reindex, align methods (:issue:`784`)
+ - Enable concat to produce DataFrame from Series (:issue:`787`)
+ - Add ``between`` method to Series (:issue:`802`)
- Add HTML representation hook to DataFrame for the IPython HTML notebook
- (GH773_)
+ (:issue:`773`)
- Support for reading Excel 2007 XML documents using openpyxl
Performance improvements
~~~~~~~~~~~~~~~~~~~~~~~~
- Improve performance and memory usage of fillna on DataFrame
- - Can concatenate a list of Series along axis=1 to obtain a DataFrame (GH787_)
+ - Can concatenate a list of Series along axis=1 to obtain a DataFrame (:issue:`787`)
-.. _GH774: https://github.com/pydata/pandas/issues/774
-.. _GH818: https://github.com/pydata/pandas/issues/818
-.. _GH806: https://github.com/pydata/pandas/issues/806
-.. _GH807: https://github.com/pydata/pandas/issues/807
-.. _GH784: https://github.com/pydata/pandas/issues/784
-.. _GH787: https://github.com/pydata/pandas/issues/787
-.. _GH802: https://github.com/pydata/pandas/issues/802
-.. _GH773: https://github.com/pydata/pandas/issues/773
-.. _GH787: https://github.com/pydata/pandas/issues/787
\ No newline at end of file
diff --git a/doc/source/v0.7.2.txt b/doc/source/v0.7.2.txt
index 04f7686ed20c6..c711639354139 100644
--- a/doc/source/v0.7.2.txt
+++ b/doc/source/v0.7.2.txt
@@ -8,31 +8,20 @@ This release targets bugs in 0.7.1, and adds a few minor features.
New features
~~~~~~~~~~~~
- - Add additional tie-breaking methods in DataFrame.rank (GH874_)
- - Add ascending parameter to rank in Series, DataFrame (GH875_)
- - Add coerce_float option to DataFrame.from_records (GH893_)
- - Add sort_columns parameter to allow unsorted plots (GH918_)
- - Enable column access via attributes on GroupBy (GH882_)
- - Can pass dict of values to DataFrame.fillna (GH661_)
+ - Add additional tie-breaking methods in DataFrame.rank (:issue:`874`)
+ - Add ascending parameter to rank in Series, DataFrame (:issue:`875`)
+ - Add coerce_float option to DataFrame.from_records (:issue:`893`)
+ - Add sort_columns parameter to allow unsorted plots (:issue:`918`)
+ - Enable column access via attributes on GroupBy (:issue:`882`)
+ - Can pass dict of values to DataFrame.fillna (:issue:`661`)
- Can select multiple hierarchical groups by passing list of values in .ix
- (GH134_)
- - Add ``axis`` option to DataFrame.fillna (GH174_)
- - Add level keyword to ``drop`` for dropping values from a level (GH159_)
+ (:issue:`134`)
+ - Add ``axis`` option to DataFrame.fillna (:issue:`174`)
+ - Add level keyword to ``drop`` for dropping values from a level (:issue:`159`)
Performance improvements
~~~~~~~~~~~~~~~~~~~~~~~~
- - Use khash for Series.value_counts, add raw function to algorithms.py (GH861_)
- - Intercept __builtin__.sum in groupby (GH885_)
+ - Use khash for Series.value_counts, add raw function to algorithms.py (:issue:`861`)
+ - Intercept __builtin__.sum in groupby (:issue:`885`)
-.. _GH134: https://github.com/pydata/pandas/issues/134
-.. _GH159: https://github.com/pydata/pandas/issues/159
-.. _GH174: https://github.com/pydata/pandas/issues/174
-.. _GH661: https://github.com/pydata/pandas/issues/661
-.. _GH874: https://github.com/pydata/pandas/issues/874
-.. _GH875: https://github.com/pydata/pandas/issues/875
-.. _GH893: https://github.com/pydata/pandas/issues/893
-.. _GH918: https://github.com/pydata/pandas/issues/918
-.. _GH882: https://github.com/pydata/pandas/issues/882
-.. _GH861: https://github.com/pydata/pandas/issues/861
-.. _GH885: https://github.com/pydata/pandas/issues/885
diff --git a/doc/source/v0.7.3.txt b/doc/source/v0.7.3.txt
index 72106ae7efb5d..afb4b8faac2cc 100644
--- a/doc/source/v0.7.3.txt
+++ b/doc/source/v0.7.3.txt
@@ -6,8 +6,8 @@ v.0.7.3 (April 12, 2012)
This is a minor release from 0.7.2 and fixes many minor bugs and adds a number
of nice new features. There are also a couple of API changes to note; these
should not affect very many users, and we are inclined to call them "bug fixes"
-even though they do constitute a change in behavior. See the `full release
-notes <https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue
+even though they do constitute a change in behavior. See the :ref:`full release
+notes <release>` or issue
tracker on GitHub for a complete list.
New features
diff --git a/doc/source/v0.8.0.txt b/doc/source/v0.8.0.txt
index 22e6a056bd4ce..243b7466d7dee 100644
--- a/doc/source/v0.8.0.txt
+++ b/doc/source/v0.8.0.txt
@@ -10,8 +10,8 @@ than 20 distinct authors. Most pandas 0.7.3 and earlier users should not
experience any issues upgrading, but due to the migration to the NumPy
datetime64 dtype, there may be a number of bugs and incompatibilities
lurking. Lingering incompatibilities will be fixed ASAP in a 0.8.1 release if
-necessary. See the `full release notes
-<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
+necessary. See the :ref:`full release notes
+<release>` or issue tracker
on GitHub for a complete list.
Support for non-unique indexes
diff --git a/doc/source/v0.8.1.txt b/doc/source/v0.8.1.txt
index 0bfa1d20e1b22..cecf6f16cdc71 100644
--- a/doc/source/v0.8.1.txt
+++ b/doc/source/v0.8.1.txt
@@ -11,36 +11,26 @@ New features
~~~~~~~~~~~~
- Add :ref:`vectorized string processing methods <basics.string_methods>`
- accessible via Series.str (GH620_)
- - Add option to disable adjustment in EWMA (GH1584_)
- - :ref:`Radviz plot <visualization.radviz>` (GH1566_)
+ accessible via Series.str (:issue:`620`)
+ - Add option to disable adjustment in EWMA (:issue:`1584`)
+ - :ref:`Radviz plot <visualization.radviz>` (:issue:`1566`)
- :ref:`Parallel coordinates plot <visualization.parallel_coordinates>`
- :ref:`Bootstrap plot <visualization.bootstrap>`
- - Per column styles and secondary y-axis plotting (GH1559_)
- - New datetime converters millisecond plotting (GH1599_)
- - Add option to disable "sparse" display of hierarchical indexes (GH1538_)
+ - Per column styles and secondary y-axis plotting (:issue:`1559`)
+ - New datetime converters millisecond plotting (:issue:`1599`)
+ - Add option to disable "sparse" display of hierarchical indexes (:issue:`1538`)
- Series/DataFrame's ``set_index`` method can :ref:`append levels
- <indexing.set_index>` to an existing Index/MultiIndex (GH1569_, GH1577_)
+ <indexing.set_index>` to an existing Index/MultiIndex (:issue:`1569`, :issue:`1577`)
Performance improvements
~~~~~~~~~~~~~~~~~~~~~~~~
- Improved implementation of rolling min and max (thanks to `Bottleneck
<http://berkeleyanalytics.com/bottleneck/>`__ !)
- - Add accelerated ``'median'`` GroupBy option (GH1358_)
+ - Add accelerated ``'median'`` GroupBy option (:issue:`1358`)
- Significantly improve the performance of parsing ISO8601-format date
- strings with ``DatetimeIndex`` or ``to_datetime`` (GH1571_)
+ strings with ``DatetimeIndex`` or ``to_datetime`` (:issue:`1571`)
- Improve the performance of GroupBy on single-key aggregations and use with
Categorical types
- Significant datetime parsing performance improvments
-.. _GH620: https://github.com/pydata/pandas/issues/620
-.. _GH1358: https://github.com/pydata/pandas/issues/1358
-.. _GH1538: https://github.com/pydata/pandas/issues/1538
-.. _GH1559: https://github.com/pydata/pandas/issues/1559
-.. _GH1584: https://github.com/pydata/pandas/issues/1584
-.. _GH1566: https://github.com/pydata/pandas/issues/1566
-.. _GH1569: https://github.com/pydata/pandas/issues/1569
-.. _GH1571: https://github.com/pydata/pandas/issues/1571
-.. _GH1577: https://github.com/pydata/pandas/issues/1577
-.. _GH1599: https://github.com/pydata/pandas/issues/1599
diff --git a/doc/source/v0.9.0.txt b/doc/source/v0.9.0.txt
index 3b91e64253dea..b0c2c2455ab77 100644
--- a/doc/source/v0.9.0.txt
+++ b/doc/source/v0.9.0.txt
@@ -13,19 +13,19 @@ New features
~~~~~~~~~~~~
- Add ``encode`` and ``decode`` for unicode handling to :ref:`vectorized
- string processing methods <basics.string_methods>` in Series.str (GH1706_)
- - Add ``DataFrame.to_latex`` method (GH1735_)
- - Add convenient expanding window equivalents of all rolling_* ops (GH1785_)
+ string processing methods <basics.string_methods>` in Series.str (:issue:`1706`)
+ - Add ``DataFrame.to_latex`` method (:issue:`1735`)
+ - Add convenient expanding window equivalents of all rolling_* ops (:issue:`1785`)
- Add Options class to pandas.io.data for fetching options data from Yahoo!
- Finance (GH1748_, GH1739_)
+ Finance (:issue:`1748`, :issue:`1739`)
- More flexible parsing of boolean values (Yes, No, TRUE, FALSE, etc)
- (GH1691_, GH1295_)
+ (:issue:`1691`, :issue:`1295`)
- Add ``level`` parameter to ``Series.reset_index``
- - ``TimeSeries.between_time`` can now select times across midnight (GH1871_)
- - Series constructor can now handle generator as input (GH1679_)
+ - ``TimeSeries.between_time`` can now select times across midnight (:issue:`1871`)
+ - Series constructor can now handle generator as input (:issue:`1679`)
- ``DataFrame.dropna`` can now take multiple axes (tuple/list) as input
- (GH924_)
- - Enable ``skip_footer`` parameter in ``ExcelFile.parse`` (GH1843_)
+ (:issue:`924`)
+ - Enable ``skip_footer`` parameter in ``ExcelFile.parse`` (:issue:`1843`)
API changes
~~~~~~~~~~~
@@ -58,57 +58,37 @@ API changes
s2
- Deprecated ``day_of_year`` API removed from PeriodIndex, use ``dayofyear``
- (GH1723_)
+ (:issue:`1723`)
- Don't modify NumPy suppress printoption to True at import time
- The internal HDF5 data arrangement for DataFrames has been transposed. Legacy
- files will still be readable by HDFStore (GH1834_, GH1824_)
+ files will still be readable by HDFStore (:issue:`1834`, :issue:`1824`)
- Legacy cruft removed: pandas.stats.misc.quantileTS
-- Use ISO8601 format for Period repr: monthly, daily, and on down (GH1776_)
+- Use ISO8601 format for Period repr: monthly, daily, and on down (:issue:`1776`)
- Empty DataFrame columns are now created as object dtype. This will prevent a
class of TypeErrors that was occurring in code where the dtype of a column
would depend on the presence of data or not (e.g. a SQL query having results)
- (GH1783_)
+ (:issue:`1783`)
- Setting parts of DataFrame/Panel using ix now aligns input Series/DataFrame
- (GH1630_)
+ (:issue:`1630`)
- ``first`` and ``last`` methods in ``GroupBy`` no longer drop non-numeric
- columns (GH1809_)
+ columns (:issue:`1809`)
- Resolved inconsistencies in specifying custom NA values in text parser.
``na_values`` of type dict no longer override default NAs unless
- ``keep_default_na`` is set to false explicitly (GH1657_)
+ ``keep_default_na`` is set to false explicitly (:issue:`1657`)
- ``DataFrame.dot`` will not do data alignment, and also work with Series
- (GH1915_)
+ (:issue:`1915`)
-See the `full release notes
-<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
+See the :ref:`full release notes
+<release>` or issue tracker
on GitHub for a complete list.
-.. _GH1706: https://github.com/pydata/pandas/issues/1706
-.. _GH1735: https://github.com/pydata/pandas/issues/1735
-.. _GH1785: https://github.com/pydata/pandas/issues/1785
-.. _GH1748: https://github.com/pydata/pandas/issues/1748
-.. _GH1739: https://github.com/pydata/pandas/issues/1739
-.. _GH1691: https://github.com/pydata/pandas/issues/1691
-.. _GH1295: https://github.com/pydata/pandas/issues/1295
-.. _GH1723: https://github.com/pydata/pandas/issues/1723
-.. _GH1834: https://github.com/pydata/pandas/issues/1834
-.. _GH1824: https://github.com/pydata/pandas/issues/1824
-.. _GH1776: https://github.com/pydata/pandas/issues/1776
-.. _GH1783: https://github.com/pydata/pandas/issues/1783
-.. _GH1630: https://github.com/pydata/pandas/issues/1630
-.. _GH1809: https://github.com/pydata/pandas/issues/1809
-.. _GH1657: https://github.com/pydata/pandas/issues/1657
-.. _GH1871: https://github.com/pydata/pandas/issues/1871
-.. _GH1679: https://github.com/pydata/pandas/issues/1679
-.. _GH1915: https://github.com/pydata/pandas/issues/1915
-.. _GH924: https://github.com/pydata/pandas/issues/924
-.. _GH1843: https://github.com/pydata/pandas/issues/1843
diff --git a/doc/source/v0.9.1.txt b/doc/source/v0.9.1.txt
index 6733ab8a9e95e..7de000c255d4c 100644
--- a/doc/source/v0.9.1.txt
+++ b/doc/source/v0.9.1.txt
@@ -13,7 +13,7 @@ New features
~~~~~~~~~~~~
- `Series.sort`, `DataFrame.sort`, and `DataFrame.sort_index` can now be
- specified in a per-column manner to support multiple sort orders (GH928_)
+ specified in a per-column manner to support multiple sort orders (:issue:`928`)
.. ipython:: python
@@ -24,7 +24,7 @@ New features
- `DataFrame.rank` now supports additional argument values for the
`na_option` parameter so missing values can be assigned either the largest
- or the smallest rank (GH1508_, GH2159_)
+ or the smallest rank (:issue:`1508`, :issue:`2159`)
.. ipython:: python
@@ -40,7 +40,7 @@ New features
- DataFrame has new `where` and `mask` methods to select values according to a
- given boolean mask (GH2109_, GH2151_)
+ given boolean mask (:issue:`2109`, :issue:`2151`)
DataFrame currently supports slicing via a boolean vector the same length as the DataFrame (inside the `[]`).
The returned DataFrame has the same number of columns as the original, but is sliced on its index.
@@ -81,7 +81,7 @@ New features
df.mask(df<=0)
- - Enable referencing of Excel columns by their column names (GH1936_)
+ - Enable referencing of Excel columns by their column names (:issue:`1936`)
.. ipython:: python
@@ -92,13 +92,13 @@ New features
- Added option to disable pandas-style tick locators and formatters
using `series.plot(x_compat=True)` or `pandas.plot_params['x_compat'] =
- True` (GH2205_)
+ True` (:issue:`2205`)
- Existing TimeSeries methods `at_time` and `between_time` were added to
- DataFrame (GH2149_)
- - DataFrame.dot can now accept ndarrays (GH2042_)
- - DataFrame.drop now supports non-unique indexes (GH2101_)
- - Panel.shift now supports negative periods (GH2164_)
- - DataFrame now support unary ~ operator (GH2110_)
+ DataFrame (:issue:`2149`)
+ - DataFrame.dot can now accept ndarrays (:issue:`2042`)
+ - DataFrame.drop now supports non-unique indexes (:issue:`2101`)
+ - Panel.shift now supports negative periods (:issue:`2164`)
+ - DataFrame now support unary ~ operator (:issue:`2110`)
API changes
~~~~~~~~~~~
@@ -116,7 +116,7 @@ API changes
- Period.end_time now returns the last nanosecond in the time interval
- (GH2124_, GH2125_, GH1764_)
+ (:issue:`2124`, :issue:`2125`, :issue:`1764`)
.. ipython:: python
@@ -126,7 +126,7 @@ API changes
- File parsers no longer coerce to float or bool for columns that have custom
- converters specified (GH2184_)
+ converters specified (:issue:`2184`)
.. ipython:: python
@@ -136,98 +136,6 @@ API changes
read_csv(StringIO(data), converters={'A' : lambda x: x.strip()})
-See the `full release notes
-<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
+See the :ref:`full release notes
+<release>` or issue tracker
on GitHub for a complete list.
-
-
-.. _GH1508: https://github.com/pydata/pandas/issues/1508
-.. _GH928: https://github.com/pydata/pandas/issues/928
-.. _GH2159: https://github.com/pydata/pandas/issues/2159
-.. _GH2109: https://github.com/pydata/pandas/issues/2109
-.. _GH2151: https://github.com/pydata/pandas/issues/2151
-.. _GH2149: https://github.com/pydata/pandas/issues/2149
-.. _GH2101: https://github.com/pydata/pandas/issues/2101
-.. _GH2042: https://github.com/pydata/pandas/issues/2042
-.. _GH1936: https://github.com/pydata/pandas/issues/1936
-.. _GH1764: https://github.com/pydata/pandas/issues/1764
-.. _GH2125: https://github.com/pydata/pandas/issues/2125
-.. _GH2124: https://github.com/pydata/pandas/issues/2124
-.. _GH2110: https://github.com/pydata/pandas/issues/2110
-.. _GH2184: https://github.com/pydata/pandas/issues/2184
-.. _GH2205: https://github.com/pydata/pandas/issues/2205
-
-.. _GH2181: https://github.com/pydata/pandas/issues/2181
-.. _GH2180: https://github.com/pydata/pandas/issues/2180
-.. _GH2176: https://github.com/pydata/pandas/issues/2176
-.. _GH2174: https://github.com/pydata/pandas/issues/2174
-.. _GH2173: https://github.com/pydata/pandas/issues/2173
-.. _GH2170: https://github.com/pydata/pandas/issues/2170
-.. _GH2169: https://github.com/pydata/pandas/issues/2169
-.. _GH2167: https://github.com/pydata/pandas/issues/2167
-.. _GH2166: https://github.com/pydata/pandas/issues/2166
-.. _GH2165: https://github.com/pydata/pandas/issues/2165
-.. _GH2164: https://github.com/pydata/pandas/issues/2164
-.. _GH2163: https://github.com/pydata/pandas/issues/2163
-.. _GH2161: https://github.com/pydata/pandas/issues/2161
-.. _GH2157: https://github.com/pydata/pandas/issues/2157
-.. _GH2155: https://github.com/pydata/pandas/issues/2155
-.. _GH2152: https://github.com/pydata/pandas/issues/2152
-.. _GH2150: https://github.com/pydata/pandas/issues/2150
-.. _GH2148: https://github.com/pydata/pandas/issues/2148
-.. _GH2147: https://github.com/pydata/pandas/issues/2147
-.. _GH2146: https://github.com/pydata/pandas/issues/2146
-.. _GH2144: https://github.com/pydata/pandas/issues/2144
-.. _GH2140: https://github.com/pydata/pandas/issues/2140
-.. _GH2135: https://github.com/pydata/pandas/issues/2135
-.. _GH2133: https://github.com/pydata/pandas/issues/2133
-.. _GH2131: https://github.com/pydata/pandas/issues/2131
-.. _GH2129: https://github.com/pydata/pandas/issues/2129
-.. _GH2128: https://github.com/pydata/pandas/issues/2128
-.. _GH2127: https://github.com/pydata/pandas/issues/2127
-.. _GH2122: https://github.com/pydata/pandas/issues/2122
-.. _GH2120: https://github.com/pydata/pandas/issues/2120
-.. _GH2119: https://github.com/pydata/pandas/issues/2119
-.. _GH2117: https://github.com/pydata/pandas/issues/2117
-.. _GH2116: https://github.com/pydata/pandas/issues/2116
-.. _GH2114: https://github.com/pydata/pandas/issues/2114
-.. _GH2113: https://github.com/pydata/pandas/issues/2113
-.. _GH2111: https://github.com/pydata/pandas/issues/2111
-.. _GH2108: https://github.com/pydata/pandas/issues/2108
-.. _GH2107: https://github.com/pydata/pandas/issues/2107
-.. _GH2103: https://github.com/pydata/pandas/issues/2103
-.. _GH2100: https://github.com/pydata/pandas/issues/2100
-.. _GH2096: https://github.com/pydata/pandas/issues/2096
-.. _GH2095: https://github.com/pydata/pandas/issues/2095
-.. _GH2093: https://github.com/pydata/pandas/issues/2093
-.. _GH2087: https://github.com/pydata/pandas/issues/2087
-.. _GH2086: https://github.com/pydata/pandas/issues/2086
-.. _GH2083: https://github.com/pydata/pandas/issues/2083
-.. _GH2082: https://github.com/pydata/pandas/issues/2082
-.. _GH2080: https://github.com/pydata/pandas/issues/2080
-.. _GH2079: https://github.com/pydata/pandas/issues/2079
-.. _GH2078: https://github.com/pydata/pandas/issues/2078
-.. _GH2077: https://github.com/pydata/pandas/issues/2077
-.. _GH2075: https://github.com/pydata/pandas/issues/2075
-.. _GH2068: https://github.com/pydata/pandas/issues/2068
-.. _GH2066: https://github.com/pydata/pandas/issues/2066
-.. _GH2065: https://github.com/pydata/pandas/issues/2065
-.. _GH2063: https://github.com/pydata/pandas/issues/2063
-.. _GH2061: https://github.com/pydata/pandas/issues/2061
-.. _GH2060: https://github.com/pydata/pandas/issues/2060
-.. _GH2059: https://github.com/pydata/pandas/issues/2059
-.. _GH2056: https://github.com/pydata/pandas/issues/2056
-.. _GH2051: https://github.com/pydata/pandas/issues/2051
-.. _GH2049: https://github.com/pydata/pandas/issues/2049
-.. _GH2043: https://github.com/pydata/pandas/issues/2043
-.. _GH2041: https://github.com/pydata/pandas/issues/2041
-.. _GH2032: https://github.com/pydata/pandas/issues/2032
-.. _GH2029: https://github.com/pydata/pandas/issues/2029
-.. _GH2018: https://github.com/pydata/pandas/issues/2018
-.. _GH2008: https://github.com/pydata/pandas/issues/2008
-.. _GH2005: https://github.com/pydata/pandas/issues/2005
-.. _GH1979: https://github.com/pydata/pandas/issues/1979
-.. _GH1976: https://github.com/pydata/pandas/issues/1976
-.. _GH1959: https://github.com/pydata/pandas/issues/1959
-.. _GH1890: https://github.com/pydata/pandas/issues/1890
-.. _GH1555: https://github.com/pydata/pandas/issues/1555
| closes #3182.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3961 | 2013-06-19T16:14:32Z | 2013-06-20T18:10:16Z | 2013-06-20T18:10:16Z | 2014-06-20T21:52:48Z |
BUG/BLD: add compiler flag for older compilers | diff --git a/setup.py b/setup.py
index bd23b4ef05ce2..ee8f30d62ac6c 100755
--- a/setup.py
+++ b/setup.py
@@ -482,7 +482,8 @@ def pxd(name):
'pandas/src/datetime/np_datetime_strings.c'],
include_dirs=['pandas/src/ujson/python',
'pandas/src/ujson/lib',
- 'pandas/src/datetime'] + common_include)
+ 'pandas/src/datetime'] + common_include,
+ extra_compile_args=['-D_GNU_SOURCE'])
extensions.append(ujson_ext)
| The -D_GNU_SOURCE is needed by older versions of GCC. Many newer versions
enable this flag by default, which is why it wasn't being caught before.
closes #3957.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3959 | 2013-06-19T14:51:05Z | 2013-06-19T16:48:59Z | 2013-06-19T16:48:59Z | 2014-07-11T21:47:26Z |
CLN: Change bare exceptions pt 1 | diff --git a/pandas/sparse/array.py b/pandas/sparse/array.py
index 035db279064a0..de510aa155412 100644
--- a/pandas/sparse/array.py
+++ b/pandas/sparse/array.py
@@ -261,7 +261,7 @@ def _get_val_at(self, loc):
loc += n
if loc >= len(self) or loc < 0:
- raise Exception('Out of bounds access')
+ raise IndexError('out of bounds access')
sp_loc = self.sp_index.lookup(loc)
if sp_loc == -1:
@@ -283,7 +283,7 @@ def take(self, indices, axis=0):
n = len(self)
if (indices < 0).any() or (indices >= n).any():
- raise Exception('out of bounds access')
+ raise IndexError('out of bounds access')
if self.sp_index.npoints > 0:
locs = np.array([self.sp_index.lookup(loc) for loc in indices])
@@ -296,10 +296,10 @@ def take(self, indices, axis=0):
return result
def __setitem__(self, key, value):
- raise Exception('SparseArray objects are immutable')
+ raise TypeError('%r object does not support item assignment' % self.__class__.__name__)
def __setslice__(self, i, j, value):
- raise Exception('SparseArray objects are immutable')
+ raise TypeError('%r object does not support item assignment' % self.__class__.__name__)
def to_dense(self):
"""
@@ -313,7 +313,7 @@ def astype(self, dtype=None):
"""
dtype = np.dtype(dtype)
if dtype is not None and dtype not in (np.float_, float):
- raise Exception('Can only support floating point data for now')
+ raise TypeError('Can only support floating point data for now')
return self.copy()
def copy(self, deep=True):
diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index 9694cc005d178..0a08fba49afeb 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -195,10 +195,10 @@ def _init_matrix(self, data, index, columns, dtype=None):
columns = _default_index(K)
if len(columns) != K:
- raise Exception('Column length mismatch: %d vs. %d' %
+ raise ValueError('Column length mismatch: %d vs. %d' %
(len(columns), K))
if len(index) != N:
- raise Exception('Index length mismatch: %d vs. %d' %
+ raise ValueError('Index length mismatch: %d vs. %d' %
(len(index), N))
data = dict([(idx, data[:, i]) for i, idx in enumerate(columns)])
@@ -585,7 +585,7 @@ def _combine_const(self, other, func):
def _reindex_index(self, index, method, copy, level, fill_value=np.nan,
limit=None):
if level is not None:
- raise Exception('Reindex by level not supported for sparse')
+ raise TypeError('Reindex by level not supported for sparse')
if self.index.equals(index):
if copy:
@@ -616,7 +616,7 @@ def _reindex_index(self, index, method, copy, level, fill_value=np.nan,
def _reindex_columns(self, columns, copy, level, fill_value, limit=None):
if level is not None:
- raise Exception('Reindex by level not supported for sparse')
+ raise TypeError('Reindex by level not supported for sparse')
if com.notnull(fill_value):
raise NotImplementedError
@@ -889,9 +889,12 @@ def stack_sparse_frame(frame):
inds_to_concat = []
vals_to_concat = []
+ # TODO: Figure out whether this can be reached.
+ # I think this currently can't be reached because you can't build a SparseDataFrame
+ # with a non-np.NaN fill value (fails earlier).
for _, series in frame.iteritems():
if not np.isnan(series.fill_value):
- raise Exception('This routine assumes NaN fill value')
+ raise TypeError('This routine assumes NaN fill value')
int_index = series.sp_index.to_int_index()
inds_to_concat.append(int_index.indices)
@@ -931,7 +934,7 @@ def homogenize(series_dict):
for _, series in series_dict.iteritems():
if not np.isnan(series.fill_value):
- raise Exception('this method is only valid with NaN fill values')
+ raise TypeError('this method is only valid with NaN fill values')
if index is None:
index = series.sp_index
diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py
index 0b2842155b299..246e6fa93918f 100644
--- a/pandas/sparse/panel.py
+++ b/pandas/sparse/panel.py
@@ -249,7 +249,7 @@ def to_frame(self, filter_observations=True):
frame : DataFrame
"""
if not filter_observations:
- raise Exception('filter_observations=False not supported for '
+ raise TypeError('filter_observations=False not supported for '
'SparsePanel.to_long')
I, N, K = self.shape
@@ -325,7 +325,7 @@ def reindex(self, major=None, items=None, minor=None, major_axis=None,
if item in self._frames:
new_frames[item] = self._frames[item]
else:
- raise Exception('Reindexing with new items not yet '
+ raise NotImplementedError('Reindexing with new items not yet '
'supported')
else:
new_frames = self._frames
@@ -488,7 +488,7 @@ def _stack_sparse_info(frame):
series = frame[col]
if not np.isnan(series.fill_value):
- raise Exception('This routine assumes NaN fill value')
+ raise TypeError('This routine assumes NaN fill value')
int_index = series.sp_index.to_int_index()
inds_to_concat.append(int_index.indices)
diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py
index bd01845a295b6..1b8d3541da289 100644
--- a/pandas/sparse/series.py
+++ b/pandas/sparse/series.py
@@ -133,7 +133,7 @@ def __new__(cls, data, index=None, sparse_index=None, kind='block',
raise AssertionError()
else:
if index is None:
- raise Exception('must pass index!')
+ raise TypeError('must pass index!')
length = len(index)
@@ -388,7 +388,7 @@ def astype(self, dtype=None):
"""
if dtype is not None and dtype not in (np.float_, float):
- raise Exception('Can only support floating point data')
+ raise TypeError('Can only support floating point data')
return self.copy()
diff --git a/pandas/sparse/tests/test_array.py b/pandas/sparse/tests/test_array.py
index cf2cd2f687e8d..a92170621f50d 100644
--- a/pandas/sparse/tests/test_array.py
+++ b/pandas/sparse/tests/test_array.py
@@ -1,3 +1,4 @@
+import re
from numpy import nan, ndarray
import numpy as np
@@ -8,7 +9,7 @@
from pandas.core.series import Series
from pandas.core.common import notnull
from pandas.sparse.api import SparseArray
-from pandas.util.testing import assert_almost_equal
+from pandas.util.testing import assert_almost_equal, assertRaisesRegexp
def assert_sp_array_equal(left, right):
@@ -28,6 +29,24 @@ def setUp(self):
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
+ def test_get_item(self):
+ errmsg = re.compile("bounds")
+ assertRaisesRegexp(IndexError, errmsg, lambda : self.arr[11])
+ assertRaisesRegexp(IndexError, errmsg, lambda : self.arr[-11])
+ self.assertEqual(self.arr[-1], self.arr[len(self.arr) - 1])
+
+ def test_bad_take(self):
+ assertRaisesRegexp(IndexError, "bounds", lambda : self.arr.take(11))
+ self.assertRaises(IndexError, lambda : self.arr.take(-11))
+
+ def test_set_item(self):
+ def setitem():
+ self.arr[5] = 3
+ def setslice():
+ self.arr[1:5] = 2
+ assertRaisesRegexp(TypeError, "item assignment", setitem)
+ assertRaisesRegexp(TypeError, "item assignment", setslice)
+
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
self.assertEquals(res.fill_value, 0)
@@ -47,7 +66,7 @@ def test_astype(self):
res.sp_values[:3] = 27
self.assert_(not (self.arr.sp_values[:3] == 27).any())
- self.assertRaises(Exception, self.arr.astype, 'i8')
+ assertRaisesRegexp(TypeError, "floating point", self.arr.astype, 'i8')
def test_copy_shallow(self):
arr2 = self.arr.copy(deep=False)
diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py
index c6515cd4113f0..1382a6a642aa3 100644
--- a/pandas/sparse/tests/test_sparse.py
+++ b/pandas/sparse/tests/test_sparse.py
@@ -13,7 +13,7 @@
dec = np.testing.dec
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
- assert_frame_equal, assert_panel_equal)
+ assert_frame_equal, assert_panel_equal, assertRaisesRegexp)
from numpy.testing import assert_equal
from pandas import Series, DataFrame, bdate_range, Panel
@@ -641,7 +641,7 @@ def _check_matches(indices, expected):
# must have NaN fill value
data = {'a': SparseSeries(np.arange(7), sparse_index=expected2,
fill_value=0)}
- nose.tools.assert_raises(Exception, spf.homogenize, data)
+ assertRaisesRegexp(TypeError, "NaN fill value", spf.homogenize, data)
def test_fill_value_corner(self):
cop = self.zbseries.copy()
@@ -791,7 +791,7 @@ def test_constructor(self):
assert_sp_frame_equal(cons, reindexed)
# assert level parameter breaks reindex
- self.assertRaises(Exception, self.frame.reindex, idx, level=0)
+ self.assertRaises(TypeError, self.frame.reindex, idx, level=0)
repr(self.frame)
@@ -805,14 +805,14 @@ def test_constructor_ndarray(self):
assert_sp_frame_equal(sp, self.frame.reindex(columns=['A']))
# raise on level argument
- self.assertRaises(Exception, self.frame.reindex, columns=['A'],
+ self.assertRaises(TypeError, self.frame.reindex, columns=['A'],
level=1)
# wrong length index / columns
- self.assertRaises(Exception, SparseDataFrame, self.frame.values,
- index=self.frame.index[:-1])
- self.assertRaises(Exception, SparseDataFrame, self.frame.values,
- columns=self.frame.columns[:-1])
+ assertRaisesRegexp(ValueError, "^Index length", SparseDataFrame, self.frame.values,
+ index=self.frame.index[:-1])
+ assertRaisesRegexp(ValueError, "^Column length", SparseDataFrame, self.frame.values,
+ columns=self.frame.columns[:-1])
def test_constructor_empty(self):
sp = SparseDataFrame()
@@ -840,11 +840,17 @@ def test_constructor_from_series(self):
x = Series(np.random.randn(10000), name ='a')
y = Series(np.random.randn(10000), name ='b')
- x.ix[:9998] = 0
- x = x.to_sparse(fill_value=0)
+ x2 = x.astype(float)
+ x2.ix[:9998] = np.NaN
+ x_sparse = x2.to_sparse(fill_value=np.NaN)
- # currently fails
- #df1 = SparseDataFrame([x, y])
+ # Currently fails too with weird ufunc error
+ # df1 = SparseDataFrame([x_sparse, y])
+
+ y.ix[:9998] = 0
+ y_sparse = y.to_sparse(fill_value=0)
+ # without sparse value raises error
+ # df2 = SparseDataFrame([x2_sparse, y])
def test_dtypes(self):
df = DataFrame(np.random.randn(10000, 4))
diff --git a/pandas/stats/common.py b/pandas/stats/common.py
index c3034dbc390bf..75ebc9284ca21 100644
--- a/pandas/stats/common.py
+++ b/pandas/stats/common.py
@@ -1,42 +1,33 @@
-def _get_cluster_type(cluster_type):
- cluster_type = _WINDOW_TYPES.get(cluster_type, cluster_type)
- if cluster_type is None:
- return cluster_type
-
- cluster_type_up = cluster_type.upper()
-
- if cluster_type_up == 'ENTITY':
- return 'entity'
- elif cluster_type_up == 'TIME':
- return 'time'
- else: # pragma: no cover
- raise Exception('Unrecognized cluster type: %s' % cluster_type)
-
-_CLUSTER_TYPES = {
- 0: 'time',
- 1: 'entity'
-}
_WINDOW_TYPES = {
0: 'full_sample',
1: 'rolling',
2: 'expanding'
}
+# also allow 'rolling' as key
+_WINDOW_TYPES.update((v, v) for k,v in _WINDOW_TYPES.items())
+_ADDITIONAL_CLUSTER_TYPES = set(("entity", "time"))
+def _get_cluster_type(cluster_type):
+ # this was previous behavior
+ if cluster_type is None:
+ return cluster_type
+ try:
+ return _get_window_type(cluster_type)
+ except ValueError:
+ final_type = str(cluster_type).lower().replace("_", " ")
+ if final_type in _ADDITIONAL_CLUSTER_TYPES:
+ return final_type
+ raise ValueError('Unrecognized cluster type: %s' % cluster_type)
def _get_window_type(window_type):
- window_type = _WINDOW_TYPES.get(window_type, window_type)
- window_type_up = window_type.upper()
-
- if window_type_up in ('FULL SAMPLE', 'FULL_SAMPLE'):
- return 'full_sample'
- elif window_type_up == 'ROLLING':
- return 'rolling'
- elif window_type_up == 'EXPANDING':
- return 'expanding'
- else: # pragma: no cover
- raise Exception('Unrecognized window type: %s' % window_type)
-
+ # e.g., 0, 1, 2
+ final_type = _WINDOW_TYPES.get(window_type)
+ # e.g., 'full_sample'
+ final_type = final_type or _WINDOW_TYPES.get(str(window_type).lower().replace(" ", "_"))
+ if final_type is None:
+ raise ValueError('Unrecognized window type: %s' % window_type)
+ return final_type
def banner(text, width=80):
"""
diff --git a/pandas/stats/ols.py b/pandas/stats/ols.py
index 13eeb03e15328..cdcf1ab2ab036 100644
--- a/pandas/stats/ols.py
+++ b/pandas/stats/ols.py
@@ -634,8 +634,8 @@ def _set_window(self, window_type, window, min_periods):
self._window_type = scom._get_window_type(window_type)
if self._is_rolling:
- if not ((window is not None)):
- raise AssertionError()
+ if window is None:
+ raise AssertionError("Must specify window.")
if min_periods is None:
min_periods = window
else:
diff --git a/pandas/stats/tests/test_ols.py b/pandas/stats/tests/test_ols.py
index 17a45409c1ab5..abcf5b8df9a9a 100644
--- a/pandas/stats/tests/test_ols.py
+++ b/pandas/stats/tests/test_ols.py
@@ -19,7 +19,7 @@
from pandas.stats.ols import _filter_data
from pandas.stats.plm import NonPooledPanelOLS, PanelOLS
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
- assert_frame_equal)
+ assert_frame_equal, assertRaisesRegexp)
import pandas.util.testing as tm
from common import BaseTest
@@ -663,7 +663,10 @@ def testRollingWithNeweyWest(self):
def testRollingWithEntityCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
cluster='entity')
-
+ def testUnknownClusterRaisesValueError(self):
+ assertRaisesRegexp(ValueError, "Unrecognized cluster.*ridiculous",
+ self.checkMovingOLS, self.panel_x, self.panel_y,
+ cluster='ridiculous')
def testRollingWithTimeEffectsAndEntityCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
time_effects=True, cluster='entity')
@@ -689,6 +692,10 @@ def testNonPooled(self):
self.checkNonPooled(y=self.panel_y, x=self.panel_x)
self.checkNonPooled(y=self.panel_y, x=self.panel_x,
window_type='rolling', window=25, min_periods=10)
+ def testUnknownWindowType(self):
+ self.assertRaisesRegexp(ValueError, "window.*ridiculous",
+ self.checkNonPooled, y=self.panel_y, x=self.panel_x,
+ window_type='ridiculous', window=25, min_periods=10)
def checkNonPooled(self, x, y, **kwds):
# For now, just check that it doesn't crash
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 8342d218e76bb..63f92e9fa7a35 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -5455,7 +5455,7 @@ def test_append_series_dict(self):
columns=['foo', 'bar', 'baz', 'qux'])
series = df.ix[4]
- self.assertRaises(Exception, df.append, series, verify_integrity=True)
+ self.assertRaises(ValueError, df.append, series, verify_integrity=True)
series.name = None
self.assertRaises(Exception, df.append, series, verify_integrity=True)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index c5770c61e2f81..cf7d360b5a93d 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1681,7 +1681,7 @@ def test_append(self):
else:
self.fail("orphaned index!")
- self.assertRaises(Exception, self.ts.append, self.ts,
+ self.assertRaises(ValueError, self.ts.append, self.ts,
verify_integrity=True)
def test_append_many(self):
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 75e35b403dd78..f96f3b98a0383 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -16,7 +16,7 @@
from pandas.core.internals import (IntBlock, BoolBlock, BlockManager,
make_block, _consolidate)
from pandas.util.decorators import cache_readonly, Appender, Substitution
-
+from pandas.core.common import PandasError
from pandas.sparse.frame import SparseDataFrame
import pandas.core.common as com
@@ -1002,7 +1002,8 @@ def _get_concatenated_data(self):
blk.ref_items = self.new_axes[0]
new_data = BlockManager(new_blocks, self.new_axes)
- except Exception: # EAFP
+ # Eventual goal would be to move everything to PandasError or other explicit error
+ except (Exception, PandasError): # EAFP
# should not be possible to fail here for the expected reason with
# axis = 0
if self.axis == 0: # pragma: no cover
@@ -1039,8 +1040,11 @@ def _concat_blocks(self, blocks):
if self.axis > 0:
# Not safe to remove this check, need to profile
if not _all_indexes_same([b.items for b in blocks]):
- raise Exception('dtypes are not consistent throughout '
- 'DataFrames')
+ # TODO: Either profile this piece or remove.
+ # FIXME: Need to figure out how to test whether this line exists or does not...(unclear if even possible
+ # or maybe would require performance test)
+ raise PandasError('dtypes are not consistent throughout '
+ 'DataFrames')
return make_block(concat_values, blocks[0].items, self.new_axes[0])
else:
@@ -1184,7 +1188,7 @@ def _maybe_check_integrity(self, concat_index):
if self.verify_integrity:
if not concat_index.is_unique:
overlap = concat_index.get_duplicates()
- raise Exception('Indexes have overlapping values: %s'
+ raise ValueError('Indexes have overlapping values: %s'
% str(overlap))
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index 1c020353ebb43..b0261077fc767 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -1078,7 +1078,7 @@ def test_append(self):
self.assert_(appended is not self.frame)
# overlap
- self.assertRaises(Exception, self.frame.append, self.frame,
+ self.assertRaises(ValueError, self.frame.append, self.frame,
verify_integrity=True)
def test_append_length0_frame(self):
diff --git a/pandas/tools/tests/test_tile.py b/pandas/tools/tests/test_tile.py
index c3462dfc69e27..7da9a3bb5a95a 100644
--- a/pandas/tools/tests/test_tile.py
+++ b/pandas/tools/tests/test_tile.py
@@ -6,6 +6,7 @@
from pandas import DataFrame, Series, unique
import pandas.util.testing as tm
+from pandas.util.testing import assertRaisesRegexp
import pandas.core.common as com
from pandas.core.algorithms import quantile
@@ -136,7 +137,7 @@ def test_qcut_specify_quantiles(self):
self.assert_(factor.equals(expected))
def test_qcut_all_bins_same(self):
- self.assertRaises(Exception, qcut, [0,0,0,0,0,0,0,0,0,0], 3)
+ assertRaisesRegexp(ValueError, "edges.*unique", qcut, [0,0,0,0,0,0,0,0,0,0], 3)
def test_cut_out_of_bounds(self):
arr = np.random.randn(100)
diff --git a/pandas/tools/tile.py b/pandas/tools/tile.py
index 4c68594a8a093..ffed6cafc1047 100644
--- a/pandas/tools/tile.py
+++ b/pandas/tools/tile.py
@@ -151,7 +151,7 @@ def _bins_to_cuts(x, bins, right=True, labels=None, retbins=False,
ids = bins.searchsorted(x, side=side)
if len(algos.unique(bins)) < len(bins):
- raise Exception('Bin edges must be unique: %s' % repr(bins))
+ raise ValueError('Bin edges must be unique: %s' % repr(bins))
if include_lowest:
ids[x == bins[0]] = 1
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 5985a8a898b27..f54bfee55782a 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -499,7 +499,7 @@ def get_offset(name):
if offset is not None:
return offset
else:
- raise Exception('Bad rule name requested: %s!' % name)
+ raise ValueError('Bad rule name requested: %s.' % name)
getOffset = get_offset
@@ -522,7 +522,7 @@ def get_offset_name(offset):
if name is not None:
return name
else:
- raise Exception('Bad rule given: %s!' % offset)
+ raise ValueError('Bad rule given: %s.' % offset)
def get_legacy_offset_name(offset):
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 7f726b8f3c6ab..f560a6bf6e717 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -422,13 +422,20 @@ def tzinfo(self):
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
+ if start is None and end is None:
+ # I somewhat believe this should never be raised externally and therefore
+ # should be a `PandasError` but whatever...
+ raise TypeError('Must specify either start or end.')
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
+ if (start is None or end is None) and periods is None:
+ raise TypeError('Must either specify period or provide both start and end.')
if offset is None:
- raise Exception('Must provide a DateOffset!')
+ # This can't happen with external-facing code, therefore PandasError
+ raise TypeError('Must provide offset.')
drc = _daterange_cache
if offset not in _daterange_cache:
@@ -922,10 +929,10 @@ def _maybe_utc_convert(self, other):
if isinstance(other, DatetimeIndex):
if self.tz is not None:
if other.tz is None:
- raise Exception('Cannot join tz-naive with tz-aware '
+ raise TypeError('Cannot join tz-naive with tz-aware '
'DatetimeIndex')
elif other.tz is not None:
- raise Exception('Cannot join tz-naive with tz-aware '
+ raise TypeError('Cannot join tz-naive with tz-aware '
'DatetimeIndex')
if self.tz != other.tz:
@@ -1492,7 +1499,7 @@ def tz_convert(self, tz):
if self.tz is None:
# tz naive, use tz_localize
- raise Exception('Cannot convert tz-naive timestamps, use '
+ raise TypeError('Cannot convert tz-naive timestamps, use '
'tz_localize to localize')
# No conversion since timestamps are all UTC to begin with
@@ -1507,7 +1514,7 @@ def tz_localize(self, tz):
localized : DatetimeIndex
"""
if self.tz is not None:
- raise ValueError("Already tz-aware, use tz_convert to convert.")
+ raise TypeError("Already tz-aware, use tz_convert to convert.")
tz = tools._maybe_get_tz(tz)
# Convert to UTC
@@ -1678,7 +1685,7 @@ def date_range(start=None, end=None, periods=None, freq='D', tz=None,
Frequency strings can have multiples, e.g. '5H'
tz : string or None
Time zone name for returning localized DatetimeIndex, for example
- Asia/Beijing
+ Asia/Hong_Kong
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
name : str, default None
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 025a12a17687e..9585d1f81e81d 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -351,7 +351,7 @@ def apply(self, other):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
- raise Exception('Only know how to combine business day with '
+ raise TypeError('Only know how to combine business day with '
'datetime or timedelta!')
@classmethod
@@ -487,7 +487,7 @@ def __init__(self, n=1, **kwds):
if self.weekday is not None:
if self.weekday < 0 or self.weekday > 6:
- raise Exception('Day must be 0<=day<=6, got %d' %
+ raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
self._inc = timedelta(weeks=1)
@@ -562,13 +562,13 @@ def __init__(self, n=1, **kwds):
self.week = kwds['week']
if self.n == 0:
- raise Exception('N cannot be 0')
+ raise ValueError('N cannot be 0')
if self.weekday < 0 or self.weekday > 6:
- raise Exception('Day must be 0<=day<=6, got %d' %
+ raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
if self.week < 0 or self.week > 3:
- raise Exception('Week must be 0<=day<=3, got %d' %
+ raise ValueError('Week must be 0<=day<=3, got %d' %
self.week)
self.kwds = kwds
diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py
index 22ed41f82506d..7fbdbbe328c84 100644
--- a/pandas/tseries/tests/test_daterange.py
+++ b/pandas/tseries/tests/test_daterange.py
@@ -13,6 +13,7 @@
import pandas.tseries.tools as tools
import pandas.core.datetools as datetools
+from pandas.util.testing import assertRaisesRegexp
def _skip_if_no_pytz():
@@ -65,6 +66,12 @@ def test_constructor(self):
self.assertRaises(ValueError, date_range, '2011-1-1', '2012-1-1', 'B')
self.assertRaises(ValueError, bdate_range, '2011-1-1', '2012-1-1', 'B')
+ def test_naive_aware_conflicts(self):
+ naive = bdate_range(START, END, freq=datetools.bday, tz=None)
+ aware = bdate_range(START, END, freq=datetools.bday, tz="Asia/Hong_Kong")
+ assertRaisesRegexp(TypeError, "tz-naive.*tz-aware", naive.join, aware)
+ assertRaisesRegexp(TypeError, "tz-naive.*tz-aware", aware.join, naive)
+
def test_cached_range(self):
rng = DatetimeIndex._cached_range(START, END,
offset=datetools.bday)
@@ -73,16 +80,16 @@ def test_cached_range(self):
rng = DatetimeIndex._cached_range(end=START, periods=20,
offset=datetools.bday)
- self.assertRaises(Exception, DatetimeIndex._cached_range, START, END)
+ assertRaisesRegexp(TypeError, "offset", DatetimeIndex._cached_range, START, END)
- self.assertRaises(Exception, DatetimeIndex._cached_range, START,
- freq=datetools.bday)
+ assertRaisesRegexp(TypeError, "specify period", DatetimeIndex._cached_range, START,
+ offset=datetools.bday)
- self.assertRaises(Exception, DatetimeIndex._cached_range, end=END,
- freq=datetools.bday)
+ assertRaisesRegexp(TypeError, "specify period", DatetimeIndex._cached_range, end=END,
+ offset=datetools.bday)
- self.assertRaises(Exception, DatetimeIndex._cached_range, periods=20,
- freq=datetools.bday)
+ assertRaisesRegexp(TypeError, "start or end", DatetimeIndex._cached_range, periods=20,
+ offset=datetools.bday)
def test_cached_range_bug(self):
rng = date_range('2010-09-01 05:00:00', periods=50,
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index 209f770da5c94..bcd74e7e6eecd 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -20,6 +20,7 @@
from pandas.tslib import monthrange
from pandas.lib import Timestamp
+from pandas.util.testing import assertRaisesRegexp
_multiprocess_can_split_ = True
@@ -44,7 +45,7 @@ def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
- assert_raises(Exception, ole2datetime, 60)
+ assert_raises(ValueError, ole2datetime, 60)
def test_to_datetime1():
@@ -285,7 +286,7 @@ def test_apply_large_n(self):
self.assertEqual(rs, xp)
def test_apply_corner(self):
- self.assertRaises(Exception, BDay().apply, BMonthEnd())
+ self.assertRaises(TypeError, BDay().apply, BMonthEnd())
def test_offsets_compare_equal(self):
# root cause of #456
@@ -301,8 +302,8 @@ def assertOnOffset(offset, date, expected):
class TestWeek(unittest.TestCase):
def test_corner(self):
- self.assertRaises(Exception, Week, weekday=7)
- self.assertRaises(Exception, Week, weekday=-1)
+ self.assertRaises(ValueError, Week, weekday=7)
+ assertRaisesRegexp(ValueError, "Day must be", Week, weekday=-1)
def test_isAnchored(self):
self.assert_(Week(weekday=0).isAnchored())
@@ -366,11 +367,11 @@ def test_offsets_compare_equal(self):
class TestWeekOfMonth(unittest.TestCase):
def test_constructor(self):
- self.assertRaises(Exception, WeekOfMonth, n=0, week=1, weekday=1)
- self.assertRaises(Exception, WeekOfMonth, n=1, week=4, weekday=0)
- self.assertRaises(Exception, WeekOfMonth, n=1, week=-1, weekday=0)
- self.assertRaises(Exception, WeekOfMonth, n=1, week=0, weekday=-1)
- self.assertRaises(Exception, WeekOfMonth, n=1, week=0, weekday=7)
+ assertRaisesRegexp(ValueError, "^N cannot be 0", WeekOfMonth, n=0, week=1, weekday=1)
+ assertRaisesRegexp(ValueError, "^Week", WeekOfMonth, n=1, week=4, weekday=0)
+ assertRaisesRegexp(ValueError, "^Week", WeekOfMonth, n=1, week=-1, weekday=0)
+ assertRaisesRegexp(ValueError, "^Day", WeekOfMonth, n=1, week=0, weekday=-1)
+ assertRaisesRegexp(ValueError, "^Day", WeekOfMonth, n=1, week=0, weekday=7)
def test_offset(self):
date1 = datetime(2011, 1, 4) # 1st Tuesday of Month
@@ -1445,7 +1446,7 @@ def test_hasOffsetName():
def test_get_offset_name():
- assert_raises(Exception, get_offset_name, BDay(2))
+ assertRaisesRegexp(ValueError, 'Bad rule.*BusinessDays', get_offset_name, BDay(2))
assert get_offset_name(BDay()) == 'B'
assert get_offset_name(BMonthEnd()) == 'BM'
@@ -1457,7 +1458,7 @@ def test_get_offset_name():
def test_get_offset():
- assert_raises(Exception, get_offset, 'gibberish')
+ assertRaisesRegexp(ValueError, "rule.*GIBBERISH", get_offset, 'gibberish')
assert get_offset('B') == BDay()
assert get_offset('b') == BDay()
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index 21b11bb455e32..e57b554b7ca3c 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -18,8 +18,9 @@
import pandas.tseries.offsets as offsets
from pandas.tseries.index import bdate_range, date_range
import pandas.tseries.tools as tools
+from pytz import NonExistentTimeError
-from pandas.util.testing import assert_series_equal, assert_almost_equal
+from pandas.util.testing import assert_series_equal, assert_almost_equal, assertRaisesRegexp
import pandas.util.testing as tm
import pandas.lib as lib
@@ -93,7 +94,8 @@ def test_localize_utc_conversion(self):
# DST ambiguity, this should fail
rng = date_range('3/11/2012', '3/12/2012', freq='30T')
- self.assertRaises(Exception, rng.tz_localize, 'US/Eastern')
+ # Is this really how it should fail??
+ self.assertRaises(NonExistentTimeError, rng.tz_localize, 'US/Eastern')
def test_timestamp_tz_localize(self):
stamp = Timestamp('3/11/2012 04:00')
@@ -672,7 +674,7 @@ def test_series_frame_tz_localize(self):
# Can't localize if already tz-aware
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
ts = Series(1, index=rng)
- self.assertRaises(Exception, ts.tz_localize, 'US/Eastern')
+ assertRaisesRegexp(TypeError, 'Already tz-aware', ts.tz_localize, 'US/Eastern')
def test_series_frame_tz_convert(self):
rng = date_range('1/1/2011', periods=200, freq='D',
@@ -696,7 +698,7 @@ def test_series_frame_tz_convert(self):
# can't convert tz-naive
rng = date_range('1/1/2011', periods=200, freq='D')
ts = Series(1, index=rng)
- self.assertRaises(Exception, ts.tz_convert, 'US/Eastern')
+ assertRaisesRegexp(TypeError, "Cannot convert tz-naive", ts.tz_convert, 'US/Eastern')
def test_join_utc_convert(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index c80d2ef5d4e1c..531d9f399279b 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -344,6 +344,6 @@ def ole2datetime(oledt):
# Excel has a bug where it thinks the date 2/29/1900 exists
# we just reject any date before 3/1/1900.
if val < 61:
- raise Exception("Value is outside of acceptable range: %s " % val)
+ raise ValueError("Value is outside of acceptable range: %s " % val)
return OLE_TIME_ZERO + timedelta(days=val)
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index c297cfa554fa5..5e1ab59305bab 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -748,6 +748,56 @@ def stdin_encoding(encoding=None):
yield
sys.stdin = _stdin
+def assertRaisesRegexp(exception, regexp, callable, *args, **kwargs):
+ """ Port of assertRaisesRegexp from unittest in Python 2.7 - used in with statement.
+
+ Explanation from standard library:
+ Like assertRaises() but also tests that regexp matches on the string
+ representation of the raised exception. regexp may be a regular expression
+ object or a string containing a regular expression suitable for use by
+ re.search().
+
+ You can pass either a regular expression or a compiled regular expression object.
+ >>> assertRaisesRegexp(ValueError, 'invalid literal for.*XYZ',
+ ... int, 'XYZ')
+ >>> import re
+ >>> assertRaisesRegexp(ValueError, re.compile('literal'), int, 'XYZ')
+
+ If an exception of a different type is raised, it bubbles up.
+
+ >>> assertRaisesRegexp(TypeError, 'literal', int, 'XYZ')
+ Traceback (most recent call last):
+ ...
+ ValueError: invalid literal for int() with base 10: 'XYZ'
+ >>> dct = {}
+ >>> assertRaisesRegexp(KeyError, 'pear', dct.__getitem__, 'apple')
+ Traceback (most recent call last):
+ ...
+ AssertionError: "pear" does not match "'apple'"
+ >>> assertRaisesRegexp(KeyError, 'apple', dct.__getitem__, 'apple')
+ >>> assertRaisesRegexp(Exception, 'operand type.*int.*dict', lambda : 2 + {})
+ """
+
+ import re
+ try:
+ callable(*args, **kwargs)
+ except Exception as e:
+ if not issubclass(e.__class__, exception):
+ # mimics behavior of unittest
+ raise
+ # don't recompile
+ if hasattr(regexp, "search"):
+ expected_regexp = regexp
+ else:
+ expected_regexp = re.compile(regexp)
+ if not expected_regexp.search(str(e)):
+ raise AssertionError('"%s" does not match "%s"' %
+ (expected_regexp.pattern, str(e)))
+ else:
+ # Apparently some exceptions don't have a __name__ attribute? Just aping unittest library here
+ name = getattr(exception, "__name__", str(exception))
+ raise AssertionError("{0} not raised".format(name))
+
@contextmanager
def assert_produces_warning(expected_warning=Warning, filter_level="always"):
| Relates to #3954. In addition to changing exceptions types returned to be more explicit, this pull incorporates the following changes:
1. Adds a new `assertRaisesRegexp` to `util/testing.py` to port the `assertRaisesRegexp` helper from 2.7+ unittest
2. Cleans up stats/common.
3. Fix up initial assertions in `tseries.offset._cached_range` that were all off + fix the test cases which were all just raising TypeErrors because they were calling with the wrong arguments.
4. Changes the example timezone from Asia/Beijing to Asia/Hong+Kong b/c `Asia/Beijing` is not supported by pytz.
5. Any tz-aware : tz-naive comparison fails with TypeError, as will mismatched, localize, etc. calls.
6. Changes `SparseArray` indexing error messages to match tuple message for completeness.
7. Improves the window check in ols.
After you all say it's okay to merge, I'll update the docs to reflect changes.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3956 | 2013-06-19T03:46:35Z | 2013-06-19T21:09:34Z | 2013-06-19T21:09:34Z | 2014-06-19T19:20:54Z |
BLD: Tweak to Makefile. | diff --git a/Makefile b/Makefile
index 6b7e02404525b..5349443ed477f 100644
--- a/Makefile
+++ b/Makefile
@@ -2,10 +2,10 @@
clean: clean_pyc
-rm -rf build dist
- -find . -name '*.so' -exec rm -f {} \;
+ -find . -name '*.so' -exec rm {} \;
clean_pyc:
- -find . -name '*.pyc' -exec rm -f {} \;
+ -find . -name '*.pyc' -or -name '*.pyo' -exec rm {} \;
tseries: pandas/lib.pyx pandas/tslib.pyx pandas/hashtable.pyx
python setup.py build_ext --inplace
| https://api.github.com/repos/pandas-dev/pandas/pulls/3955 | 2013-06-19T02:21:19Z | 2013-06-19T12:53:19Z | 2013-06-19T12:53:19Z | 2014-07-16T08:14:53Z | |
Doc for pandas.io.data | diff --git a/doc/source/io.rst b/doc/source/io.rst
index d2d0c5c23af9e..c4d7497308524 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -2463,3 +2463,58 @@ Alternatively, the function :func:`~pandas.io.stata.read_stata` can be used
import os
os.remove('stata.dta')
+
+Data
+----
+
+Functions from :mod:`pandas.io.data` extract data from various Internet
+sources into a DataFrame. Currently the following sources are supported:
+
+ - Yahoo! Finance
+ - Google Finance
+ - St. Louis FED (FRED)
+ - Kenneth French's data library
+
+It should be noted, that various sources support different kinds of data, so not all sources implement the same methods and the data elements returned might also differ.
+
+Loading Yahoo! Finance data:
+
+.. ipython:: python
+
+ import pandas.io.data as web
+ from datetime import datetime
+ start = datetime(2010, 1, 1)
+ end = datetime(2013, 01, 27)
+ f=web.DataReader("F", 'yahoo', start, end)
+ f.ix['2010-01-04']
+
+Loading Google Finance data:
+
+.. ipython:: python
+
+ import pandas.io.data as web
+ from datetime import datetime
+ start = datetime(2010, 1, 1)
+ end = datetime(2013, 01, 27)
+ f=web.DataReader("F", 'google', start, end)
+ f.ix['2010-01-04']
+
+Loading FRED data:
+
+.. ipython:: python
+
+ import pandas.io.data as web
+ from datetime import datetime
+ start = datetime(2010, 1, 1)
+ end = datetime(2013, 01, 27)
+ gdp=web.DataReader("GDP", "fred", start, end)
+ gdp.ix['2013-01-01']
+
+Loading Fama/French data (the dataset names are listed at `Fama/French Data Library
+<http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html>`_):
+
+.. ipython:: python
+
+ import pandas.io.data as web
+ ip=web.DataReader("5_Industry_Portfolios", "famafrench")
+ ip[4].ix[192607]
| https://api.github.com/repos/pandas-dev/pandas/pulls/3953 | 2013-06-19T01:39:17Z | 2013-06-22T17:36:32Z | 2013-06-22T17:36:32Z | 2013-06-22T17:36:51Z | |
ENH: enable support for iterator with read_hdf in HDFStore (GH3937) | diff --git a/RELEASE.rst b/RELEASE.rst
index ebd88091050f1..8e4bdd3cba268 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -101,6 +101,7 @@ pandas 0.11.1
to select with a Storer; these are invalid parameters at this time
- can now specify an ``encoding`` option to ``append/put``
to enable alternate encodings (GH3750_)
+ - enable support for ``iterator/chunksize`` with ``read_hdf``
- The repr() for (Multi)Index now obeys display.max_seq_items rather
then numpy threshold print options. (GH3426_, GH3466_)
- Added mangle_dupe_cols option to read_table/csv, allowing users
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 6fee8ad35e10c..e586c7efeec61 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1925,6 +1925,18 @@ The default is 50,000 rows returned in a chunk.
for df in store.select('df', chunksize=3):
print df
+.. note::
+
+ .. versionadded:: 0.11.1
+
+ You can also use the iterator with ``read_hdf`` which will open, then
+ automatically close the store when finished iterating.
+
+ .. code-block:: python
+
+ for df in read_hdf('store.h5','df', chunsize=3):
+ print df
+
Note, that the chunksize keyword applies to the **returned** rows. So if you
are doing a query, then that set will be subdivided and returned in the
iterator. Keep in mind that if you do not pass a ``where`` selection criteria
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index d6b8c6d516b25..97f236166be45 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -6,6 +6,11 @@ v0.11.1 (June ??, 2013)
This is a minor release from 0.11.0 and includes several new features and
enhancements along with a large number of bug fixes.
+Highlites include a consistent I/O API naming scheme, routines to read html,
+write multi-indexes to csv files, read & write STATA data files, read & write JSON format
+files, Python 3 support for ``HDFStore``, filtering of groupby expressions via ``filter``, and a
+revamped ``replace`` routine that accepts regular expressions.
+
API changes
~~~~~~~~~~~
@@ -148,8 +153,8 @@ API changes
``bs4`` + ``html5lib`` when lxml fails to parse. a list of parsers to try
until success is also valid
-Enhancements
-~~~~~~~~~~~~
+I/O Enhancements
+~~~~~~~~~~~~~~~~
- ``pd.read_html()`` can now parse HTML strings, files or urls and return
DataFrames, courtesy of @cpcloud. (GH3477_, GH3605_, GH3606_, GH3616_).
@@ -184,28 +189,6 @@ Enhancements
accessable via ``read_json`` top-level function for reading,
and ``to_json`` DataFrame method for writing, :ref:`See the docs<io.json>`
- - ``DataFrame.replace()`` now allows regular expressions on contained
- ``Series`` with object dtype. See the examples section in the regular docs
- :ref:`Replacing via String Expression <missing_data.replace_expression>`
-
- For example you can do
-
- .. ipython :: python
-
- df = DataFrame({'a': list('ab..'), 'b': [1, 2, 3, 4]})
- df.replace(regex=r'\s*\.\s*', value=np.nan)
-
- to replace all occurrences of the string ``'.'`` with zero or more
- instances of surrounding whitespace with ``NaN``.
-
- Regular string replacement still works as expected. For example, you can do
-
- .. ipython :: python
-
- df.replace('.', np.nan)
-
- to replace all occurrences of the string ``'.'`` with ``NaN``.
-
- Multi-index column support for reading and writing csv format files
- The ``header`` option in ``read_csv`` now accepts a
@@ -225,19 +208,62 @@ Enhancements
with ``df.to_csv(..., index=False``), then any ``names`` on the columns index will
be *lost*.
+ .. ipython:: python
+
+ from pandas.util.testing import makeCustomDataframe as mkdf
+ df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
+ df.to_csv('mi.csv',tupleize_cols=False)
+ print open('mi.csv').read()
+ pd.read_csv('mi.csv',header=[0,1,2,3],index_col=[0,1],tupleize_cols=False)
+
+ .. ipython:: python
+ :suppress:
+
+ import os
+ os.remove('mi.csv')
+
+ - Support for ``HDFStore`` (via ``PyTables 3.0.0``) on Python3
+
+ - Iterator support via ``read_hdf`` that automatically opens and closes the
+ store when iteration is finished. This is only for *tables*
+
.. ipython:: python
- from pandas.util.testing import makeCustomDataframe as mkdf
- df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
- df.to_csv('mi.csv',tupleize_cols=False)
- print open('mi.csv').read()
- pd.read_csv('mi.csv',header=[0,1,2,3],index_col=[0,1],tupleize_cols=False)
+ path = 'store_iterator.h5'
+ DataFrame(randn(10,2)).to_hdf(path,'df',table=True)
+ for df in read_hdf(path,'df', chunksize=3):
+ print df
.. ipython:: python
- :suppress:
+ :suppress:
- import os
- os.remove('mi.csv')
+ import os
+ os.remove(path)
+
+Other Enhancements
+~~~~~~~~~~~~~~~~~~
+
+ - ``DataFrame.replace()`` now allows regular expressions on contained
+ ``Series`` with object dtype. See the examples section in the regular docs
+ :ref:`Replacing via String Expression <missing_data.replace_expression>`
+
+ For example you can do
+
+ .. ipython :: python
+
+ df = DataFrame({'a': list('ab..'), 'b': [1, 2, 3, 4]})
+ df.replace(regex=r'\s*\.\s*', value=np.nan)
+
+ to replace all occurrences of the string ``'.'`` with zero or more
+ instances of surrounding whitespace with ``NaN``.
+
+ Regular string replacement still works as expected. For example, you can do
+
+ .. ipython :: python
+
+ df.replace('.', np.nan)
+
+ to replace all occurrences of the string ``'.'`` with ``NaN``.
- ``pd.melt()`` now accepts the optional parameters ``var_name`` and ``value_name``
to specify custom column names of the returned DataFrame.
@@ -261,8 +287,6 @@ Enhancements
pd.get_option('a.b')
pd.get_option('b.c')
- - Support for ``HDFStore`` (via ``PyTables 3.0.0``) on Python3
-
- The ``filter`` method for group objects returns a subset of the original
object. Suppose we want to take only elements that belong to groups with a
group sum greater than 2.
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 62aa1b99dfac0..83e46fc949a4d 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -196,12 +196,27 @@ def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None, app
def read_hdf(path_or_buf, key, **kwargs):
""" read from the store, closeit if we opened it """
- f = lambda store: store.select(key, **kwargs)
+ f = lambda store, auto_close: store.select(key, auto_close=auto_close, **kwargs)
if isinstance(path_or_buf, basestring):
- with get_store(path_or_buf) as store:
- return f(store)
- f(path_or_buf)
+
+ # can't auto open/close if we are using an iterator
+ # so delegate to the iterator
+ store = HDFStore(path_or_buf)
+ try:
+ return f(store, True)
+ except:
+
+ # if there is an error, close the store
+ try:
+ store.close()
+ except:
+ pass
+
+ raise
+
+ # a passed store; user controls open/close
+ f(path_or_buf, False)
class HDFStore(object):
"""
@@ -405,7 +420,7 @@ def get(self, key):
raise KeyError('No object named %s in the file' % key)
return self._read_group(group)
- def select(self, key, where=None, start=None, stop=None, columns=None, iterator=False, chunksize=None, **kwargs):
+ def select(self, key, where=None, start=None, stop=None, columns=None, iterator=False, chunksize=None, auto_close=False, **kwargs):
"""
Retrieve pandas object stored in file, optionally based on where
criteria
@@ -419,6 +434,7 @@ def select(self, key, where=None, start=None, stop=None, columns=None, iterator=
columns : a list of columns that if not None, will limit the return columns
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
+ auto_close : boolean, should automatically close the store when finished, default is False
"""
group = self.get_node(key)
@@ -434,9 +450,11 @@ def func(_start, _stop):
return s.read(where=where, start=_start, stop=_stop, columns=columns, **kwargs)
if iterator or chunksize is not None:
- return TableIterator(func, nrows=s.nrows, start=start, stop=stop, chunksize=chunksize)
+ if not s.is_table:
+ raise TypeError("can only use an iterator or chunksize on a table")
+ return TableIterator(self, func, nrows=s.nrows, start=start, stop=stop, chunksize=chunksize, auto_close=auto_close)
- return TableIterator(func, nrows=s.nrows, start=start, stop=stop).get_values()
+ return TableIterator(self, func, nrows=s.nrows, start=start, stop=stop, auto_close=auto_close).get_values()
def select_as_coordinates(self, key, where=None, start=None, stop=None, **kwargs):
"""
@@ -473,7 +491,7 @@ def select_column(self, key, column, **kwargs):
"""
return self.get_storer(key).read_column(column = column, **kwargs)
- def select_as_multiple(self, keys, where=None, selector=None, columns=None, start=None, stop=None, iterator=False, chunksize=None, **kwargs):
+ def select_as_multiple(self, keys, where=None, selector=None, columns=None, start=None, stop=None, iterator=False, chunksize=None, auto_close=False, **kwargs):
""" Retrieve pandas objects from multiple tables
Parameters
@@ -541,9 +559,9 @@ def func(_start, _stop):
return concat(objs, axis=axis, verify_integrity=True)
if iterator or chunksize is not None:
- return TableIterator(func, nrows=nrows, start=start, stop=stop, chunksize=chunksize)
+ return TableIterator(self, func, nrows=nrows, start=start, stop=stop, chunksize=chunksize, auto_close=auto_close)
- return TableIterator(func, nrows=nrows, start=start, stop=stop).get_values()
+ return TableIterator(self, func, nrows=nrows, start=start, stop=stop, auto_close=auto_close).get_values()
def put(self, key, value, table=None, append=False, **kwargs):
@@ -916,16 +934,20 @@ class TableIterator(object):
Parameters
----------
- func : the function to get results
+ store : the reference store
+ func : the function to get results
nrows : the rows to iterate on
start : the passed start value (default is None)
- stop : the passed stop value (default is None)
+ stop : the passed stop value (default is None)
chunksize : the passed chunking valeu (default is 50000)
+ auto_close : boolean, automatically close the store at the end of iteration,
+ default is False
kwargs : the passed kwargs
"""
- def __init__(self, func, nrows, start=None, stop=None, chunksize=None):
- self.func = func
+ def __init__(self, store, func, nrows, start=None, stop=None, chunksize=None, auto_close=False):
+ self.store = store
+ self.func = func
self.nrows = nrows or 0
self.start = start or 0
@@ -937,6 +959,7 @@ def __init__(self, func, nrows, start=None, stop=None, chunksize=None):
chunksize = 100000
self.chunksize = chunksize
+ self.auto_close = auto_close
def __iter__(self):
current = self.start
@@ -950,9 +973,16 @@ def __iter__(self):
yield v
+ self.close()
+
+ def close(self):
+ if self.auto_close:
+ self.store.close()
+
def get_values(self):
- return self.func(self.start, self.stop)
-
+ results = self.func(self.start, self.stop)
+ self.close()
+ return results
class IndexCol(object):
""" an index column description class
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 3266a906dcfae..f7f77698f51f5 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -2078,6 +2078,7 @@ def test_select_iterator(self):
results = []
for s in store.select('df',chunksize=100):
results.append(s)
+ self.assert_(len(results) == 5)
result = concat(results)
tm.assert_frame_equal(expected, result)
@@ -2085,7 +2086,28 @@ def test_select_iterator(self):
for s in store.select('df',chunksize=150):
results.append(s)
result = concat(results)
- tm.assert_frame_equal(expected, result)
+ tm.assert_frame_equal(result, expected)
+
+ with tm.ensure_clean(self.path) as path:
+
+ df = tm.makeTimeDataFrame(500)
+ df.to_hdf(path,'df_non_table')
+ self.assertRaises(TypeError, read_hdf, path,'df_non_table',chunksize=100)
+ self.assertRaises(TypeError, read_hdf, path,'df_non_table',iterator=True)
+
+ with tm.ensure_clean(self.path) as path:
+
+ df = tm.makeTimeDataFrame(500)
+ df.to_hdf(path,'df',table=True)
+
+ results = []
+ for x in read_hdf(path,'df',chunksize=100):
+ results.append(x)
+
+ self.assert_(len(results) == 5)
+ result = concat(results)
+ tm.assert_frame_equal(result, df)
+ tm.assert_frame_equal(result, read_hdf(path,'df'))
# multiple
| closes #3937
DOC: update v0.11.1.rst
| https://api.github.com/repos/pandas-dev/pandas/pulls/3949 | 2013-06-18T22:05:48Z | 2013-06-18T22:57:40Z | 2013-06-18T22:57:40Z | 2014-07-01T10:07:30Z |
CLN/ENH: consolidate tox | diff --git a/tox.ini b/tox.ini
index 2a9c454a29435..607f33331579b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,11 +4,11 @@
# and then run "tox" from this directory.
[tox]
-envlist = py26, py27, py32, py33
+envlist = py26, py27, py32, py33, py26-full, py27-full, py32-full, py33-full
[testenv]
deps =
- cython
+ cython >= 0.17.1
numpy >= 1.6.1
nose
pytz
@@ -21,9 +21,7 @@ changedir = {envdir}
commands =
# TODO: --exe because of GH #761
- {envbindir}/nosetests --exe pandas -A "not network"
- # cleanup the temp. build dir created by the tox build
-# /bin/rm -rf {toxinidir}/build
+ {envbindir}/nosetests --exe pandas {posargs}
# quietly rollback the install.
# Note this line will only be reached if the
@@ -36,6 +34,30 @@ commands =
# tox should provide a preinstall-commands hook.
pip uninstall pandas -qy
+[full-deps]
+deps =
+ cython >= 0.17.1
+ numpy >= 1.6.1
+ nose
+ pytz
+ six
+ numexpr
+ bottleneck
+ scipy
+ tables
+ matplotlib
+ openpyxl >= 1.6.1
+ xlrd
+ xlwt
+ html5lib
+ lxml
+ beautifulsoup4
+ git+git://github.com/statsmodels/statsmodels
+ rpy2
+
+changedir = {[testenv]changedir}
+commands = {[testenv]commands}
+
[testenv:py26]
[testenv:py27]
@@ -43,3 +65,11 @@ commands =
[testenv:py32]
[testenv:py33]
+
+[fulldeps:py26-full]
+
+[fulldeps:py27-full]
+
+[fulldeps:py32-full]
+
+[fulldeps:py33-full]
diff --git a/tox.sh b/tox.sh
index b68ffc7fdb91c..2a6d7c770ca53 100755
--- a/tox.sh
+++ b/tox.sh
@@ -1,8 +1,52 @@
#!/usr/bin/env bash
-if [ x"$1" == x"fast" ]; then
+use_build_cache=
+slow_tests=
+full_deps=
+
+while getopts bsd opt; do
+ case $opt in
+ b) use_build_cache=true;;
+ s) slow_tests=true;;
+ d) full_deps=true;;
+ esac
+done
+
+
+if [ $use_build_cache ]; then
scripts/use_build_cache.py
-fi;
+fi
+
+
+# choose to run all tests or just to run not network and not slow (default)
+posargs="-A 'not network and not slow'"
+if [ $slow_tests ]; then
+ posargs=
+fi
+
+
+# choose full or slim build deps
+start_i=3
+end_i=9
+if [ $full_deps ]; then
+ start_i=9
+ end_i=
+fi
+
+ENVS=$(cat tox.ini | grep envlist | tr ',' ' ' | cut -d " " -f ${start_i}-${end_i})
+
+
+TOX_INI_PAR="tox.ini"
+echo "[Creating distfile]"
+tox --sdistonly
+export DISTFILE="$(find .tox/dist -type f)"
+
-tox
+# run the tests
+echo -e "[Starting tests]\n"
+for e in "${ENVS}"; do
+ echo "[launching tox for $e]"
+ tox -c "$TOX_INI_PAR" -e "$e" -- "${posargs}" &
+done
+wait
diff --git a/tox_prll.ini b/tox_prll.ini
deleted file mode 100644
index 7ae399837b4e0..0000000000000
--- a/tox_prll.ini
+++ /dev/null
@@ -1,46 +0,0 @@
-# Tox (http://tox.testrun.org/) is a tool for running tests
-# in multiple virtualenvs. This configuration file will run the
-# test suite on all supported python versions. To use it, "pip install tox"
-# and then run "tox" from this directory.
-
-[tox]
-envlist = py26, py27, py32, py33
-sdistsrc = {env:DISTFILE}
-
-[testenv]
-deps =
- cython
- numpy >= 1.6.1
- nose
- pytz
- six
-
-# cd to anything but the default {toxinidir} which
-# contains the pandas subdirectory and confuses
-# nose away from the fresh install in site-packages
-changedir = {envdir}
-
-commands =
- # TODO: --exe because of GH #761
- {envbindir}/nosetests --exe pandas -A "not network"
- # cleanup the temp. build dir created by the tox build
-# /bin/rm -rf {toxinidir}/build
-
- # quietly rollback the install.
- # Note this line will only be reached if the
- # previous lines succeed (in particular, the tests),
- # but an uninstall is really only required when
- # files are removed from the source tree, in which case,
- # stale versions of files will will remain in the venv
- # until the next time uninstall is run.
- #
- # tox should provide a preinstall-commands hook.
- pip uninstall pandas -qy
-
-[testenv:py26]
-
-[testenv:py27]
-
-[testenv:py32]
-
-[testenv:py33]
diff --git a/tox_prll.sh b/tox_prll.sh
deleted file mode 100755
index 910e49b6b5a80..0000000000000
--- a/tox_prll.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env bash
-#
-# tox has an undocumented (as of 1.4.2) config option called "sdistsrc"
-# which can make a run use a pre-prepared sdist file.
-# we prepare the sdist once , then launch the tox runs in parallel using it.
-#
-# currently (tox 1.4.2) We have to skip sdist generation when running in parallel
-# or we get a race.
-#
-
-
-ENVS=$(cat tox.ini | grep envlist | tr "," " " | cut -d " " -f 3-)
-TOX_INI_PAR="tox_prll.ini"
-
-if [ x"$1" == x"fast" ]; then
- scripts/use_build_cache.py
-fi;
-
-echo "[Creating distfile]"
-tox --sdistonly
-export DISTFILE="$(find .tox/dist -type f )"
-
-echo -e "[Starting tests]\n"
-for e in $ENVS; do
- echo "[launching tox for $e]"
- tox -c "$TOX_INI_PAR" -e "$e" &
-done
| closes #3947.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3948 | 2013-06-18T18:21:43Z | 2013-06-21T23:12:54Z | null | 2013-07-25T03:52:47Z |
ENH: update bundled ujson to latest v1.33 | diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py
index fe717f56e6bea..997229487e1b9 100644
--- a/pandas/io/tests/test_json/test_pandas.py
+++ b/pandas/io/tests/test_json/test_pandas.py
@@ -179,21 +179,21 @@ def test_frame_from_json_bad_data(self):
# too few indices
json = StringIO('{"columns":["A","B"],'
'"index":["2","3"],'
- '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}"')
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
self.assertRaises(ValueError, read_json, json,
orient="split")
# too many columns
json = StringIO('{"columns":["A","B","C"],'
'"index":["1","2","3"],'
- '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}"')
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
self.assertRaises(AssertionError, read_json, json,
orient="split")
# bad key
json = StringIO('{"badkey":["A","B"],'
'"index":["2","3"],'
- '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}"')
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
self.assertRaises(TypeError, read_json, json,
orient="split")
diff --git a/pandas/io/tests/test_json/test_ujson.py b/pandas/io/tests/test_json/test_ujson.py
index 2e775b4a541ea..23bd41d245f75 100644
--- a/pandas/io/tests/test_json/test_ujson.py
+++ b/pandas/io/tests/test_json/test_ujson.py
@@ -15,6 +15,8 @@
import calendar
import StringIO
import re
+import random
+import decimal
from functools import partial
import pandas.util.py3compat as py3compat
@@ -36,6 +38,72 @@ def _skip_if_python_ver(skip_major, skip_minor=None):
else partial(json.dumps, encoding="utf-8"))
class UltraJSONTests(TestCase):
+
+ def test_encodeDecimal(self):
+ sut = decimal.Decimal("1337.1337")
+ encoded = ujson.encode(sut, double_precision=100)
+ decoded = ujson.decode(encoded)
+ self.assertEquals(decoded, 1337.1337)
+
+ def test_encodeStringConversion(self):
+ input = "A string \\ / \b \f \n \r \t </script> &"
+ not_html_encoded = '"A string \\\\ \\/ \\b \\f \\n \\r \\t <\\/script> &"'
+ html_encoded = '"A string \\\\ \\/ \\b \\f \\n \\r \\t \\u003c\\/script\\u003e \\u0026"'
+
+ def helper(expected_output, **encode_kwargs):
+ output = ujson.encode(input, **encode_kwargs)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, expected_output)
+ self.assertEquals(input, ujson.decode(output))
+
+ # Default behavior assumes encode_html_chars=False.
+ helper(not_html_encoded, ensure_ascii=True)
+ helper(not_html_encoded, ensure_ascii=False)
+
+ # Make sure explicit encode_html_chars=False works.
+ helper(not_html_encoded, ensure_ascii=True, encode_html_chars=False)
+ helper(not_html_encoded, ensure_ascii=False, encode_html_chars=False)
+
+ # Make sure explicit encode_html_chars=True does the encoding.
+ helper(html_encoded, ensure_ascii=True, encode_html_chars=True)
+ helper(html_encoded, ensure_ascii=False, encode_html_chars=True)
+
+ def test_doubleLongIssue(self):
+ sut = {u'a': -4342969734183514}
+ encoded = json.dumps(sut)
+ decoded = json.loads(encoded)
+ self.assertEqual(sut, decoded)
+ encoded = ujson.encode(sut, double_precision=100)
+ decoded = ujson.decode(encoded)
+ self.assertEqual(sut, decoded)
+
+ def test_doubleLongDecimalIssue(self):
+ sut = {u'a': -12345678901234.56789012}
+ encoded = json.dumps(sut)
+ decoded = json.loads(encoded)
+ self.assertEqual(sut, decoded)
+ encoded = ujson.encode(sut, double_precision=100)
+ decoded = ujson.decode(encoded)
+ self.assertEqual(sut, decoded)
+
+
+ def test_encodeDecodeLongDecimal(self):
+ sut = {u'a': -528656961.4399388}
+ encoded = ujson.dumps(sut, double_precision=15)
+ ujson.decode(encoded)
+
+ def test_decimalDecodeTest(self):
+ sut = {u'a': 4.56}
+ encoded = ujson.encode(sut)
+ decoded = ujson.decode(encoded)
+ self.assertNotEqual(sut, decoded)
+
+ def test_decimalDecodeTestPrecise(self):
+ sut = {u'a': 4.56}
+ encoded = ujson.encode(sut)
+ decoded = ujson.decode(encoded, precise_float=True)
+ self.assertEqual(sut, decoded)
+
def test_encodeDictWithUnicodeKeys(self):
input = { u"key1": u"value1", u"key1": u"value1", u"key1": u"value1", u"key1": u"value1", u"key1": u"value1", u"key1": u"value1" }
output = ujson.encode(input)
@@ -59,6 +127,7 @@ def test_encodeWithDecimal(self):
def test_encodeDoubleNegConversion(self):
input = -math.pi
output = ujson.encode(input)
+
self.assertEquals(round(input, 5), round(json.loads(output), 5))
self.assertEquals(round(input, 5), round(ujson.decode(output), 5))
@@ -93,10 +162,6 @@ def test_doublePrecisionTest(self):
self.assertEquals(round(input, 3), json.loads(output))
self.assertEquals(round(input, 3), ujson.decode(output))
- output = ujson.encode(input)
- self.assertEquals(round(input, 5), json.loads(output))
- self.assertEquals(round(input, 5), ujson.decode(output))
-
def test_invalidDoublePrecision(self):
input = 30.12345678901234567890
output = ujson.encode(input, double_precision = 20)
@@ -373,6 +438,15 @@ def test_decodeBrokenArrayEnd(self):
return
assert False, "Wrong exception"
+ def test_decodeArrayDepthTooBig(self):
+ input = '[' * (1024 * 1024)
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
def test_decodeBrokenObjectEnd(self):
input = "}"
try:
@@ -382,6 +456,15 @@ def test_decodeBrokenObjectEnd(self):
return
assert False, "Wrong exception"
+ def test_decodeObjectDepthTooBig(self):
+ input = '{' * (1024 * 1024)
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
def test_decodeStringUnterminated(self):
input = "\"TESTING"
try:
@@ -567,7 +650,7 @@ def test_numericIntFrcExp(self):
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpEPLUS(self):
- input = "1337E+40"
+ input = "1337E+9"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
@@ -1192,7 +1275,165 @@ def test_datetimeindex(self):
decoded = Series(ujson.decode(ujson.encode(ts)))
idx_values = decoded.index.values.astype(np.int64)
decoded.index = DatetimeIndex(idx_values)
- tm.assert_series_equal(np.round(ts, 5), decoded)
+ tm.assert_series_equal(ts, decoded)
+
+ def test_decodeArrayTrailingCommaFail(self):
+ input = "[31337,]"
+ try:
+ ujson.decode(input)
+ except ValueError:
+ pass
+ else:
+ assert False, "expected ValueError"
+
+ def test_decodeArrayLeadingCommaFail(self):
+ input = "[,31337]"
+ try:
+ ujson.decode(input)
+ except ValueError:
+ pass
+ else:
+ assert False, "expected ValueError"
+
+ def test_decodeArrayOnlyCommaFail(self):
+ input = "[,]"
+ try:
+ ujson.decode(input)
+ except ValueError:
+ pass
+ else:
+ assert False, "expected ValueError"
+
+ def test_decodeArrayUnmatchedBracketFail(self):
+ input = "[]]"
+ try:
+ ujson.decode(input)
+ except ValueError:
+ pass
+ else:
+ assert False, "expected ValueError"
+
+ def test_decodeArrayEmpty(self):
+ input = "[]"
+ ujson.decode(input)
+
+ def test_decodeArrayOneItem(self):
+ input = "[31337]"
+ ujson.decode(input)
+
+ def test_decodeBigValue(self):
+ input = "9223372036854775807"
+ ujson.decode(input)
+
+ def test_decodeSmallValue(self):
+ input = "-9223372036854775808"
+ ujson.decode(input)
+
+ def test_decodeTooBigValue(self):
+ try:
+ input = "9223372036854775808"
+ ujson.decode(input)
+ except ValueError, e:
+ pass
+ else:
+ assert False, "expected ValueError"
+
+ def test_decodeTooSmallValue(self):
+ try:
+ input = "-90223372036854775809"
+ ujson.decode(input)
+ except ValueError,e:
+ pass
+ else:
+ assert False, "expected ValueError"
+
+ def test_decodeVeryTooBigValue(self):
+ try:
+ input = "9223372036854775808"
+ ujson.decode(input)
+ except ValueError:
+ pass
+ else:
+ assert False, "expected ValueError"
+
+ def test_decodeVeryTooSmallValue(self):
+ try:
+ input = "-90223372036854775809"
+ ujson.decode(input)
+ except ValueError:
+ pass
+ else:
+ assert False, "expected ValueError"
+
+ def test_decodeWithTrailingWhitespaces(self):
+ input = "{}\n\t "
+ ujson.decode(input)
+
+ def test_decodeWithTrailingNonWhitespaces(self):
+ try:
+ input = "{}\n\t a"
+ ujson.decode(input)
+ except ValueError:
+ pass
+ else:
+ assert False, "expected ValueError"
+
+ def test_decodeArrayWithBigInt(self):
+ try:
+ ujson.loads('[18446098363113800555]')
+ except ValueError:
+ pass
+ else:
+ assert False, "expected ValueError"
+
+ def test_decodeArrayFaultyUnicode(self):
+ try:
+ ujson.loads('[18446098363113800555]')
+ except ValueError:
+ pass
+ else:
+ assert False, "expected ValueError"
+
+
+ def test_decodeFloatingPointAdditionalTests(self):
+ self.assertEquals(-1.1234567893, ujson.loads("-1.1234567893"))
+ self.assertEquals(-1.234567893, ujson.loads("-1.234567893"))
+ self.assertEquals(-1.34567893, ujson.loads("-1.34567893"))
+ self.assertEquals(-1.4567893, ujson.loads("-1.4567893"))
+ self.assertEquals(-1.567893, ujson.loads("-1.567893"))
+ self.assertEquals(-1.67893, ujson.loads("-1.67893"))
+ self.assertEquals(-1.7893, ujson.loads("-1.7893"))
+ self.assertEquals(-1.893, ujson.loads("-1.893"))
+ self.assertEquals(-1.3, ujson.loads("-1.3"))
+
+ self.assertEquals(1.1234567893, ujson.loads("1.1234567893"))
+ self.assertEquals(1.234567893, ujson.loads("1.234567893"))
+ self.assertEquals(1.34567893, ujson.loads("1.34567893"))
+ self.assertEquals(1.4567893, ujson.loads("1.4567893"))
+ self.assertEquals(1.567893, ujson.loads("1.567893"))
+ self.assertEquals(1.67893, ujson.loads("1.67893"))
+ self.assertEquals(1.7893, ujson.loads("1.7893"))
+ self.assertEquals(1.893, ujson.loads("1.893"))
+ self.assertEquals(1.3, ujson.loads("1.3"))
+
+ def test_encodeBigSet(self):
+ s = set()
+ for x in xrange(0, 100000):
+ s.add(x)
+ ujson.encode(s)
+
+ def test_encodeEmptySet(self):
+ s = set()
+ self.assertEquals("[]", ujson.encode(s))
+
+ def test_encodeSet(self):
+ s = set([1,2,3,4,5,6,7,8,9])
+ enc = ujson.encode(s)
+ dec = ujson.decode(enc)
+
+ for v in dec:
+ self.assertTrue(v in s)
+
"""
def test_decodeNumericIntFrcOverflow(self):
diff --git a/pandas/src/ujson/lib/ultrajson.h b/pandas/src/ujson/lib/ultrajson.h
index eae665f00f03e..4d7af3dde1f02 100644
--- a/pandas/src/ujson/lib/ultrajson.h
+++ b/pandas/src/ujson/lib/ultrajson.h
@@ -1,37 +1,38 @@
/*
-Copyright (c) 2011, Jonas Tarnstrom and ESN Social Software AB
+Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
-1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-3. All advertising materials mentioning features or use of this software
- must display the following acknowledgement:
- This product includes software developed by ESN Social Software AB (www.esn.me).
-4. Neither the name of the ESN Social Software AB nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY ESN SOCIAL SOFTWARE AB ''AS IS'' AND ANY
-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the ESN Social Software AB nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-Portions of code from:
-MODP_ASCII - Ascii transformations (upper/lower, etc)
+
+Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
http://code.google.com/p/stringencoders/
Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
+Numeric decoder derived from from TCL library
+http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
+ * Copyright (c) 1988-1993 The Regents of the University of California.
+ * Copyright (c) 1994 Sun Microsystems, Inc.
*/
/*
@@ -54,8 +55,6 @@ tree doesn't have cyclic references.
#include <stdio.h>
#include <wchar.h>
-//#define JSON_DECODE_NUMERIC_AS_DOUBLE
-
// Don't output any extra whitespaces when encoding
#define JSON_NO_EXTRA_WHITESPACE
@@ -69,6 +68,11 @@ tree doesn't have cyclic references.
#define JSON_MAX_RECURSION_DEPTH 1024
#endif
+// Max recursion depth, default for decoder
+#ifndef JSON_MAX_OBJECT_DEPTH
+#define JSON_MAX_OBJECT_DEPTH 1024
+#endif
+
/*
Dictates and limits how much stack space for buffers UltraJSON will use before resorting to provided heap functions */
#ifndef JSON_MAX_STACK_BUFFER_SIZE
@@ -95,26 +99,34 @@ typedef __int64 JSLONG;
#else
-#include <sys/types.h>
+#include <stdint.h>
typedef int64_t JSINT64;
-typedef u_int64_t JSUINT64;
+typedef uint64_t JSUINT64;
typedef int32_t JSINT32;
-typedef u_int32_t JSUINT32;
+typedef uint32_t JSUINT32;
#define FASTCALL_MSVC
+
+#if !defined __x86_64__
#define FASTCALL_ATTR __attribute__((fastcall))
+#else
+#define FASTCALL_ATTR
+#endif
+
#define INLINE_PREFIX inline
-typedef u_int8_t JSUINT8;
-typedef u_int16_t JSUTF16;
-typedef u_int32_t JSUTF32;
+typedef uint8_t JSUINT8;
+typedef uint16_t JSUTF16;
+typedef uint32_t JSUTF32;
typedef int64_t JSLONG;
#define EXPORTFUNCTION
#endif
+#if !(defined(__LITTLE_ENDIAN__) || defined(__BIG_ENDIAN__))
+
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define __LITTLE_ENDIAN__
#else
@@ -125,22 +137,24 @@ typedef int64_t JSLONG;
#endif
+#endif
+
#if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
#error "Endianess not supported"
#endif
enum JSTYPES
{
- JT_NULL, // NULL
- JT_TRUE, //boolean true
- JT_FALSE, //boolean false
- JT_INT, //(JSINT32 (signed 32-bit))
- JT_LONG, //(JSINT64 (signed 64-bit))
- JT_DOUBLE, //(double)
- JT_UTF8, //(char 8-bit)
- JT_ARRAY, // Array structure
- JT_OBJECT, // Key/Value structure
- JT_INVALID, // Internal, do not return nor expect
+ JT_NULL, // NULL
+ JT_TRUE, //boolean true
+ JT_FALSE, //boolean false
+ JT_INT, //(JSINT32 (signed 32-bit))
+ JT_LONG, //(JSINT64 (signed 64-bit))
+ JT_DOUBLE, //(double)
+ JT_UTF8, //(char 8-bit)
+ JT_ARRAY, // Array structure
+ JT_OBJECT, // Key/Value structure
+ JT_INVALID, // Internal, do not return nor expect
};
typedef void * JSOBJ;
@@ -148,9 +162,9 @@ typedef void * JSITER;
typedef struct __JSONTypeContext
{
- int type;
- void *encoder;
- void *prv;
+ int type;
+ void *encoder;
+ void *prv;
} JSONTypeContext;
/*
@@ -166,79 +180,82 @@ typedef void *(*JSPFN_REALLOC)(void *base, size_t size);
typedef struct __JSONObjectEncoder
{
- void (*beginTypeContext)(JSOBJ obj, JSONTypeContext *tc);
- void (*endTypeContext)(JSOBJ obj, JSONTypeContext *tc);
- const char *(*getStringValue)(JSOBJ obj, JSONTypeContext *tc, size_t *_outLen);
- JSINT64 (*getLongValue)(JSOBJ obj, JSONTypeContext *tc);
- JSINT32 (*getIntValue)(JSOBJ obj, JSONTypeContext *tc);
- double (*getDoubleValue)(JSOBJ obj, JSONTypeContext *tc);
-
- /*
- Begin iteration of an iteratable object (JS_ARRAY or JS_OBJECT)
- Implementor should setup iteration state in ti->prv
- */
- JSPFN_ITERBEGIN iterBegin;
-
- /*
- Retrieve next object in an iteration. Should return 0 to indicate iteration has reached end or 1 if there are more items.
- Implementor is responsible for keeping state of the iteration. Use ti->prv fields for this
- */
- JSPFN_ITERNEXT iterNext;
-
- /*
- Ends the iteration of an iteratable object.
- Any iteration state stored in ti->prv can be freed here
- */
- JSPFN_ITEREND iterEnd;
-
- /*
- Returns a reference to the value object of an iterator
- The is responsible for the life-cycle of the returned string. Use iterNext/iterEnd and ti->prv to keep track of current object
- */
- JSPFN_ITERGETVALUE iterGetValue;
-
- /*
- Return name of iterator.
- The is responsible for the life-cycle of the returned string. Use iterNext/iterEnd and ti->prv to keep track of current object
- */
- JSPFN_ITERGETNAME iterGetName;
-
- /*
- Release a value as indicated by setting ti->release = 1 in the previous getValue call.
- The ti->prv array should contain the necessary context to release the value
- */
- void (*releaseObject)(JSOBJ obj);
-
- /* Library functions
- Set to NULL to use STDLIB malloc,realloc,free */
- JSPFN_MALLOC malloc;
- JSPFN_REALLOC realloc;
- JSPFN_FREE free;
-
- /*
- Configuration for max recursion, set to 0 to use default (see JSON_MAX_RECURSION_DEPTH)*/
- int recursionMax;
-
- /*
- Configuration for max decimals of double floating poiunt numbers to encode (0-9) */
- int doublePrecision;
-
- /*
- If true output will be ASCII with all characters above 127 encoded as \uXXXX. If false output will be UTF-8 or what ever charset strings are brought as */
- int forceASCII;
-
-
- /*
- Set to an error message if error occured */
- const char *errorMsg;
- JSOBJ errorObj;
-
- /* Buffer stuff */
- char *start;
- char *offset;
- char *end;
- int heap;
- int level;
+ void (*beginTypeContext)(JSOBJ obj, JSONTypeContext *tc);
+ void (*endTypeContext)(JSOBJ obj, JSONTypeContext *tc);
+ const char *(*getStringValue)(JSOBJ obj, JSONTypeContext *tc, size_t *_outLen);
+ JSINT64 (*getLongValue)(JSOBJ obj, JSONTypeContext *tc);
+ JSINT32 (*getIntValue)(JSOBJ obj, JSONTypeContext *tc);
+ double (*getDoubleValue)(JSOBJ obj, JSONTypeContext *tc);
+
+ /*
+ Begin iteration of an iteratable object (JS_ARRAY or JS_OBJECT)
+ Implementor should setup iteration state in ti->prv
+ */
+ JSPFN_ITERBEGIN iterBegin;
+
+ /*
+ Retrieve next object in an iteration. Should return 0 to indicate iteration has reached end or 1 if there are more items.
+ Implementor is responsible for keeping state of the iteration. Use ti->prv fields for this
+ */
+ JSPFN_ITERNEXT iterNext;
+
+ /*
+ Ends the iteration of an iteratable object.
+ Any iteration state stored in ti->prv can be freed here
+ */
+ JSPFN_ITEREND iterEnd;
+
+ /*
+ Returns a reference to the value object of an iterator
+ The is responsible for the life-cycle of the returned string. Use iterNext/iterEnd and ti->prv to keep track of current object
+ */
+ JSPFN_ITERGETVALUE iterGetValue;
+
+ /*
+ Return name of iterator.
+ The is responsible for the life-cycle of the returned string. Use iterNext/iterEnd and ti->prv to keep track of current object
+ */
+ JSPFN_ITERGETNAME iterGetName;
+
+ /*
+ Release a value as indicated by setting ti->release = 1 in the previous getValue call.
+ The ti->prv array should contain the necessary context to release the value
+ */
+ void (*releaseObject)(JSOBJ obj);
+
+ /* Library functions
+ Set to NULL to use STDLIB malloc,realloc,free */
+ JSPFN_MALLOC malloc;
+ JSPFN_REALLOC realloc;
+ JSPFN_FREE free;
+
+ /*
+ Configuration for max recursion, set to 0 to use default (see JSON_MAX_RECURSION_DEPTH)*/
+ int recursionMax;
+
+ /*
+ Configuration for max decimals of double floating poiunt numbers to encode (0-9) */
+ int doublePrecision;
+
+ /*
+ If true output will be ASCII with all characters above 127 encoded as \uXXXX. If false output will be UTF-8 or what ever charset strings are brought as */
+ int forceASCII;
+
+ /*
+ If true, '<', '>', and '&' characters will be encoded as \u003c, \u003e, and \u0026, respectively. If false, no special encoding will be used. */
+ int encodeHTMLChars;
+
+ /*
+ Set to an error message if error occured */
+ const char *errorMsg;
+ JSOBJ errorObj;
+
+ /* Buffer stuff */
+ char *start;
+ char *offset;
+ char *end;
+ int heap;
+ int level;
} JSONObjectEncoder;
@@ -268,29 +285,27 @@ EXPORTFUNCTION char *JSON_EncodeObject(JSOBJ obj, JSONObjectEncoder *enc, char *
typedef struct __JSONObjectDecoder
{
- JSOBJ (*newString)(wchar_t *start, wchar_t *end);
- int (*objectAddKey)(JSOBJ obj, JSOBJ name, JSOBJ value);
- int (*arrayAddItem)(JSOBJ obj, JSOBJ value);
- JSOBJ (*newTrue)(void);
- JSOBJ (*newFalse)(void);
- JSOBJ (*newNull)(void);
- JSOBJ (*newObject)(void *decoder);
- JSOBJ (*endObject)(JSOBJ obj);
- JSOBJ (*newArray)(void *decoder);
- JSOBJ (*endArray)(JSOBJ obj);
- JSOBJ (*newInt)(JSINT32 value);
- JSOBJ (*newLong)(JSINT64 value);
- JSOBJ (*newDouble)(double value);
- void (*releaseObject)(JSOBJ obj, void *decoder);
- JSPFN_MALLOC malloc;
- JSPFN_FREE free;
- JSPFN_REALLOC realloc;
-
- char *errorStr;
- char *errorOffset;
-
-
-
+ JSOBJ (*newString)(void *prv, wchar_t *start, wchar_t *end);
+ int (*objectAddKey)(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value);
+ int (*arrayAddItem)(void *prv, JSOBJ obj, JSOBJ value);
+ JSOBJ (*newTrue)(void *prv);
+ JSOBJ (*newFalse)(void *prv);
+ JSOBJ (*newNull)(void *prv);
+ JSOBJ (*newObject)(void *prv, void *decoder);
+ JSOBJ (*endObject)(void *prv, JSOBJ obj);
+ JSOBJ (*newArray)(void *prv, void *decoder);
+ JSOBJ (*endArray)(void *prv, JSOBJ obj);
+ JSOBJ (*newInt)(void *prv, JSINT32 value);
+ JSOBJ (*newLong)(void *prv, JSINT64 value);
+ JSOBJ (*newDouble)(void *prv, double value);
+ void (*releaseObject)(void *prv, JSOBJ obj, void *decoder);
+ JSPFN_MALLOC malloc;
+ JSPFN_FREE free;
+ JSPFN_REALLOC realloc;
+ char *errorStr;
+ char *errorOffset;
+ int preciseFloat;
+ void *prv;
} JSONObjectDecoder;
EXPORTFUNCTION JSOBJ JSON_DecodeObject(JSONObjectDecoder *dec, const char *buffer, size_t cbBuffer);
diff --git a/pandas/src/ujson/lib/ultrajsondec.c b/pandas/src/ujson/lib/ultrajsondec.c
index eda30f3fea839..c5cf341ad3092 100644
--- a/pandas/src/ujson/lib/ultrajsondec.c
+++ b/pandas/src/ujson/lib/ultrajsondec.c
@@ -1,37 +1,38 @@
/*
-Copyright (c) 2011, Jonas Tarnstrom and ESN Social Software AB
+Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
-1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-3. All advertising materials mentioning features or use of this software
- must display the following acknowledgement:
- This product includes software developed by ESN Social Software AB (www.esn.me).
-4. Neither the name of the ESN Social Software AB nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY ESN SOCIAL SOFTWARE AB ''AS IS'' AND ANY
-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the ESN Social Software AB nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-Portions of code from:
-MODP_ASCII - Ascii transformations (upper/lower, etc)
+
+Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
http://code.google.com/p/stringencoders/
Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
+Numeric decoder derived from from TCL library
+http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
+* Copyright (c) 1988-1993 The Regents of the University of California.
+* Copyright (c) 1994 Sun Microsystems, Inc.
*/
#include "ultrajson.h"
@@ -40,806 +41,871 @@ Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights rese
#include <string.h>
#include <limits.h>
#include <wchar.h>
+#include <stdlib.h>
+#include <errno.h>
+
+#ifndef TRUE
+#define TRUE 1
+#define FALSE 0
+#endif
+#ifndef NULL
+#define NULL 0
+#endif
struct DecoderState
{
- char *start;
- char *end;
- wchar_t *escStart;
- wchar_t *escEnd;
- int escHeap;
- int lastType;
- JSONObjectDecoder *dec;
+ char *start;
+ char *end;
+ wchar_t *escStart;
+ wchar_t *escEnd;
+ int escHeap;
+ int lastType;
+ JSUINT32 objDepth;
+ void *prv;
+ JSONObjectDecoder *dec;
};
JSOBJ FASTCALL_MSVC decode_any( struct DecoderState *ds) FASTCALL_ATTR;
typedef JSOBJ (*PFN_DECODER)( struct DecoderState *ds);
-#define RETURN_JSOBJ_NULLCHECK(_expr) return(_expr);
-double createDouble(double intNeg, double intValue, double frcValue, int frcDecimalCount)
+static JSOBJ SetError( struct DecoderState *ds, int offset, const char *message)
{
- static const double g_pow10[] = {1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000, 10000000000, 100000000000, 1000000000000, 10000000000000, 100000000000000, 1000000000000000};
-
- return (intValue + (frcValue / g_pow10[frcDecimalCount])) * intNeg;
+ ds->dec->errorOffset = ds->start + offset;
+ ds->dec->errorStr = (char *) message;
+ return NULL;
}
-static JSOBJ SetError( struct DecoderState *ds, int offset, const char *message)
+static void ClearError( struct DecoderState *ds)
{
- ds->dec->errorOffset = ds->start + offset;
- ds->dec->errorStr = (char *) message;
- return NULL;
+ ds->dec->errorOffset = 0;
+ ds->dec->errorStr = NULL;
}
+double createDouble(double intNeg, double intValue, double frcValue, int frcDecimalCount)
+{
+ static const double g_pow10[] = {1.0, 0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001,0.0000001, 0.00000001, 0.000000001, 0.0000000001, 0.00000000001, 0.000000000001, 0.0000000000001, 0.00000000000001, 0.000000000000001};
+ return (intValue + (frcValue * g_pow10[frcDecimalCount])) * intNeg;
+}
-FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_numeric ( struct DecoderState *ds)
+FASTCALL_ATTR JSOBJ FASTCALL_MSVC decodePreciseFloat(struct DecoderState *ds)
{
-#ifdef JSON_DECODE_NUMERIC_AS_DOUBLE
- double intNeg = 1;
- double intValue;
-#else
- int intNeg = 1;
- JSLONG intValue;
-#endif
+ char *end;
+ double value;
+ errno = 0;
- double expNeg;
- int chr;
- int decimalCount = 0;
- double frcValue = 0.0;
- double expValue;
- char *offset = ds->start;
+ value = strtod(ds->start, &end);
- if (*(offset) == '-')
- {
- offset ++;
- intNeg = -1;
- }
+ if (errno == ERANGE)
+ {
+ return SetError(ds, -1, "Range error when decoding numeric as double");
+ }
- // Scan integer part
- intValue = 0;
+ ds->start = end;
+ return ds->dec->newDouble(ds->prv, value);
+}
- while (1)
- {
- chr = (int) (unsigned char) *(offset);
+FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_numeric (struct DecoderState *ds)
+{
+ int intNeg = 1;
+ int mantSize = 0;
+ JSUINT64 intValue;
+ int chr;
+ int decimalCount = 0;
+ double frcValue = 0.0;
+ double expNeg;
+ double expValue;
+ char *offset = ds->start;
+
+ JSUINT64 overflowLimit = LLONG_MAX;
+
+ if (*(offset) == '-')
+ {
+ offset ++;
+ intNeg = -1;
+ overflowLimit = LLONG_MIN;
+ }
+
+ // Scan integer part
+ intValue = 0;
- switch (chr)
+ while (1)
+ {
+ chr = (int) (unsigned char) *(offset);
+
+ switch (chr)
+ {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ {
+ //FIXME: Check for arithemtic overflow here
+ //PERF: Don't do 64-bit arithmetic here unless we know we have to
+ intValue = intValue * 10ULL + (JSLONG) (chr - 48);
+
+ if (intValue > overflowLimit)
{
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- //FIXME: Check for arithemtic overflow here
- //PERF: Don't do 64-bit arithmetic here unless we know we have to
-#ifdef JSON_DECODE_NUMERIC_AS_DOUBLE
- intValue = intValue * 10.0 + (double) (chr - 48);
-#else
- intValue = intValue * 10LL + (JSLONG) (chr - 48);
-#endif
- offset ++;
- break;
-
- case '.':
- offset ++;
- goto DECODE_FRACTION;
- break;
-
- case 'e':
- case 'E':
- offset ++;
- goto DECODE_EXPONENT;
- break;
-
- default:
- goto BREAK_INT_LOOP;
- break;
+ return SetError(ds, -1, overflowLimit == LLONG_MAX ? "Value is too big" : "Value is too small");
}
+
+ offset ++;
+ mantSize ++;
+ break;
+ }
+ case '.':
+ {
+ offset ++;
+ goto DECODE_FRACTION;
+ break;
+ }
+ case 'e':
+ case 'E':
+ {
+ offset ++;
+ goto DECODE_EXPONENT;
+ break;
+ }
+
+ default:
+ {
+ goto BREAK_INT_LOOP;
+ break;
+ }
}
+ }
BREAK_INT_LOOP:
- ds->lastType = JT_INT;
- ds->start = offset;
+ ds->lastType = JT_INT;
+ ds->start = offset;
- //If input string is LONGLONG_MIN here the value is already negative so we should not flip it
-
-#ifdef JSON_DECODE_NUMERIC_AS_DOUBLE
-#else
- if (intValue < 0)
- {
- intNeg = 1;
- }
-#endif
-
- //dbg1 = (intValue * intNeg);
- //dbg2 = (JSLONG) dbg1;
-
-#ifdef JSON_DECODE_NUMERIC_AS_DOUBLE
- if (intValue > (double) INT_MAX || intValue < (double) INT_MIN)
-#else
- if ( (intValue >> 31))
-#endif
- {
- RETURN_JSOBJ_NULLCHECK(ds->dec->newLong( (JSINT64) (intValue * (JSINT64) intNeg)));
- }
- else
- {
- RETURN_JSOBJ_NULLCHECK(ds->dec->newInt( (JSINT32) (intValue * intNeg)));
- }
+ if ((intValue >> 31))
+ {
+ return ds->dec->newLong(ds->prv, (JSINT64) (intValue * (JSINT64) intNeg));
+ }
+ else
+ {
+ return ds->dec->newInt(ds->prv, (JSINT32) (intValue * intNeg));
+ }
+DECODE_FRACTION:
+ if (ds->dec->preciseFloat)
+ {
+ return decodePreciseFloat(ds);
+ }
-DECODE_FRACTION:
+ // Scan fraction part
+ frcValue = 0.0;
+ for (;;)
+ {
+ chr = (int) (unsigned char) *(offset);
- // Scan fraction part
- frcValue = 0.0;
- while (1)
+ switch (chr)
{
- chr = (int) (unsigned char) *(offset);
-
- switch (chr)
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ {
+ if (decimalCount < JSON_DOUBLE_MAX_DECIMALS)
{
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- if (decimalCount < JSON_DOUBLE_MAX_DECIMALS)
- {
- frcValue = frcValue * 10.0 + (double) (chr - 48);
- decimalCount ++;
- }
- offset ++;
- break;
-
- case 'e':
- case 'E':
- offset ++;
- goto DECODE_EXPONENT;
- break;
-
- default:
- goto BREAK_FRC_LOOP;
+ frcValue = frcValue * 10.0 + (double) (chr - 48);
+ decimalCount ++;
}
+ offset ++;
+ break;
+ }
+ case 'e':
+ case 'E':
+ {
+ offset ++;
+ goto DECODE_EXPONENT;
+ break;
+ }
+ default:
+ {
+ goto BREAK_FRC_LOOP;
+ }
}
+ }
BREAK_FRC_LOOP:
-
- if (intValue < 0)
- {
- intNeg = 1;
- }
-
- //FIXME: Check for arithemtic overflow here
- ds->lastType = JT_DOUBLE;
- ds->start = offset;
- RETURN_JSOBJ_NULLCHECK(ds->dec->newDouble (createDouble( (double) intNeg, (double) intValue, frcValue, decimalCount)));
+ //FIXME: Check for arithemtic overflow here
+ ds->lastType = JT_DOUBLE;
+ ds->start = offset;
+ return ds->dec->newDouble (ds->prv, createDouble( (double) intNeg, (double) intValue, frcValue, decimalCount));
DECODE_EXPONENT:
- expNeg = 1.0;
+ if (ds->dec->preciseFloat)
+ {
+ return decodePreciseFloat(ds);
+ }
- if (*(offset) == '-')
- {
- expNeg = -1.0;
- offset ++;
- }
- else
- if (*(offset) == '+')
- {
- expNeg = +1.0;
- offset ++;
- }
+ expNeg = 1.0;
- expValue = 0.0;
+ if (*(offset) == '-')
+ {
+ expNeg = -1.0;
+ offset ++;
+ }
+ else
+ if (*(offset) == '+')
+ {
+ expNeg = +1.0;
+ offset ++;
+ }
- while (1)
- {
- chr = (int) (unsigned char) *(offset);
+ expValue = 0.0;
- switch (chr)
- {
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- expValue = expValue * 10.0 + (double) (chr - 48);
- offset ++;
- break;
-
- default:
- goto BREAK_EXP_LOOP;
+ for (;;)
+ {
+ chr = (int) (unsigned char) *(offset);
- }
+ switch (chr)
+ {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ {
+ expValue = expValue * 10.0 + (double) (chr - 48);
+ offset ++;
+ break;
+ }
+ default:
+ {
+ goto BREAK_EXP_LOOP;
+ }
}
+ }
BREAK_EXP_LOOP:
-
-#ifdef JSON_DECODE_NUMERIC_AS_DOUBLE
-#else
- if (intValue < 0)
- {
- intNeg = 1;
- }
-#endif
-
- //FIXME: Check for arithemtic overflow here
- ds->lastType = JT_DOUBLE;
- ds->start = offset;
- RETURN_JSOBJ_NULLCHECK(ds->dec->newDouble (createDouble( (double) intNeg, (double) intValue , frcValue, decimalCount) * pow(10.0, expValue * expNeg)));
+ //FIXME: Check for arithemtic overflow here
+ ds->lastType = JT_DOUBLE;
+ ds->start = offset;
+ return ds->dec->newDouble (ds->prv, createDouble( (double) intNeg, (double) intValue , frcValue, decimalCount) * pow(10.0, expValue * expNeg));
}
-FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_true ( struct DecoderState *ds)
+FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_true ( struct DecoderState *ds)
{
- char *offset = ds->start;
- offset ++;
+ char *offset = ds->start;
+ offset ++;
- if (*(offset++) != 'r')
- goto SETERROR;
- if (*(offset++) != 'u')
- goto SETERROR;
- if (*(offset++) != 'e')
- goto SETERROR;
+ if (*(offset++) != 'r')
+ goto SETERROR;
+ if (*(offset++) != 'u')
+ goto SETERROR;
+ if (*(offset++) != 'e')
+ goto SETERROR;
- ds->lastType = JT_TRUE;
- ds->start = offset;
- RETURN_JSOBJ_NULLCHECK(ds->dec->newTrue());
+ ds->lastType = JT_TRUE;
+ ds->start = offset;
+ return ds->dec->newTrue(ds->prv);
SETERROR:
- return SetError(ds, -1, "Unexpected character found when decoding 'true'");
+ return SetError(ds, -1, "Unexpected character found when decoding 'true'");
}
-FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_false ( struct DecoderState *ds)
+FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_false ( struct DecoderState *ds)
{
- char *offset = ds->start;
- offset ++;
-
- if (*(offset++) != 'a')
- goto SETERROR;
- if (*(offset++) != 'l')
- goto SETERROR;
- if (*(offset++) != 's')
- goto SETERROR;
- if (*(offset++) != 'e')
- goto SETERROR;
-
- ds->lastType = JT_FALSE;
- ds->start = offset;
- RETURN_JSOBJ_NULLCHECK(ds->dec->newFalse());
+ char *offset = ds->start;
+ offset ++;
+
+ if (*(offset++) != 'a')
+ goto SETERROR;
+ if (*(offset++) != 'l')
+ goto SETERROR;
+ if (*(offset++) != 's')
+ goto SETERROR;
+ if (*(offset++) != 'e')
+ goto SETERROR;
+
+ ds->lastType = JT_FALSE;
+ ds->start = offset;
+ return ds->dec->newFalse(ds->prv);
SETERROR:
- return SetError(ds, -1, "Unexpected character found when decoding 'false'");
-
+ return SetError(ds, -1, "Unexpected character found when decoding 'false'");
}
-
-FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_null ( struct DecoderState *ds)
+FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_null ( struct DecoderState *ds)
{
- char *offset = ds->start;
- offset ++;
+ char *offset = ds->start;
+ offset ++;
- if (*(offset++) != 'u')
- goto SETERROR;
- if (*(offset++) != 'l')
- goto SETERROR;
- if (*(offset++) != 'l')
- goto SETERROR;
+ if (*(offset++) != 'u')
+ goto SETERROR;
+ if (*(offset++) != 'l')
+ goto SETERROR;
+ if (*(offset++) != 'l')
+ goto SETERROR;
- ds->lastType = JT_NULL;
- ds->start = offset;
- RETURN_JSOBJ_NULLCHECK(ds->dec->newNull());
+ ds->lastType = JT_NULL;
+ ds->start = offset;
+ return ds->dec->newNull(ds->prv);
SETERROR:
- return SetError(ds, -1, "Unexpected character found when decoding 'null'");
+ return SetError(ds, -1, "Unexpected character found when decoding 'null'");
}
-FASTCALL_ATTR void FASTCALL_MSVC SkipWhitespace(struct DecoderState *ds)
+FASTCALL_ATTR void FASTCALL_MSVC SkipWhitespace(struct DecoderState *ds)
{
- char *offset = ds->start;
+ char *offset;
- while (1)
+ for (offset = ds->start; (ds->end - offset) > 0; offset ++)
+ {
+ switch (*offset)
{
- switch (*offset)
- {
- case ' ':
- case '\t':
- case '\r':
- case '\n':
- offset ++;
- break;
-
- default:
- ds->start = offset;
- return;
- }
+ case ' ':
+ case '\t':
+ case '\r':
+ case '\n':
+ break;
+
+ default:
+ ds->start = offset;
+ return;
}
-}
+ }
+ if (offset == ds->end)
+ {
+ ds->start = ds->end;
+ }
+}
enum DECODESTRINGSTATE
{
- DS_ISNULL = 0x32,
- DS_ISQUOTE,
- DS_ISESCAPE,
- DS_UTFLENERROR,
+ DS_ISNULL = 0x32,
+ DS_ISQUOTE,
+ DS_ISESCAPE,
+ DS_UTFLENERROR,
};
-static const JSUINT8 g_decoderLookup[256] =
+static const JSUINT8 g_decoderLookup[256] =
{
-/* 0x00 */ DS_ISNULL, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-/* 0x10 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-/* 0x20 */ 1, 1, DS_ISQUOTE, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-/* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-/* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-/* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, DS_ISESCAPE, 1, 1, 1,
-/* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-/* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-/* 0x80 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-/* 0x90 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-/* 0xa0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-/* 0xb0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-/* 0xc0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-/* 0xd0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-/* 0xe0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
-/* 0xf0 */ 4, 4, 4, 4, 4, 4, 4, 4, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR,
+ /* 0x00 */ DS_ISNULL, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x10 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x20 */ 1, 1, DS_ISQUOTE, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, DS_ISESCAPE, 1, 1, 1,
+ /* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x80 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x90 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0xa0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0xb0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0xc0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ /* 0xd0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ /* 0xe0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0xf0 */ 4, 4, 4, 4, 4, 4, 4, 4, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR,
};
-
FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_string ( struct DecoderState *ds)
{
- JSUTF16 sur[2] = { 0 };
- int iSur = 0;
- int index;
- wchar_t *escOffset;
- size_t escLen = (ds->escEnd - ds->escStart);
- JSUINT8 *inputOffset;
- JSUINT8 oct;
- JSUTF32 ucs;
- ds->lastType = JT_INVALID;
- ds->start ++;
-
- if ( (ds->end - ds->start) > escLen)
+ JSUTF16 sur[2] = { 0 };
+ int iSur = 0;
+ int index;
+ wchar_t *escOffset;
+ wchar_t *escStart;
+ size_t escLen = (ds->escEnd - ds->escStart);
+ JSUINT8 *inputOffset;
+ JSUINT8 oct;
+ JSUTF32 ucs;
+ ds->lastType = JT_INVALID;
+ ds->start ++;
+
+ if ( (size_t) (ds->end - ds->start) > escLen)
+ {
+ size_t newSize = (ds->end - ds->start);
+
+ if (ds->escHeap)
{
- size_t newSize = (ds->end - ds->start);
-
- if (ds->escHeap)
- {
- ds->escStart = (wchar_t *) ds->dec->realloc (ds->escStart, newSize * sizeof(wchar_t));
- if (!ds->escStart)
- {
- return SetError(ds, -1, "Could not reserve memory block");
- }
- }
- else
- {
- wchar_t *oldStart = ds->escStart;
- ds->escHeap = 1;
- ds->escStart = (wchar_t *) ds->dec->malloc (newSize * sizeof(wchar_t));
- if (!ds->escStart)
- {
- return SetError(ds, -1, "Could not reserve memory block");
- }
- memcpy (ds->escStart, oldStart, escLen * sizeof(wchar_t));
- }
-
- ds->escEnd = ds->escStart + newSize;
+ if (newSize > (UINT_MAX / sizeof(wchar_t)))
+ {
+ return SetError(ds, -1, "Could not reserve memory block");
+ }
+ escStart = (wchar_t *)ds->dec->realloc(ds->escStart, newSize * sizeof(wchar_t));
+ if (!escStart)
+ {
+ ds->dec->free(ds->escStart);
+ return SetError(ds, -1, "Could not reserve memory block");
+ }
+ ds->escStart = escStart;
+ }
+ else
+ {
+ wchar_t *oldStart = ds->escStart;
+ ds->escHeap = 1;
+ if (newSize > (UINT_MAX / sizeof(wchar_t)))
+ {
+ return SetError(ds, -1, "Could not reserve memory block");
+ }
+ ds->escStart = (wchar_t *) ds->dec->malloc(newSize * sizeof(wchar_t));
+ if (!ds->escStart)
+ {
+ return SetError(ds, -1, "Could not reserve memory block");
+ }
+ memcpy(ds->escStart, oldStart, escLen * sizeof(wchar_t));
}
- escOffset = ds->escStart;
- inputOffset = ds->start;
+ ds->escEnd = ds->escStart + newSize;
+ }
- while(1)
+ escOffset = ds->escStart;
+ inputOffset = (JSUINT8 *) ds->start;
+
+ for (;;)
+ {
+ switch (g_decoderLookup[(JSUINT8)(*inputOffset)])
{
- switch (g_decoderLookup[(JSUINT8)(*inputOffset)])
+ case DS_ISNULL:
+ {
+ return SetError(ds, -1, "Unmatched ''\"' when when decoding 'string'");
+ }
+ case DS_ISQUOTE:
+ {
+ ds->lastType = JT_UTF8;
+ inputOffset ++;
+ ds->start += ( (char *) inputOffset - (ds->start));
+ return ds->dec->newString(ds->prv, ds->escStart, escOffset);
+ }
+ case DS_UTFLENERROR:
+ {
+ return SetError (ds, -1, "Invalid UTF-8 sequence length when decoding 'string'");
+ }
+ case DS_ISESCAPE:
+ inputOffset ++;
+ switch (*inputOffset)
{
- case DS_ISNULL:
- return SetError(ds, -1, "Unmatched ''\"' when when decoding 'string'");
-
- case DS_ISQUOTE:
- ds->lastType = JT_UTF8;
+ case '\\': *(escOffset++) = L'\\'; inputOffset++; continue;
+ case '\"': *(escOffset++) = L'\"'; inputOffset++; continue;
+ case '/': *(escOffset++) = L'/'; inputOffset++; continue;
+ case 'b': *(escOffset++) = L'\b'; inputOffset++; continue;
+ case 'f': *(escOffset++) = L'\f'; inputOffset++; continue;
+ case 'n': *(escOffset++) = L'\n'; inputOffset++; continue;
+ case 'r': *(escOffset++) = L'\r'; inputOffset++; continue;
+ case 't': *(escOffset++) = L'\t'; inputOffset++; continue;
+
+ case 'u':
+ {
+ int index;
inputOffset ++;
- ds->start += ( (char *) inputOffset - (ds->start));
- RETURN_JSOBJ_NULLCHECK(ds->dec->newString(ds->escStart, escOffset));
-
- case DS_UTFLENERROR:
- return SetError (ds, -1, "Invalid UTF-8 sequence length when decoding 'string'");
- case DS_ISESCAPE:
- inputOffset ++;
- switch (*inputOffset)
+ for (index = 0; index < 4; index ++)
{
- case '\\': *(escOffset++) = L'\\'; inputOffset++; continue;
- case '\"': *(escOffset++) = L'\"'; inputOffset++; continue;
- case '/': *(escOffset++) = L'/'; inputOffset++; continue;
- case 'b': *(escOffset++) = L'\b'; inputOffset++; continue;
- case 'f': *(escOffset++) = L'\f'; inputOffset++; continue;
- case 'n': *(escOffset++) = L'\n'; inputOffset++; continue;
- case 'r': *(escOffset++) = L'\r'; inputOffset++; continue;
- case 't': *(escOffset++) = L'\t'; inputOffset++; continue;
-
- case 'u':
- {
- int index;
- inputOffset ++;
-
- for (index = 0; index < 4; index ++)
- {
- switch (*inputOffset)
- {
- case '\0': return SetError (ds, -1, "Unterminated unicode escape sequence when decoding 'string'");
- default: return SetError (ds, -1, "Unexpected character in unicode escape sequence when decoding 'string'");
-
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- sur[iSur] = (sur[iSur] << 4) + (JSUTF16) (*inputOffset - '0');
- break;
-
- case 'a':
- case 'b':
- case 'c':
- case 'd':
- case 'e':
- case 'f':
- sur[iSur] = (sur[iSur] << 4) + 10 + (JSUTF16) (*inputOffset - 'a');
- break;
-
- case 'A':
- case 'B':
- case 'C':
- case 'D':
- case 'E':
- case 'F':
- sur[iSur] = (sur[iSur] << 4) + 10 + (JSUTF16) (*inputOffset - 'A');
- break;
- }
-
- inputOffset ++;
- }
-
-
- if (iSur == 0)
- {
- if((sur[iSur] & 0xfc00) == 0xd800)
- {
- // First of a surrogate pair, continue parsing
- iSur ++;
- break;
- }
- (*escOffset++) = (wchar_t) sur[iSur];
- iSur = 0;
- }
- else
- {
- // Decode pair
- if ((sur[1] & 0xfc00) != 0xdc00)
- {
- return SetError (ds, -1, "Unpaired high surrogate when decoding 'string'");
- }
+ switch (*inputOffset)
+ {
+ case '\0': return SetError (ds, -1, "Unterminated unicode escape sequence when decoding 'string'");
+ default: return SetError (ds, -1, "Unexpected character in unicode escape sequence when decoding 'string'");
+
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ sur[iSur] = (sur[iSur] << 4) + (JSUTF16) (*inputOffset - '0');
+ break;
+
+ case 'a':
+ case 'b':
+ case 'c':
+ case 'd':
+ case 'e':
+ case 'f':
+ sur[iSur] = (sur[iSur] << 4) + 10 + (JSUTF16) (*inputOffset - 'a');
+ break;
+
+ case 'A':
+ case 'B':
+ case 'C':
+ case 'D':
+ case 'E':
+ case 'F':
+ sur[iSur] = (sur[iSur] << 4) + 10 + (JSUTF16) (*inputOffset - 'A');
+ break;
+ }
+
+ inputOffset ++;
+ }
+ if (iSur == 0)
+ {
+ if((sur[iSur] & 0xfc00) == 0xd800)
+ {
+ // First of a surrogate pair, continue parsing
+ iSur ++;
+ break;
+ }
+ (*escOffset++) = (wchar_t) sur[iSur];
+ iSur = 0;
+ }
+ else
+ {
+ // Decode pair
+ if ((sur[1] & 0xfc00) != 0xdc00)
+ {
+ return SetError (ds, -1, "Unpaired high surrogate when decoding 'string'");
+ }
#if WCHAR_MAX == 0xffff
- (*escOffset++) = (wchar_t) sur[0];
- (*escOffset++) = (wchar_t) sur[1];
+ (*escOffset++) = (wchar_t) sur[0];
+ (*escOffset++) = (wchar_t) sur[1];
#else
- (*escOffset++) = (wchar_t) 0x10000 + (((sur[0] - 0xd800) << 10) | (sur[1] - 0xdc00));
+ (*escOffset++) = (wchar_t) 0x10000 + (((sur[0] - 0xd800) << 10) | (sur[1] - 0xdc00));
#endif
- iSur = 0;
- }
- break;
- }
-
- case '\0': return SetError(ds, -1, "Unterminated escape sequence when decoding 'string'");
- default: return SetError(ds, -1, "Unrecognized escape sequence when decoding 'string'");
+ iSur = 0;
}
- break;
-
- case 1:
- *(escOffset++) = (wchar_t) (*inputOffset++);
- break;
+ break;
+ }
- case 2:
+ case '\0': return SetError(ds, -1, "Unterminated escape sequence when decoding 'string'");
+ default: return SetError(ds, -1, "Unrecognized escape sequence when decoding 'string'");
+ }
+ break;
+
+ case 1:
+ {
+ *(escOffset++) = (wchar_t) (*inputOffset++);
+ break;
+ }
+
+ case 2:
+ {
+ ucs = (*inputOffset++) & 0x1f;
+ ucs <<= 6;
+ if (((*inputOffset) & 0x80) != 0x80)
{
- ucs = (*inputOffset++) & 0x1f;
- ucs <<= 6;
- if (((*inputOffset) & 0x80) != 0x80)
- {
- return SetError(ds, -1, "Invalid octet in UTF-8 sequence when decoding 'string'");
- }
- ucs |= (*inputOffset++) & 0x3f;
- if (ucs < 0x80) return SetError (ds, -1, "Overlong 2 byte UTF-8 sequence detected when decoding 'string'");
- *(escOffset++) = (wchar_t) ucs;
- break;
+ return SetError(ds, -1, "Invalid octet in UTF-8 sequence when decoding 'string'");
}
-
- case 3:
+ ucs |= (*inputOffset++) & 0x3f;
+ if (ucs < 0x80) return SetError (ds, -1, "Overlong 2 byte UTF-8 sequence detected when decoding 'string'");
+ *(escOffset++) = (wchar_t) ucs;
+ break;
+ }
+
+ case 3:
+ {
+ JSUTF32 ucs = 0;
+ ucs |= (*inputOffset++) & 0x0f;
+
+ for (index = 0; index < 2; index ++)
{
- JSUTF32 ucs = 0;
- ucs |= (*inputOffset++) & 0x0f;
+ ucs <<= 6;
+ oct = (*inputOffset++);
- for (index = 0; index < 2; index ++)
- {
- ucs <<= 6;
- oct = (*inputOffset++);
+ if ((oct & 0x80) != 0x80)
+ {
+ return SetError(ds, -1, "Invalid octet in UTF-8 sequence when decoding 'string'");
+ }
- if ((oct & 0x80) != 0x80)
- {
- return SetError(ds, -1, "Invalid octet in UTF-8 sequence when decoding 'string'");
- }
+ ucs |= oct & 0x3f;
+ }
- ucs |= oct & 0x3f;
- }
+ if (ucs < 0x800) return SetError (ds, -1, "Overlong 3 byte UTF-8 sequence detected when encoding string");
+ *(escOffset++) = (wchar_t) ucs;
+ break;
+ }
- if (ucs < 0x800) return SetError (ds, -1, "Overlong 3 byte UTF-8 sequence detected when encoding string");
- *(escOffset++) = (wchar_t) ucs;
- break;
- }
+ case 4:
+ {
+ JSUTF32 ucs = 0;
+ ucs |= (*inputOffset++) & 0x07;
- case 4:
+ for (index = 0; index < 3; index ++)
{
- JSUTF32 ucs = 0;
- ucs |= (*inputOffset++) & 0x07;
-
- for (index = 0; index < 3; index ++)
- {
- ucs <<= 6;
- oct = (*inputOffset++);
+ ucs <<= 6;
+ oct = (*inputOffset++);
- if ((oct & 0x80) != 0x80)
- {
- return SetError(ds, -1, "Invalid octet in UTF-8 sequence when decoding 'string'");
- }
+ if ((oct & 0x80) != 0x80)
+ {
+ return SetError(ds, -1, "Invalid octet in UTF-8 sequence when decoding 'string'");
+ }
- ucs |= oct & 0x3f;
- }
+ ucs |= oct & 0x3f;
+ }
- if (ucs < 0x10000) return SetError (ds, -1, "Overlong 4 byte UTF-8 sequence detected when decoding 'string'");
+ if (ucs < 0x10000) return SetError (ds, -1, "Overlong 4 byte UTF-8 sequence detected when decoding 'string'");
- #if WCHAR_MAX == 0xffff
- if (ucs >= 0x10000)
- {
- ucs -= 0x10000;
- *(escOffset++) = (ucs >> 10) + 0xd800;
- *(escOffset++) = (ucs & 0x3ff) + 0xdc00;
- }
- else
- {
- *(escOffset++) = (wchar_t) ucs;
- }
- #else
- *(escOffset++) = (wchar_t) ucs;
- #endif
- break;
+#if WCHAR_MAX == 0xffff
+ if (ucs >= 0x10000)
+ {
+ ucs -= 0x10000;
+ *(escOffset++) = (wchar_t) (ucs >> 10) + 0xd800;
+ *(escOffset++) = (wchar_t) (ucs & 0x3ff) + 0xdc00;
}
+ else
+ {
+ *(escOffset++) = (wchar_t) ucs;
}
+#else
+ *(escOffset++) = (wchar_t) ucs;
+#endif
+ break;
+ }
}
+ }
}
-FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_array( struct DecoderState *ds)
+FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_array(struct DecoderState *ds)
{
- JSOBJ itemValue;
- JSOBJ newObj = ds->dec->newArray(ds->dec);
+ JSOBJ itemValue;
+ JSOBJ newObj;
+ int len;
+ ds->objDepth++;
+ if (ds->objDepth > JSON_MAX_OBJECT_DEPTH) {
+ return SetError(ds, -1, "Reached object decoding depth limit");
+ }
- ds->lastType = JT_INVALID;
- ds->start ++;
+ newObj = ds->dec->newArray(ds->prv, ds->dec);
+ len = 0;
- while (1)//(*ds->start) != '\0')
- {
- SkipWhitespace(ds);
+ ds->lastType = JT_INVALID;
+ ds->start ++;
- if ((*ds->start) == ']')
- {
- ds->start++;
- return ds->dec->endArray(newObj);
- }
+ for (;;)
+ {
+ SkipWhitespace(ds);
- itemValue = decode_any(ds);
+ if ((*ds->start) == ']')
+ {
+ ds->objDepth--;
+ if (len == 0)
+ {
+ ds->start ++;
+ return ds->dec->endArray(ds->prv, newObj);
+ }
+
+ ds->dec->releaseObject(ds->prv, newObj, ds->dec);
+ return SetError(ds, -1, "Unexpected character found when decoding array value (1)");
+ }
- if (itemValue == NULL)
- {
- ds->dec->releaseObject(newObj, ds->dec);
- return NULL;
- }
+ itemValue = decode_any(ds);
- if (!ds->dec->arrayAddItem (newObj, itemValue))
- {
- ds->dec->releaseObject(newObj, ds->dec);
- return NULL;
- }
+ if (itemValue == NULL)
+ {
+ ds->dec->releaseObject(ds->prv, newObj, ds->dec);
+ return NULL;
+ }
- SkipWhitespace(ds);
+ if (!ds->dec->arrayAddItem (ds->prv, newObj, itemValue))
+ {
+ ds->dec->releaseObject(ds->prv, newObj, ds->dec);
+ return NULL;
+ }
- switch (*(ds->start++))
- {
- case ']':
- return ds->dec->endArray(newObj);
+ SkipWhitespace(ds);
- case ',':
- break;
+ switch (*(ds->start++))
+ {
+ case ']':
+ {
+ ds->objDepth--;
+ return ds->dec->endArray(ds->prv, newObj);
+ }
+ case ',':
+ break;
- default:
- ds->dec->releaseObject(newObj, ds->dec);
- return SetError(ds, -1, "Unexpected character in found when decoding array value");
- }
+ default:
+ ds->dec->releaseObject(ds->prv, newObj, ds->dec);
+ return SetError(ds, -1, "Unexpected character found when decoding array value (2)");
}
- ds->dec->releaseObject(newObj, ds->dec);
- return SetError(ds, -1, "Unmatched ']' when decoding 'array'");
+ len ++;
+ }
}
-
-
FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_object( struct DecoderState *ds)
{
- JSOBJ itemName;
- JSOBJ itemValue;
- JSOBJ newObj = ds->dec->newObject(ds->dec);
+ JSOBJ itemName;
+ JSOBJ itemValue;
+ JSOBJ newObj;
- ds->start ++;
+ ds->objDepth++;
+ if (ds->objDepth > JSON_MAX_OBJECT_DEPTH) {
+ return SetError(ds, -1, "Reached object decoding depth limit");
+ }
- while (1)
- {
- SkipWhitespace(ds);
+ newObj = ds->dec->newObject(ds->prv, ds->dec);
- if ((*ds->start) == '}')
- {
- ds->start ++;
- return ds->dec->endObject(newObj);
- }
+ ds->start ++;
- ds->lastType = JT_INVALID;
- itemName = decode_any(ds);
+ for (;;)
+ {
+ SkipWhitespace(ds);
- if (itemName == NULL)
- {
- ds->dec->releaseObject(newObj, ds->dec);
- return NULL;
- }
+ if ((*ds->start) == '}')
+ {
+ ds->objDepth--;
+ ds->start ++;
+ return ds->dec->endObject(ds->prv, newObj);
+ }
- if (ds->lastType != JT_UTF8)
- {
- ds->dec->releaseObject(newObj, ds->dec);
- ds->dec->releaseObject(itemName, ds->dec);
- return SetError(ds, -1, "Key name of object must be 'string' when decoding 'object'");
- }
+ ds->lastType = JT_INVALID;
+ itemName = decode_any(ds);
- SkipWhitespace(ds);
+ if (itemName == NULL)
+ {
+ ds->dec->releaseObject(ds->prv, newObj, ds->dec);
+ return NULL;
+ }
- if (*(ds->start++) != ':')
- {
- ds->dec->releaseObject(newObj, ds->dec);
- ds->dec->releaseObject(itemName, ds->dec);
- return SetError(ds, -1, "No ':' found when decoding object value");
- }
+ if (ds->lastType != JT_UTF8)
+ {
+ ds->dec->releaseObject(ds->prv, newObj, ds->dec);
+ ds->dec->releaseObject(ds->prv, itemName, ds->dec);
+ return SetError(ds, -1, "Key name of object must be 'string' when decoding 'object'");
+ }
- SkipWhitespace(ds);
+ SkipWhitespace(ds);
- itemValue = decode_any(ds);
+ if (*(ds->start++) != ':')
+ {
+ ds->dec->releaseObject(ds->prv, newObj, ds->dec);
+ ds->dec->releaseObject(ds->prv, itemName, ds->dec);
+ return SetError(ds, -1, "No ':' found when decoding object value");
+ }
- if (itemValue == NULL)
- {
- ds->dec->releaseObject(newObj, ds->dec);
- ds->dec->releaseObject(itemName, ds->dec);
- return NULL;
- }
+ SkipWhitespace(ds);
- if (!ds->dec->objectAddKey (newObj, itemName, itemValue))
- {
- ds->dec->releaseObject(newObj, ds->dec);
- ds->dec->releaseObject(itemName, ds->dec);
- ds->dec->releaseObject(itemValue, ds->dec);
- return NULL;
- }
+ itemValue = decode_any(ds);
- SkipWhitespace(ds);
+ if (itemValue == NULL)
+ {
+ ds->dec->releaseObject(ds->prv, newObj, ds->dec);
+ ds->dec->releaseObject(ds->prv, itemName, ds->dec);
+ return NULL;
+ }
- switch (*(ds->start++))
- {
- case '}':
- return ds->dec->endObject(newObj);
+ if (!ds->dec->objectAddKey (ds->prv, newObj, itemName, itemValue))
+ {
+ ds->dec->releaseObject(ds->prv, newObj, ds->dec);
+ ds->dec->releaseObject(ds->prv, itemName, ds->dec);
+ ds->dec->releaseObject(ds->prv, itemValue, ds->dec);
+ return NULL;
+ }
- case ',':
- break;
+ SkipWhitespace(ds);
- default:
- ds->dec->releaseObject(newObj, ds->dec);
- return SetError(ds, -1, "Unexpected character in found when decoding object value");
- }
+ switch (*(ds->start++))
+ {
+ case '}':
+ {
+ ds->objDepth--;
+ return ds->dec->endObject(ds->prv, newObj);
+ }
+ case ',':
+ break;
+
+ default:
+ ds->dec->releaseObject(ds->prv, newObj, ds->dec);
+ return SetError(ds, -1, "Unexpected character in found when decoding object value");
}
-
- ds->dec->releaseObject(newObj, ds->dec);
- return SetError(ds, -1, "Unmatched '}' when decoding object");
+ }
}
FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_any(struct DecoderState *ds)
{
- while (1)
+ for (;;)
+ {
+ switch (*ds->start)
{
- switch (*ds->start)
- {
- case '\"':
- return decode_string (ds);
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- case '-':
- return decode_numeric (ds);
-
- case '[': return decode_array (ds);
- case '{': return decode_object (ds);
- case 't': return decode_true (ds);
- case 'f': return decode_false (ds);
- case 'n': return decode_null (ds);
-
- case ' ':
- case '\t':
- case '\r':
- case '\n':
- // White space
- ds->start ++;
- break;
-
- default:
- return SetError(ds, -1, "Expected object or value");
- }
+ case '\"':
+ return decode_string (ds);
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ case '-':
+ return decode_numeric (ds);
+
+ case '[': return decode_array (ds);
+ case '{': return decode_object (ds);
+ case 't': return decode_true (ds);
+ case 'f': return decode_false (ds);
+ case 'n': return decode_null (ds);
+
+ case ' ':
+ case '\t':
+ case '\r':
+ case '\n':
+ // White space
+ ds->start ++;
+ break;
+
+ default:
+ return SetError(ds, -1, "Expected object or value");
}
+ }
}
-
JSOBJ JSON_DecodeObject(JSONObjectDecoder *dec, const char *buffer, size_t cbBuffer)
{
-
- /*
- FIXME: Base the size of escBuffer of that of cbBuffer so that the unicode escaping doesn't run into the wall each time */
- struct DecoderState ds;
- wchar_t escBuffer[(JSON_MAX_STACK_BUFFER_SIZE / sizeof(wchar_t))];
- JSOBJ ret;
-
- ds.start = (char *) buffer;
- ds.end = ds.start + cbBuffer;
-
- ds.escStart = escBuffer;
- ds.escEnd = ds.escStart + (JSON_MAX_STACK_BUFFER_SIZE / sizeof(wchar_t));
- ds.escHeap = 0;
- ds.dec = dec;
- ds.dec->errorStr = NULL;
- ds.dec->errorOffset = NULL;
-
- ds.dec = dec;
-
- ret = decode_any (&ds);
-
- if (ds.escHeap)
- {
- dec->free(ds.escStart);
- }
- return ret;
+ /*
+ FIXME: Base the size of escBuffer of that of cbBuffer so that the unicode escaping doesn't run into the wall each time */
+ struct DecoderState ds;
+ wchar_t escBuffer[(JSON_MAX_STACK_BUFFER_SIZE / sizeof(wchar_t))];
+ JSOBJ ret;
+
+ ds.start = (char *) buffer;
+ ds.end = ds.start + cbBuffer;
+
+ ds.escStart = escBuffer;
+ ds.escEnd = ds.escStart + (JSON_MAX_STACK_BUFFER_SIZE / sizeof(wchar_t));
+ ds.escHeap = 0;
+ ds.prv = dec->prv;
+ ds.dec = dec;
+ ds.dec->errorStr = NULL;
+ ds.dec->errorOffset = NULL;
+ ds.objDepth = 0;
+
+ ds.dec = dec;
+
+ ret = decode_any (&ds);
+
+ if (ds.escHeap)
+ {
+ dec->free(ds.escStart);
+ }
+
+ SkipWhitespace(&ds);
+
+ if (ds.start != ds.end && ret)
+ {
+ dec->releaseObject(ds.prv, ret, ds.dec);
+ return SetError(&ds, -1, "Trailing data");
+ }
+
+ return ret;
}
diff --git a/pandas/src/ujson/lib/ultrajsonenc.c b/pandas/src/ujson/lib/ultrajsonenc.c
index 22871513870b7..01fc7c10fe755 100644
--- a/pandas/src/ujson/lib/ultrajsonenc.c
+++ b/pandas/src/ujson/lib/ultrajsonenc.c
@@ -1,37 +1,38 @@
/*
-Copyright (c) 2011, Jonas Tarnstrom and ESN Social Software AB
+Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
-1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-3. All advertising materials mentioning features or use of this software
- must display the following acknowledgement:
- This product includes software developed by ESN Social Software AB (www.esn.me).
-4. Neither the name of the ESN Social Software AB nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY ESN SOCIAL SOFTWARE AB ''AS IS'' AND ANY
-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the ESN Social Software AB nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-Portions of code from:
-MODP_ASCII - Ascii transformations (upper/lower, etc)
+
+Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
http://code.google.com/p/stringencoders/
Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
+Numeric decoder derived from from TCL library
+http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
+ * Copyright (c) 1988-1993 The Regents of the University of California.
+ * Copyright (c) 1994 Sun Microsystems, Inc.
*/
#include "ultrajson.h"
@@ -50,42 +51,57 @@ Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights rese
#define FALSE 0
#endif
+/*
+Worst cases being:
+
+Control characters (ASCII < 32)
+0x00 (1 byte) input => \u0000 output (6 bytes)
+1 * 6 => 6 (6 bytes required)
+
+or UTF-16 surrogate pairs
+4 bytes input in UTF-8 => \uXXXX\uYYYY (12 bytes).
+
+4 * 6 => 24 bytes (12 bytes required)
+
+The extra 2 bytes are for the quotes around the string
+
+*/
+#define RESERVE_STRING(_len) (2 + ((_len) * 6))
+
static const double g_pow10[] = {1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000, 10000000000, 100000000000, 1000000000000, 10000000000000, 100000000000000, 1000000000000000};
static const char g_hexChars[] = "0123456789abcdef";
static const char g_escapeChars[] = "0123456789\\b\\t\\n\\f\\r\\\"\\\\\\/";
-
/*
FIXME: While this is fine dandy and working it's a magic value mess which probably only the author understands.
Needs a cleanup and more documentation */
/*
Table for pure ascii output escaping all characters above 127 to \uXXXX */
-static const JSUINT8 g_asciiOutputTable[256] =
+static const JSUINT8 g_asciiOutputTable[256] =
{
-/* 0x00 */ 0, 30, 30, 30, 30, 30, 30, 30, 10, 12, 14, 30, 16, 18, 30, 30,
+/* 0x00 */ 0, 30, 30, 30, 30, 30, 30, 30, 10, 12, 14, 30, 16, 18, 30, 30,
/* 0x10 */ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
-/* 0x20 */ 1, 1, 20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 24,
-/* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-/* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0x20 */ 1, 1, 20, 1, 1, 1, 29, 1, 1, 1, 1, 1, 1, 1, 1, 24,
+/* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 29, 1, 29, 1,
+/* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 22, 1, 1, 1,
-/* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-/* 0x80 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0x80 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* 0x90 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-/* 0xa0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0xa0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* 0xb0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-/* 0xc0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+/* 0xc0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* 0xd0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-/* 0xe0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+/* 0xe0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
/* 0xf0 */ 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 1, 1
};
-
static void SetError (JSOBJ obj, JSONObjectEncoder *enc, const char *message)
{
- enc->errorMsg = message;
- enc->errorObj = obj;
+ enc->errorMsg = message;
+ enc->errorObj = obj;
}
/*
@@ -93,332 +109,357 @@ FIXME: Keep track of how big these get across several encoder calls and try to m
That way we won't run our head into the wall each call */
void Buffer_Realloc (JSONObjectEncoder *enc, size_t cbNeeded)
{
- size_t curSize = enc->end - enc->start;
- size_t newSize = curSize * 2;
- size_t offset = enc->offset - enc->start;
-
- while (newSize < curSize + cbNeeded)
+ size_t curSize = enc->end - enc->start;
+ size_t newSize = curSize * 2;
+ size_t offset = enc->offset - enc->start;
+
+ while (newSize < curSize + cbNeeded)
+ {
+ newSize *= 2;
+ }
+
+ if (enc->heap)
+ {
+ enc->start = (char *) enc->realloc (enc->start, newSize);
+ if (!enc->start)
{
- newSize *= 2;
+ SetError (NULL, enc, "Could not reserve memory block");
+ return;
}
-
- if (enc->heap)
+ }
+ else
+ {
+ char *oldStart = enc->start;
+ enc->heap = 1;
+ enc->start = (char *) enc->malloc (newSize);
+ if (!enc->start)
{
- enc->start = (char *) enc->realloc (enc->start, newSize);
- if (!enc->start)
- {
- SetError (NULL, enc, "Could not reserve memory block");
- return;
- }
+ SetError (NULL, enc, "Could not reserve memory block");
+ return;
}
- else
- {
- char *oldStart = enc->start;
- enc->heap = 1;
- enc->start = (char *) enc->malloc (newSize);
- if (!enc->start)
- {
- SetError (NULL, enc, "Could not reserve memory block");
- return;
- }
- memcpy (enc->start, oldStart, offset);
- }
- enc->offset = enc->start + offset;
- enc->end = enc->start + newSize;
+ memcpy (enc->start, oldStart, offset);
+ }
+ enc->offset = enc->start + offset;
+ enc->end = enc->start + newSize;
}
FASTCALL_ATTR INLINE_PREFIX void FASTCALL_MSVC Buffer_AppendShortHexUnchecked (char *outputOffset, unsigned short value)
{
- *(outputOffset++) = g_hexChars[(value & 0xf000) >> 12];
- *(outputOffset++) = g_hexChars[(value & 0x0f00) >> 8];
- *(outputOffset++) = g_hexChars[(value & 0x00f0) >> 4];
- *(outputOffset++) = g_hexChars[(value & 0x000f) >> 0];
+ *(outputOffset++) = g_hexChars[(value & 0xf000) >> 12];
+ *(outputOffset++) = g_hexChars[(value & 0x0f00) >> 8];
+ *(outputOffset++) = g_hexChars[(value & 0x00f0) >> 4];
+ *(outputOffset++) = g_hexChars[(value & 0x000f) >> 0];
}
-int Buffer_EscapeStringUnvalidated (JSOBJ obj, JSONObjectEncoder *enc, const char *io, const char *end)
+int Buffer_EscapeStringUnvalidated (JSONObjectEncoder *enc, const char *io, const char *end)
{
- char *of = (char *) enc->offset;
+ char *of = (char *) enc->offset;
- while (1)
+ for (;;)
+ {
+ switch (*io)
{
- switch (*io)
+ case 0x00:
+ {
+ if (io < end)
{
- case 0x00:
- if (io < end)
- {
- *(of++) = '\\';
- *(of++) = 'u';
- *(of++) = '0';
- *(of++) = '0';
- *(of++) = '0';
- *(of++) = '0';
- break;
- }
- else
- {
- enc->offset += (of - enc->offset);
- return TRUE;
- }
-
- case '\"': (*of++) = '\\'; (*of++) = '\"'; break;
- case '\\': (*of++) = '\\'; (*of++) = '\\'; break;
- case '/': (*of++) = '\\'; (*of++) = '/'; break;
- case '\b': (*of++) = '\\'; (*of++) = 'b'; break;
- case '\f': (*of++) = '\\'; (*of++) = 'f'; break;
- case '\n': (*of++) = '\\'; (*of++) = 'n'; break;
- case '\r': (*of++) = '\\'; (*of++) = 'r'; break;
- case '\t': (*of++) = '\\'; (*of++) = 't'; break;
-
- case 0x01:
- case 0x02:
- case 0x03:
- case 0x04:
- case 0x05:
- case 0x06:
- case 0x07:
- case 0x0b:
- case 0x0e:
- case 0x0f:
- case 0x10:
- case 0x11:
- case 0x12:
- case 0x13:
- case 0x14:
- case 0x15:
- case 0x16:
- case 0x17:
- case 0x18:
- case 0x19:
- case 0x1a:
- case 0x1b:
- case 0x1c:
- case 0x1d:
- case 0x1e:
- case 0x1f:
- *(of++) = '\\';
- *(of++) = 'u';
- *(of++) = '0';
- *(of++) = '0';
- *(of++) = g_hexChars[ (unsigned char) (((*io) & 0xf0) >> 4)];
- *(of++) = g_hexChars[ (unsigned char) ((*io) & 0x0f)];
- break;
-
- default: (*of++) = (*io); break;
+ *(of++) = '\\';
+ *(of++) = 'u';
+ *(of++) = '0';
+ *(of++) = '0';
+ *(of++) = '0';
+ *(of++) = '0';
+ break;
}
-
- io++;
- }
-
- return FALSE;
+ else
+ {
+ enc->offset += (of - enc->offset);
+ return TRUE;
+ }
+ }
+ case '\"': (*of++) = '\\'; (*of++) = '\"'; break;
+ case '\\': (*of++) = '\\'; (*of++) = '\\'; break;
+ case '/': (*of++) = '\\'; (*of++) = '/'; break;
+ case '\b': (*of++) = '\\'; (*of++) = 'b'; break;
+ case '\f': (*of++) = '\\'; (*of++) = 'f'; break;
+ case '\n': (*of++) = '\\'; (*of++) = 'n'; break;
+ case '\r': (*of++) = '\\'; (*of++) = 'r'; break;
+ case '\t': (*of++) = '\\'; (*of++) = 't'; break;
+
+ case 0x26: // '/'
+ case 0x3c: // '<'
+ case 0x3e: // '>'
+ {
+ if (enc->encodeHTMLChars)
+ {
+ // Fall through to \u00XX case below.
+ }
+ else
+ {
+ // Same as default case below.
+ (*of++) = (*io);
+ break;
+ }
+ }
+ case 0x01:
+ case 0x02:
+ case 0x03:
+ case 0x04:
+ case 0x05:
+ case 0x06:
+ case 0x07:
+ case 0x0b:
+ case 0x0e:
+ case 0x0f:
+ case 0x10:
+ case 0x11:
+ case 0x12:
+ case 0x13:
+ case 0x14:
+ case 0x15:
+ case 0x16:
+ case 0x17:
+ case 0x18:
+ case 0x19:
+ case 0x1a:
+ case 0x1b:
+ case 0x1c:
+ case 0x1d:
+ case 0x1e:
+ case 0x1f:
+ {
+ *(of++) = '\\';
+ *(of++) = 'u';
+ *(of++) = '0';
+ *(of++) = '0';
+ *(of++) = g_hexChars[ (unsigned char) (((*io) & 0xf0) >> 4)];
+ *(of++) = g_hexChars[ (unsigned char) ((*io) & 0x0f)];
+ break;
+ }
+ default: (*of++) = (*io); break;
+ }
+ io++;
+ }
}
-
-/*
-FIXME:
-This code only works with Little and Big Endian
-
-FIXME: The JSON spec says escape "/" but non of the others do and we don't
-want to be left alone doing it so we don't :)
-
-*/
int Buffer_EscapeStringValidated (JSOBJ obj, JSONObjectEncoder *enc, const char *io, const char *end)
{
- JSUTF32 ucs;
- char *of = (char *) enc->offset;
+ JSUTF32 ucs;
+ char *of = (char *) enc->offset;
- while (1)
+ for (;;)
+ {
+ JSUINT8 utflen = g_asciiOutputTable[(unsigned char) *io];
+
+ switch (utflen)
{
+ case 0:
+ {
+ if (io < end)
+ {
+ *(of++) = '\\';
+ *(of++) = 'u';
+ *(of++) = '0';
+ *(of++) = '0';
+ *(of++) = '0';
+ *(of++) = '0';
+ io ++;
+ continue;
+ }
+ else
+ {
+ enc->offset += (of - enc->offset);
+ return TRUE;
+ }
+ }
- //JSUINT8 chr = (unsigned char) *io;
- JSUINT8 utflen = g_asciiOutputTable[(unsigned char) *io];
+ case 1:
+ {
+ *(of++)= (*io++);
+ continue;
+ }
- switch (utflen)
+ case 2:
+ {
+ JSUTF32 in;
+ JSUTF16 in16;
+
+ if (end - io < 1)
{
- case 0:
- {
- if (io < end)
- {
- *(of++) = '\\';
- *(of++) = 'u';
- *(of++) = '0';
- *(of++) = '0';
- *(of++) = '0';
- *(of++) = '0';
- io ++;
- continue;
- }
- else
- {
- enc->offset += (of - enc->offset);
- return TRUE;
- }
- }
-
- case 1:
- {
- *(of++)= (*io++);
- continue;
- }
-
- case 2:
- {
- JSUTF32 in;
- JSUTF16 in16;
-
- if (end - io < 1)
- {
- enc->offset += (of - enc->offset);
- SetError (obj, enc, "Unterminated UTF-8 sequence when encoding string");
- return FALSE;
- }
-
- memcpy(&in16, io, sizeof(JSUTF16));
- in = (JSUTF32) in16;
+ enc->offset += (of - enc->offset);
+ SetError (obj, enc, "Unterminated UTF-8 sequence when encoding string");
+ return FALSE;
+ }
+
+ memcpy(&in16, io, sizeof(JSUTF16));
+ in = (JSUTF32) in16;
#ifdef __LITTLE_ENDIAN__
- ucs = ((in & 0x1f) << 6) | ((in >> 8) & 0x3f);
+ ucs = ((in & 0x1f) << 6) | ((in >> 8) & 0x3f);
#else
- ucs = ((in & 0x1f00) >> 2) | (in & 0x3f);
+ ucs = ((in & 0x1f00) >> 2) | (in & 0x3f);
#endif
- if (ucs < 0x80)
- {
- enc->offset += (of - enc->offset);
- SetError (obj, enc, "Overlong 2 byte UTF-8 sequence detected when encoding string");
- return FALSE;
- }
-
- io += 2;
- break;
- }
-
- case 3:
- {
- JSUTF32 in;
- JSUTF16 in16;
- JSUINT8 in8;
-
- if (end - io < 2)
- {
- enc->offset += (of - enc->offset);
- SetError (obj, enc, "Unterminated UTF-8 sequence when encoding string");
- return FALSE;
- }
-
- memcpy(&in16, io, sizeof(JSUTF16));
- memcpy(&in8, io + 2, sizeof(JSUINT8));
+ if (ucs < 0x80)
+ {
+ enc->offset += (of - enc->offset);
+ SetError (obj, enc, "Overlong 2 byte UTF-8 sequence detected when encoding string");
+ return FALSE;
+ }
+
+ io += 2;
+ break;
+ }
+
+ case 3:
+ {
+ JSUTF32 in;
+ JSUTF16 in16;
+ JSUINT8 in8;
+
+ if (end - io < 2)
+ {
+ enc->offset += (of - enc->offset);
+ SetError (obj, enc, "Unterminated UTF-8 sequence when encoding string");
+ return FALSE;
+ }
+
+ memcpy(&in16, io, sizeof(JSUTF16));
+ memcpy(&in8, io + 2, sizeof(JSUINT8));
#ifdef __LITTLE_ENDIAN__
- in = (JSUTF32) in16;
- in |= in8 << 16;
- ucs = ((in & 0x0f) << 12) | ((in & 0x3f00) >> 2) | ((in & 0x3f0000) >> 16);
+ in = (JSUTF32) in16;
+ in |= in8 << 16;
+ ucs = ((in & 0x0f) << 12) | ((in & 0x3f00) >> 2) | ((in & 0x3f0000) >> 16);
#else
- in = in16 << 8;
- in |= in8;
- ucs = ((in & 0x0f0000) >> 4) | ((in & 0x3f00) >> 2) | (in & 0x3f);
+ in = in16 << 8;
+ in |= in8;
+ ucs = ((in & 0x0f0000) >> 4) | ((in & 0x3f00) >> 2) | (in & 0x3f);
#endif
+ if (ucs < 0x800)
+ {
+ enc->offset += (of - enc->offset);
+ SetError (obj, enc, "Overlong 3 byte UTF-8 sequence detected when encoding string");
+ return FALSE;
+ }
+
+ io += 3;
+ break;
+ }
+ case 4:
+ {
+ JSUTF32 in;
- if (ucs < 0x800)
- {
- enc->offset += (of - enc->offset);
- SetError (obj, enc, "Overlong 3 byte UTF-8 sequence detected when encoding string");
- return FALSE;
- }
-
- io += 3;
- break;
- }
- case 4:
- {
- JSUTF32 in;
-
- if (end - io < 3)
- {
- enc->offset += (of - enc->offset);
- SetError (obj, enc, "Unterminated UTF-8 sequence when encoding string");
- return FALSE;
- }
-
- memcpy(&in, io, sizeof(JSUTF32));
+ if (end - io < 3)
+ {
+ enc->offset += (of - enc->offset);
+ SetError (obj, enc, "Unterminated UTF-8 sequence when encoding string");
+ return FALSE;
+ }
+
+ memcpy(&in, io, sizeof(JSUTF32));
#ifdef __LITTLE_ENDIAN__
- ucs = ((in & 0x07) << 18) | ((in & 0x3f00) << 4) | ((in & 0x3f0000) >> 10) | ((in & 0x3f000000) >> 24);
+ ucs = ((in & 0x07) << 18) | ((in & 0x3f00) << 4) | ((in & 0x3f0000) >> 10) | ((in & 0x3f000000) >> 24);
#else
- ucs = ((in & 0x07000000) >> 6) | ((in & 0x3f0000) >> 4) | ((in & 0x3f00) >> 2) | (in & 0x3f);
+ ucs = ((in & 0x07000000) >> 6) | ((in & 0x3f0000) >> 4) | ((in & 0x3f00) >> 2) | (in & 0x3f);
#endif
- if (ucs < 0x10000)
- {
- enc->offset += (of - enc->offset);
- SetError (obj, enc, "Overlong 4 byte UTF-8 sequence detected when encoding string");
- return FALSE;
- }
-
- io += 4;
- break;
- }
-
-
- case 5:
- case 6:
- enc->offset += (of - enc->offset);
- SetError (obj, enc, "Unsupported UTF-8 sequence length when encoding string");
- return FALSE;
-
- case 30:
- // \uXXXX encode
- *(of++) = '\\';
- *(of++) = 'u';
- *(of++) = '0';
- *(of++) = '0';
- *(of++) = g_hexChars[ (unsigned char) (((*io) & 0xf0) >> 4)];
- *(of++) = g_hexChars[ (unsigned char) ((*io) & 0x0f)];
- io ++;
- continue;
-
- case 10:
- case 12:
- case 14:
- case 16:
- case 18:
- case 20:
- case 22:
- case 24:
- *(of++) = *( (char *) (g_escapeChars + utflen + 0));
- *(of++) = *( (char *) (g_escapeChars + utflen + 1));
- io ++;
- continue;
+ if (ucs < 0x10000)
+ {
+ enc->offset += (of - enc->offset);
+ SetError (obj, enc, "Overlong 4 byte UTF-8 sequence detected when encoding string");
+ return FALSE;
}
- /*
- If the character is a UTF8 sequence of length > 1 we end up here */
- if (ucs >= 0x10000)
+ io += 4;
+ break;
+ }
+
+
+ case 5:
+ case 6:
+ {
+ enc->offset += (of - enc->offset);
+ SetError (obj, enc, "Unsupported UTF-8 sequence length when encoding string");
+ return FALSE;
+ }
+
+ case 29:
+ {
+ if (enc->encodeHTMLChars)
{
- ucs -= 0x10000;
- *(of++) = '\\';
- *(of++) = 'u';
- Buffer_AppendShortHexUnchecked(of, (ucs >> 10) + 0xd800);
- of += 4;
-
- *(of++) = '\\';
- *(of++) = 'u';
- Buffer_AppendShortHexUnchecked(of, (ucs & 0x3ff) + 0xdc00);
- of += 4;
+ // Fall through to \u00XX case 30 below.
}
else
{
- *(of++) = '\\';
- *(of++) = 'u';
- Buffer_AppendShortHexUnchecked(of, ucs);
- of += 4;
+ // Same as case 1 above.
+ *(of++) = (*io++);
+ continue;
}
+ }
+
+ case 30:
+ {
+ // \uXXXX encode
+ *(of++) = '\\';
+ *(of++) = 'u';
+ *(of++) = '0';
+ *(of++) = '0';
+ *(of++) = g_hexChars[ (unsigned char) (((*io) & 0xf0) >> 4)];
+ *(of++) = g_hexChars[ (unsigned char) ((*io) & 0x0f)];
+ io ++;
+ continue;
+ }
+ case 10:
+ case 12:
+ case 14:
+ case 16:
+ case 18:
+ case 20:
+ case 22:
+ case 24:
+ {
+ *(of++) = *( (char *) (g_escapeChars + utflen + 0));
+ *(of++) = *( (char *) (g_escapeChars + utflen + 1));
+ io ++;
+ continue;
+ }
+ // This can never happen, it's here to make L4 VC++ happy
+ default:
+ {
+ ucs = 0;
+ break;
+ }
}
- return FALSE;
+ /*
+ If the character is a UTF8 sequence of length > 1 we end up here */
+ if (ucs >= 0x10000)
+ {
+ ucs -= 0x10000;
+ *(of++) = '\\';
+ *(of++) = 'u';
+ Buffer_AppendShortHexUnchecked(of, (unsigned short) (ucs >> 10) + 0xd800);
+ of += 4;
+
+ *(of++) = '\\';
+ *(of++) = 'u';
+ Buffer_AppendShortHexUnchecked(of, (unsigned short) (ucs & 0x3ff) + 0xdc00);
+ of += 4;
+ }
+ else
+ {
+ *(of++) = '\\';
+ *(of++) = 'u';
+ Buffer_AppendShortHexUnchecked(of, (unsigned short) ucs);
+ of += 4;
+ }
+ }
}
#define Buffer_Reserve(__enc, __len) \
- if ((__enc)->end - (__enc)->offset < (__len)) \
+ if ( (size_t) ((__enc)->end - (__enc)->offset) < (size_t) (__len)) \
{ \
- Buffer_Realloc((__enc), (__len));\
+ Buffer_Realloc((__enc), (__len));\
} \
@@ -427,176 +468,180 @@ int Buffer_EscapeStringValidated (JSOBJ obj, JSONObjectEncoder *enc, const char
FASTCALL_ATTR INLINE_PREFIX void FASTCALL_MSVC strreverse(char* begin, char* end)
{
- char aux;
- while (end > begin)
- aux = *end, *end-- = *begin, *begin++ = aux;
+ char aux;
+ while (end > begin)
+ aux = *end, *end-- = *begin, *begin++ = aux;
}
void Buffer_AppendIntUnchecked(JSONObjectEncoder *enc, JSINT32 value)
{
- char* wstr;
- JSUINT32 uvalue = (value < 0) ? -value : value;
+ char* wstr;
+ JSUINT32 uvalue = (value < 0) ? -value : value;
- wstr = enc->offset;
- // Conversion. Number is reversed.
-
- do *wstr++ = (char)(48 + (uvalue % 10)); while(uvalue /= 10);
- if (value < 0) *wstr++ = '-';
+ wstr = enc->offset;
+ // Conversion. Number is reversed.
- // Reverse string
- strreverse(enc->offset,wstr - 1);
- enc->offset += (wstr - (enc->offset));
+ do *wstr++ = (char)(48 + (uvalue % 10)); while(uvalue /= 10);
+ if (value < 0) *wstr++ = '-';
+
+ // Reverse string
+ strreverse(enc->offset,wstr - 1);
+ enc->offset += (wstr - (enc->offset));
}
void Buffer_AppendLongUnchecked(JSONObjectEncoder *enc, JSINT64 value)
{
- char* wstr;
- JSUINT64 uvalue = (value < 0) ? -value : value;
+ char* wstr;
+ JSUINT64 uvalue = (value < 0) ? -value : value;
- wstr = enc->offset;
- // Conversion. Number is reversed.
-
- do *wstr++ = (char)(48 + (uvalue % 10ULL)); while(uvalue /= 10ULL);
- if (value < 0) *wstr++ = '-';
+ wstr = enc->offset;
+ // Conversion. Number is reversed.
- // Reverse string
- strreverse(enc->offset,wstr - 1);
- enc->offset += (wstr - (enc->offset));
+ do *wstr++ = (char)(48 + (uvalue % 10ULL)); while(uvalue /= 10ULL);
+ if (value < 0) *wstr++ = '-';
+
+ // Reverse string
+ strreverse(enc->offset,wstr - 1);
+ enc->offset += (wstr - (enc->offset));
}
int Buffer_AppendDoubleUnchecked(JSOBJ obj, JSONObjectEncoder *enc, double value)
{
- /* if input is larger than thres_max, revert to exponential */
- const double thres_max = (double) 1e16 - 1;
- int count;
- double diff = 0.0;
- char* str = enc->offset;
- char* wstr = str;
- unsigned long long whole;
- double tmp;
- unsigned long long frac;
- int neg;
- double pow10;
-
- if (value == HUGE_VAL || value == -HUGE_VAL)
- {
- SetError (obj, enc, "Invalid Inf value when encoding double");
- return FALSE;
- }
- if (! (value == value))
- {
- SetError (obj, enc, "Invalid Nan value when encoding double");
- return FALSE;
- }
-
+ /* if input is larger than thres_max, revert to exponential */
+ const double thres_max = (double) 1e16 - 1;
+ int count;
+ double diff = 0.0;
+ char* str = enc->offset;
+ char* wstr = str;
+ unsigned long long whole;
+ double tmp;
+ unsigned long long frac;
+ int neg;
+ double pow10;
+
+ if (value == HUGE_VAL || value == -HUGE_VAL)
+ {
+ SetError (obj, enc, "Invalid Inf value when encoding double");
+ return FALSE;
+ }
- /* we'll work in positive values and deal with the
- negative sign issue later */
- neg = 0;
- if (value < 0)
+ if (!(value == value))
+ {
+ SetError (obj, enc, "Invalid Nan value when encoding double");
+ return FALSE;
+ }
+
+ /* we'll work in positive values and deal with the
+ negative sign issue later */
+ neg = 0;
+ if (value < 0)
+ {
+ neg = 1;
+ value = -value;
+ }
+
+ pow10 = g_pow10[enc->doublePrecision];
+
+ whole = (unsigned long long) value;
+ tmp = (value - whole) * pow10;
+ frac = (unsigned long long)(tmp);
+ diff = tmp - frac;
+
+ if (diff > 0.5)
+ {
+ ++frac;
+ /* handle rollover, e.g. case 0.99 with prec 1 is 1.0 */
+ if (frac >= pow10)
{
- neg = 1;
- value = -value;
+ frac = 0;
+ ++whole;
}
+ }
+ else
+ if (diff == 0.5 && ((frac == 0) || (frac & 1)))
+ {
+ /* if halfway, round up if odd, OR
+ if last digit is 0. That last part is strange */
+ ++frac;
+ }
+
+ /* for very large numbers switch back to native sprintf for exponentials.
+ anyone want to write code to replace this? */
+ /*
+ normal printf behavior is to print EVERY whole number digit
+ which can be 100s of characters overflowing your buffers == bad
+ */
+ if (value > thres_max)
+ {
+#ifdef _WIN32
+ enc->offset += sprintf_s(str, enc->end - enc->offset, "%.15e", neg ? -value : value);
+#else
+ enc->offset += snprintf(str, enc->end - enc->offset, "%.15e", neg ? -value : value);
+#endif
+ return TRUE;
+ }
- pow10 = g_pow10[enc->doublePrecision];
-
- whole = (unsigned long long) value;
- tmp = (value - whole) * pow10;
- frac = (unsigned long long)(tmp);
- diff = tmp - frac;
+ if (enc->doublePrecision == 0)
+ {
+ diff = value - whole;
- if (diff > 0.5)
+ if (diff > 0.5)
{
- ++frac;
- /* handle rollover, e.g. case 0.99 with prec 1 is 1.0 */
- if (frac >= pow10)
- {
- frac = 0;
- ++whole;
- }
- }
- else
- if (diff == 0.5 && ((frac == 0) || (frac & 1)))
- {
- /* if halfway, round up if odd, OR
- if last digit is 0. That last part is strange */
- ++frac;
+ /* greater than 0.5, round up, e.g. 1.6 -> 2 */
+ ++whole;
}
-
- /* for very large numbers switch back to native sprintf for exponentials.
- anyone want to write code to replace this? */
- /*
- normal printf behavior is to print EVERY whole number digit
- which can be 100s of characters overflowing your buffers == bad
- */
- if (value > thres_max)
+ else
+ if (diff == 0.5 && (whole & 1))
{
- enc->offset += sprintf(str, "%.15e", neg ? -value : value);
- return TRUE;
+ /* exactly 0.5 and ODD, then round up */
+ /* 1.5 -> 2, but 2.5 -> 2 */
+ ++whole;
}
- if (enc->doublePrecision == 0)
+ //vvvvvvvvvvvvvvvvvvv Diff from modp_dto2
+ }
+ else
+ if (frac)
{
- diff = value - whole;
-
- if (diff > 0.5)
- {
- /* greater than 0.5, round up, e.g. 1.6 -> 2 */
- ++whole;
- }
- else
- if (diff == 0.5 && (whole & 1))
- {
- /* exactly 0.5 and ODD, then round up */
- /* 1.5 -> 2, but 2.5 -> 2 */
- ++whole;
- }
-
- //vvvvvvvvvvvvvvvvvvv Diff from modp_dto2
- }
- else
- if (frac)
- {
- count = enc->doublePrecision;
- // now do fractional part, as an unsigned number
- // we know it is not 0 but we can have leading zeros, these
- // should be removed
- while (!(frac % 10))
- {
+ count = enc->doublePrecision;
+ // now do fractional part, as an unsigned number
+ // we know it is not 0 but we can have leading zeros, these
+ // should be removed
+ while (!(frac % 10))
+ {
--count;
frac /= 10;
- }
- //^^^^^^^^^^^^^^^^^^^ Diff from modp_dto2
+ }
+ //^^^^^^^^^^^^^^^^^^^ Diff from modp_dto2
- // now do fractional part, as an unsigned number
- do
- {
- --count;
- *wstr++ = (char)(48 + (frac % 10));
- } while (frac /= 10);
- // add extra 0s
- while (count-- > 0)
- {
- *wstr++ = '0';
- }
- // add decimal
- *wstr++ = '.';
+ // now do fractional part, as an unsigned number
+ do
+ {
+ --count;
+ *wstr++ = (char)(48 + (frac % 10));
+ } while (frac /= 10);
+ // add extra 0s
+ while (count-- > 0)
+ {
+ *wstr++ = '0';
+ }
+ // add decimal
+ *wstr++ = '.';
}
else
{
- *wstr++ = '0';
- *wstr++ = '.';
+ *wstr++ = '0';
+ *wstr++ = '.';
}
// do whole part
// Take care of sign
// Conversion. Number is reversed.
do *wstr++ = (char)(48 + (whole % 10)); while (whole /= 10);
-
- if (neg)
+
+ if (neg)
{
- *wstr++ = '-';
+ *wstr++ = '-';
}
strreverse(str, wstr-1);
enc->offset += (wstr - (enc->offset));
@@ -604,11 +649,6 @@ int Buffer_AppendDoubleUnchecked(JSOBJ obj, JSONObjectEncoder *enc, double value
return TRUE;
}
-
-
-
-
-
/*
FIXME:
Handle integration functions returning NULL here */
@@ -619,62 +659,57 @@ Perhaps implement recursion detection */
void encode(JSOBJ obj, JSONObjectEncoder *enc, const char *name, size_t cbName)
{
- const char *value;
- char *objName;
- int count;
- JSOBJ iterObj;
- size_t szlen;
- JSONTypeContext tc;
- tc.encoder = enc;
-
- if (enc->level > enc->recursionMax)
+ const char *value;
+ char *objName;
+ int count;
+ JSOBJ iterObj;
+ size_t szlen;
+ JSONTypeContext tc;
+ tc.encoder = enc;
+
+ if (enc->level > enc->recursionMax)
+ {
+ SetError (obj, enc, "Maximum recursion level reached");
+ return;
+ }
+
+ /*
+ This reservation must hold
+
+ length of _name as encoded worst case +
+ maxLength of double to string OR maxLength of JSLONG to string
+ */
+
+ Buffer_Reserve(enc, 256 + RESERVE_STRING(cbName));
+ if (enc->errorMsg)
+ {
+ return;
+ }
+
+ if (name)
+ {
+ Buffer_AppendCharUnchecked(enc, '\"');
+
+ if (enc->forceASCII)
{
- SetError (obj, enc, "Maximum recursion level reached");
+ if (!Buffer_EscapeStringValidated(obj, enc, name, name + cbName))
+ {
return;
+ }
}
-
- /*
- This reservation must hold
-
- length of _name as encoded worst case +
- maxLength of double to string OR maxLength of JSLONG to string
-
- Since input is assumed to be UTF-8 the worst character length is:
-
- 4 bytes (of UTF-8) => "\uXXXX\uXXXX" (12 bytes)
- */
-
- Buffer_Reserve(enc, 256 + (((cbName / 4) + 1) * 12));
- if (enc->errorMsg)
+ else
{
+ if (!Buffer_EscapeStringUnvalidated(enc, name, name + cbName))
+ {
return;
+ }
}
- if (name)
- {
- Buffer_AppendCharUnchecked(enc, '\"');
+ Buffer_AppendCharUnchecked(enc, '\"');
- if (enc->forceASCII)
- {
- if (!Buffer_EscapeStringValidated(obj, enc, name, name + cbName))
- {
- return;
- }
- }
- else
- {
- if (!Buffer_EscapeStringUnvalidated(obj, enc, name, name + cbName))
- {
- return;
- }
- }
-
-
- Buffer_AppendCharUnchecked(enc, '\"');
-
- Buffer_AppendCharUnchecked (enc, ':');
+ Buffer_AppendCharUnchecked (enc, ':');
#ifndef JSON_NO_EXTRA_WHITESPACE
- Buffer_AppendCharUnchecked (enc, ' ');
+ Buffer_AppendCharUnchecked (enc, ' ');
#endif
}
@@ -682,210 +717,209 @@ void encode(JSOBJ obj, JSONObjectEncoder *enc, const char *name, size_t cbName)
switch (tc.type)
{
- case JT_INVALID:
- return;
+ case JT_INVALID:
+ {
+ return;
+ }
- case JT_ARRAY:
- {
- count = 0;
- enc->iterBegin(obj, &tc);
+ case JT_ARRAY:
+ {
+ count = 0;
+ enc->iterBegin(obj, &tc);
- Buffer_AppendCharUnchecked (enc, '[');
+ Buffer_AppendCharUnchecked (enc, '[');
- while (enc->iterNext(obj, &tc))
- {
- if (count > 0)
- {
- Buffer_AppendCharUnchecked (enc, ',');
+ while (enc->iterNext(obj, &tc))
+ {
+ if (count > 0)
+ {
+ Buffer_AppendCharUnchecked (enc, ',');
#ifndef JSON_NO_EXTRA_WHITESPACE
- Buffer_AppendCharUnchecked (buffer, ' ');
+ Buffer_AppendCharUnchecked (buffer, ' ');
#endif
- }
+ }
- iterObj = enc->iterGetValue(obj, &tc);
+ iterObj = enc->iterGetValue(obj, &tc);
- enc->level ++;
- encode (iterObj, enc, NULL, 0);
- count ++;
- }
+ enc->level ++;
+ encode (iterObj, enc, NULL, 0);
+ count ++;
+ }
- enc->iterEnd(obj, &tc);
- Buffer_AppendCharUnchecked (enc, ']');
- break;
- }
+ enc->iterEnd(obj, &tc);
+ Buffer_AppendCharUnchecked (enc, ']');
+ break;
+ }
- case JT_OBJECT:
- {
- count = 0;
- enc->iterBegin(obj, &tc);
+ case JT_OBJECT:
+ {
+ count = 0;
+ enc->iterBegin(obj, &tc);
- Buffer_AppendCharUnchecked (enc, '{');
+ Buffer_AppendCharUnchecked (enc, '{');
- while (enc->iterNext(obj, &tc))
- {
- if (count > 0)
- {
- Buffer_AppendCharUnchecked (enc, ',');
+ while (enc->iterNext(obj, &tc))
+ {
+ if (count > 0)
+ {
+ Buffer_AppendCharUnchecked (enc, ',');
#ifndef JSON_NO_EXTRA_WHITESPACE
- Buffer_AppendCharUnchecked (enc, ' ');
+ Buffer_AppendCharUnchecked (enc, ' ');
#endif
- }
+ }
- iterObj = enc->iterGetValue(obj, &tc);
- objName = enc->iterGetName(obj, &tc, &szlen);
-
- enc->level ++;
- encode (iterObj, enc, objName, szlen);
- count ++;
- }
-
- enc->iterEnd(obj, &tc);
- Buffer_AppendCharUnchecked (enc, '}');
- break;
- }
-
- case JT_LONG:
- {
- Buffer_AppendLongUnchecked (enc, enc->getLongValue(obj, &tc));
- break;
- }
+ iterObj = enc->iterGetValue(obj, &tc);
+ objName = enc->iterGetName(obj, &tc, &szlen);
- case JT_INT:
- {
- Buffer_AppendIntUnchecked (enc, enc->getIntValue(obj, &tc));
- break;
- }
-
- case JT_TRUE:
- {
- Buffer_AppendCharUnchecked (enc, 't');
- Buffer_AppendCharUnchecked (enc, 'r');
- Buffer_AppendCharUnchecked (enc, 'u');
- Buffer_AppendCharUnchecked (enc, 'e');
- break;
- }
-
- case JT_FALSE:
- {
- Buffer_AppendCharUnchecked (enc, 'f');
- Buffer_AppendCharUnchecked (enc, 'a');
- Buffer_AppendCharUnchecked (enc, 'l');
- Buffer_AppendCharUnchecked (enc, 's');
- Buffer_AppendCharUnchecked (enc, 'e');
- break;
- }
+ enc->level ++;
+ encode (iterObj, enc, objName, szlen);
+ count ++;
+ }
+ enc->iterEnd(obj, &tc);
+ Buffer_AppendCharUnchecked (enc, '}');
+ break;
+ }
+
+ case JT_LONG:
+ {
+ Buffer_AppendLongUnchecked (enc, enc->getLongValue(obj, &tc));
+ break;
+ }
+
+ case JT_INT:
+ {
+ Buffer_AppendIntUnchecked (enc, enc->getIntValue(obj, &tc));
+ break;
+ }
+
+ case JT_TRUE:
+ {
+ Buffer_AppendCharUnchecked (enc, 't');
+ Buffer_AppendCharUnchecked (enc, 'r');
+ Buffer_AppendCharUnchecked (enc, 'u');
+ Buffer_AppendCharUnchecked (enc, 'e');
+ break;
+ }
+
+ case JT_FALSE:
+ {
+ Buffer_AppendCharUnchecked (enc, 'f');
+ Buffer_AppendCharUnchecked (enc, 'a');
+ Buffer_AppendCharUnchecked (enc, 'l');
+ Buffer_AppendCharUnchecked (enc, 's');
+ Buffer_AppendCharUnchecked (enc, 'e');
+ break;
+ }
+
+
+ case JT_NULL:
+ {
+ Buffer_AppendCharUnchecked (enc, 'n');
+ Buffer_AppendCharUnchecked (enc, 'u');
+ Buffer_AppendCharUnchecked (enc, 'l');
+ Buffer_AppendCharUnchecked (enc, 'l');
+ break;
+ }
+
+ case JT_DOUBLE:
+ {
+ if (!Buffer_AppendDoubleUnchecked (obj, enc, enc->getDoubleValue(obj, &tc)))
+ {
+ enc->endTypeContext(obj, &tc);
+ enc->level --;
+ return;
+ }
+ break;
+ }
+
+ case JT_UTF8:
+ {
+ value = enc->getStringValue(obj, &tc, &szlen);
+ Buffer_Reserve(enc, RESERVE_STRING(szlen));
+ if (enc->errorMsg)
+ {
+ enc->endTypeContext(obj, &tc);
+ return;
+ }
+ Buffer_AppendCharUnchecked (enc, '\"');
- case JT_NULL:
+ if (enc->forceASCII)
+ {
+ if (!Buffer_EscapeStringValidated(obj, enc, value, value + szlen))
{
- Buffer_AppendCharUnchecked (enc, 'n');
- Buffer_AppendCharUnchecked (enc, 'u');
- Buffer_AppendCharUnchecked (enc, 'l');
- Buffer_AppendCharUnchecked (enc, 'l');
- break;
+ enc->endTypeContext(obj, &tc);
+ enc->level --;
+ return;
}
-
- case JT_DOUBLE:
+ }
+ else
+ {
+ if (!Buffer_EscapeStringUnvalidated(enc, value, value + szlen))
{
- if (!Buffer_AppendDoubleUnchecked (obj, enc, enc->getDoubleValue(obj, &tc)))
- {
- enc->endTypeContext(obj, &tc);
- enc->level --;
- return;
- }
- break;
+ enc->endTypeContext(obj, &tc);
+ enc->level --;
+ return;
}
+ }
- case JT_UTF8:
- {
- value = enc->getStringValue(obj, &tc, &szlen);
- Buffer_Reserve(enc, ((szlen / 4) + 1) * 12);
- if (enc->errorMsg)
- {
- enc->endTypeContext(obj, &tc);
- return;
- }
- Buffer_AppendCharUnchecked (enc, '\"');
-
-
- if (enc->forceASCII)
- {
- if (!Buffer_EscapeStringValidated(obj, enc, value, value + szlen))
- {
- enc->endTypeContext(obj, &tc);
- enc->level --;
- return;
- }
- }
- else
- {
- if (!Buffer_EscapeStringUnvalidated(obj, enc, value, value + szlen))
- {
- enc->endTypeContext(obj, &tc);
- enc->level --;
- return;
- }
- }
-
- Buffer_AppendCharUnchecked (enc, '\"');
- break;
- }
+ Buffer_AppendCharUnchecked (enc, '\"');
+ break;
}
+ }
- enc->endTypeContext(obj, &tc);
- enc->level --;
-
+ enc->endTypeContext(obj, &tc);
+ enc->level --;
}
char *JSON_EncodeObject(JSOBJ obj, JSONObjectEncoder *enc, char *_buffer, size_t _cbBuffer)
{
- enc->malloc = enc->malloc ? enc->malloc : malloc;
- enc->free = enc->free ? enc->free : free;
- enc->realloc = enc->realloc ? enc->realloc : realloc;
- enc->errorMsg = NULL;
- enc->errorObj = NULL;
- enc->level = 0;
-
- if (enc->recursionMax < 1)
- {
- enc->recursionMax = JSON_MAX_RECURSION_DEPTH;
- }
-
- if (enc->doublePrecision < 0 ||
- enc->doublePrecision > JSON_DOUBLE_MAX_DECIMALS)
- {
- enc->doublePrecision = JSON_DOUBLE_MAX_DECIMALS;
- }
-
- if (_buffer == NULL)
+ enc->malloc = enc->malloc ? enc->malloc : malloc;
+ enc->free = enc->free ? enc->free : free;
+ enc->realloc = enc->realloc ? enc->realloc : realloc;
+ enc->errorMsg = NULL;
+ enc->errorObj = NULL;
+ enc->level = 0;
+
+ if (enc->recursionMax < 1)
+ {
+ enc->recursionMax = JSON_MAX_RECURSION_DEPTH;
+ }
+
+ if (enc->doublePrecision < 0 ||
+ enc->doublePrecision > JSON_DOUBLE_MAX_DECIMALS)
+ {
+ enc->doublePrecision = JSON_DOUBLE_MAX_DECIMALS;
+ }
+
+ if (_buffer == NULL)
+ {
+ _cbBuffer = 32768;
+ enc->start = (char *) enc->malloc (_cbBuffer);
+ if (!enc->start)
{
- _cbBuffer = 32768;
- enc->start = (char *) enc->malloc (_cbBuffer);
- if (!enc->start)
- {
- SetError(obj, enc, "Could not reserve memory block");
- return NULL;
- }
- enc->heap = 1;
- }
- else
- {
- enc->start = _buffer;
- enc->heap = 0;
+ SetError(obj, enc, "Could not reserve memory block");
+ return NULL;
}
-
- enc->end = enc->start + _cbBuffer;
- enc->offset = enc->start;
-
-
- encode (obj, enc, NULL, 0);
-
- Buffer_Reserve(enc, 1);
- if (enc->errorMsg)
- {
- return NULL;
- }
- Buffer_AppendCharUnchecked(enc, '\0');
-
- return enc->start;
+ enc->heap = 1;
+ }
+ else
+ {
+ enc->start = _buffer;
+ enc->heap = 0;
+ }
+
+ enc->end = enc->start + _cbBuffer;
+ enc->offset = enc->start;
+
+ encode (obj, enc, NULL, 0);
+
+ Buffer_Reserve(enc, 1);
+ if (enc->errorMsg)
+ {
+ return NULL;
+ }
+ Buffer_AppendCharUnchecked(enc, '\0');
+
+ return enc->start;
}
diff --git a/pandas/src/ujson/python/JSONtoObj.c b/pandas/src/ujson/python/JSONtoObj.c
index bc42269d9698b..9c1b4febd9895 100644
--- a/pandas/src/ujson/python/JSONtoObj.c
+++ b/pandas/src/ujson/python/JSONtoObj.c
@@ -1,3 +1,40 @@
+/*
+Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the ESN Social Software AB nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
+http://code.google.com/p/stringencoders/
+Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
+
+Numeric decoder derived from from TCL library
+http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
+ * Copyright (c) 1988-1993 The Regents of the University of California.
+ * Copyright (c) 1994 Sun Microsystems, Inc.
+*/
+
#include "py_defines.h"
#define PY_ARRAY_UNIQUE_SYMBOL UJSON_NUMPY
#define NO_IMPORT_ARRAY
@@ -5,33 +42,33 @@
#include <ultrajson.h>
+//#define PRINTMARK() fprintf(stderr, "%s: MARK(%d)\n", __FILE__, __LINE__)
+#define PRINTMARK()
+
typedef struct __PyObjectDecoder
{
- JSONObjectDecoder dec;
+ JSONObjectDecoder dec;
- void* npyarr; // Numpy context buffer
- void* npyarr_addr; // Ref to npyarr ptr to track DECREF calls
- npy_intp curdim; // Current array dimension
+ void* npyarr; // Numpy context buffer
+ void* npyarr_addr; // Ref to npyarr ptr to track DECREF calls
+ npy_intp curdim; // Current array dimension
- PyArray_Descr* dtype;
+ PyArray_Descr* dtype;
} PyObjectDecoder;
typedef struct __NpyArrContext
{
- PyObject* ret;
- PyObject* labels[2];
- PyArray_Dims shape;
+ PyObject* ret;
+ PyObject* labels[2];
+ PyArray_Dims shape;
- PyObjectDecoder* dec;
+ PyObjectDecoder* dec;
- npy_intp i;
- npy_intp elsize;
- npy_intp elcount;
+ npy_intp i;
+ npy_intp elsize;
+ npy_intp elcount;
} NpyArrContext;
-//#define PRINTMARK() fprintf(stderr, "%s: MARK(%d)\n", __FILE__, __LINE__)
-#define PRINTMARK()
-
// Numpy handling based on numpy internal code, specifically the function
// PyArray_FromIter.
@@ -39,638 +76,661 @@ typedef struct __NpyArrContext
// to ensure the compiler catches any errors
// standard numpy array handling
-JSOBJ Object_npyNewArray(void* decoder);
-JSOBJ Object_npyEndArray(JSOBJ obj);
-int Object_npyArrayAddItem(JSOBJ obj, JSOBJ value);
+JSOBJ Object_npyNewArray(void *prv, void* decoder);
+JSOBJ Object_npyEndArray(void *prv, JSOBJ obj);
+int Object_npyArrayAddItem(void *prv, JSOBJ obj, JSOBJ value);
// for more complex dtypes (object and string) fill a standard Python list
// and convert to a numpy array when done.
-JSOBJ Object_npyNewArrayList(void* decoder);
-JSOBJ Object_npyEndArrayList(JSOBJ obj);
-int Object_npyArrayListAddItem(JSOBJ obj, JSOBJ value);
+JSOBJ Object_npyNewArrayList(void *prv, void* decoder);
+JSOBJ Object_npyEndArrayList(void *prv, JSOBJ obj);
+int Object_npyArrayListAddItem(void *prv, JSOBJ obj, JSOBJ value);
// labelled support, encode keys and values of JS object into separate numpy
// arrays
-JSOBJ Object_npyNewObject(void* decoder);
-JSOBJ Object_npyEndObject(JSOBJ obj);
-int Object_npyObjectAddKey(JSOBJ obj, JSOBJ name, JSOBJ value);
-
+JSOBJ Object_npyNewObject(void *prv, void* decoder);
+JSOBJ Object_npyEndObject(void *prv, JSOBJ obj);
+int Object_npyObjectAddKey(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value);
// free the numpy context buffer
void Npy_releaseContext(NpyArrContext* npyarr)
{
- PRINTMARK();
- if (npyarr)
+ PRINTMARK();
+ if (npyarr)
+ {
+ if (npyarr->shape.ptr)
{
- if (npyarr->shape.ptr)
- {
- PyObject_Free(npyarr->shape.ptr);
- }
- if (npyarr->dec)
- {
- npyarr->dec->npyarr = NULL;
- npyarr->dec->curdim = 0;
- }
- Py_XDECREF(npyarr->labels[0]);
- Py_XDECREF(npyarr->labels[1]);
- Py_XDECREF(npyarr->ret);
- PyObject_Free(npyarr);
+ PyObject_Free(npyarr->shape.ptr);
}
+ if (npyarr->dec)
+ {
+ npyarr->dec->npyarr = NULL;
+ npyarr->dec->curdim = 0;
+ }
+ Py_XDECREF(npyarr->labels[0]);
+ Py_XDECREF(npyarr->labels[1]);
+ Py_XDECREF(npyarr->ret);
+ PyObject_Free(npyarr);
+ }
}
-JSOBJ Object_npyNewArray(void* _decoder)
+JSOBJ Object_npyNewArray(void *prv, void* _decoder)
{
- NpyArrContext* npyarr;
- PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder;
- PRINTMARK();
- if (decoder->curdim <= 0)
+ NpyArrContext* npyarr;
+ PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder;
+ PRINTMARK();
+ if (decoder->curdim <= 0)
+ {
+ // start of array - initialise the context buffer
+ npyarr = decoder->npyarr = PyObject_Malloc(sizeof(NpyArrContext));
+ decoder->npyarr_addr = npyarr;
+
+ if (!npyarr)
{
- // start of array - initialise the context buffer
- npyarr = decoder->npyarr = PyObject_Malloc(sizeof(NpyArrContext));
- decoder->npyarr_addr = npyarr;
-
- if (!npyarr)
- {
- PyErr_NoMemory();
- return NULL;
- }
-
- npyarr->dec = decoder;
- npyarr->labels[0] = npyarr->labels[1] = NULL;
-
- npyarr->shape.ptr = PyObject_Malloc(sizeof(npy_intp)*NPY_MAXDIMS);
- npyarr->shape.len = 1;
- npyarr->ret = NULL;
-
- npyarr->elsize = 0;
- npyarr->elcount = 4;
- npyarr->i = 0;
- }
- else
+ PyErr_NoMemory();
+ return NULL;
+ }
+
+ npyarr->dec = decoder;
+ npyarr->labels[0] = npyarr->labels[1] = NULL;
+
+ npyarr->shape.ptr = PyObject_Malloc(sizeof(npy_intp)*NPY_MAXDIMS);
+ npyarr->shape.len = 1;
+ npyarr->ret = NULL;
+
+ npyarr->elsize = 0;
+ npyarr->elcount = 4;
+ npyarr->i = 0;
+ }
+ else
+ {
+ // starting a new dimension continue the current array (and reshape after)
+ npyarr = (NpyArrContext*) decoder->npyarr;
+ if (decoder->curdim >= npyarr->shape.len)
{
- // starting a new dimension continue the current array (and reshape after)
- npyarr = (NpyArrContext*) decoder->npyarr;
- if (decoder->curdim >= npyarr->shape.len)
- {
- npyarr->shape.len++;
- }
+ npyarr->shape.len++;
}
+ }
- npyarr->shape.ptr[decoder->curdim] = 0;
- decoder->curdim++;
- return npyarr;
+ npyarr->shape.ptr[decoder->curdim] = 0;
+ decoder->curdim++;
+ return npyarr;
}
PyObject* Npy_returnLabelled(NpyArrContext* npyarr)
{
- PyObject* ret = npyarr->ret;
- npy_intp i;
-
- if (npyarr->labels[0] || npyarr->labels[1])
+ PyObject* ret = npyarr->ret;
+ npy_intp i;
+
+ if (npyarr->labels[0] || npyarr->labels[1])
+ {
+ // finished decoding, build tuple with values and labels
+ ret = PyTuple_New(npyarr->shape.len+1);
+ for (i = 0; i < npyarr->shape.len; i++)
{
- // finished decoding, build tuple with values and labels
- ret = PyTuple_New(npyarr->shape.len+1);
- for (i = 0; i < npyarr->shape.len; i++)
- {
- if (npyarr->labels[i])
- {
- PyTuple_SET_ITEM(ret, i+1, npyarr->labels[i]);
- npyarr->labels[i] = NULL;
- }
- else
- {
- Py_INCREF(Py_None);
- PyTuple_SET_ITEM(ret, i+1, Py_None);
- }
- }
- PyTuple_SET_ITEM(ret, 0, npyarr->ret);
- }
-
- return ret;
+ if (npyarr->labels[i])
+ {
+ PyTuple_SET_ITEM(ret, i+1, npyarr->labels[i]);
+ npyarr->labels[i] = NULL;
+ }
+ else
+ {
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(ret, i+1, Py_None);
+ }
+ }
+ PyTuple_SET_ITEM(ret, 0, npyarr->ret);
+ }
+
+ return ret;
}
-JSOBJ Object_npyEndArray(JSOBJ obj)
+JSOBJ Object_npyEndArray(void *prv, JSOBJ obj)
{
- PyObject *ret;
- char* new_data;
- NpyArrContext* npyarr = (NpyArrContext*) obj;
- int emptyType = NPY_DEFAULT_TYPE;
- npy_intp i;
- PRINTMARK();
- if (!npyarr)
- {
- return NULL;
- }
+ PyObject *ret;
+ char* new_data;
+ NpyArrContext* npyarr = (NpyArrContext*) obj;
+ int emptyType = NPY_DEFAULT_TYPE;
+ npy_intp i;
+ PRINTMARK();
+ if (!npyarr)
+ {
+ return NULL;
+ }
- ret = npyarr->ret;
- i = npyarr->i;
+ ret = npyarr->ret;
+ i = npyarr->i;
- npyarr->dec->curdim--;
+ npyarr->dec->curdim--;
- if (i == 0 || !npyarr->ret) {
- // empty array would not have been initialised so do it now.
- if (npyarr->dec->dtype)
- {
- emptyType = npyarr->dec->dtype->type_num;
- }
- npyarr->ret = ret = PyArray_EMPTY(npyarr->shape.len, npyarr->shape.ptr, emptyType, 0);
- }
- else if (npyarr->dec->curdim <= 0)
+ if (i == 0 || !npyarr->ret) {
+ // empty array would not have been initialised so do it now.
+ if (npyarr->dec->dtype)
{
- // realloc to final size
- new_data = PyDataMem_RENEW(PyArray_DATA(ret), i * npyarr->elsize);
- if (new_data == NULL) {
- PyErr_NoMemory();
- Npy_releaseContext(npyarr);
- return NULL;
- }
- ((PyArrayObject*) ret)->data = (void*) new_data;
- // PyArray_BYTES(ret) = new_data;
- }
-
- if (npyarr->dec->curdim <= 0)
+ emptyType = npyarr->dec->dtype->type_num;
+ }
+ npyarr->ret = ret = PyArray_EMPTY(npyarr->shape.len, npyarr->shape.ptr, emptyType, 0);
+ }
+ else if (npyarr->dec->curdim <= 0)
+ {
+ // realloc to final size
+ new_data = PyDataMem_RENEW(PyArray_DATA(ret), i * npyarr->elsize);
+ if (new_data == NULL) {
+ PyErr_NoMemory();
+ Npy_releaseContext(npyarr);
+ return NULL;
+ }
+ ((PyArrayObject*) ret)->data = (void*) new_data;
+ // PyArray_BYTES(ret) = new_data;
+ }
+
+ if (npyarr->dec->curdim <= 0)
+ {
+ // finished decoding array, reshape if necessary
+ if (npyarr->shape.len > 1)
{
- // finished decoding array, reshape if necessary
- if (npyarr->shape.len > 1)
- {
- npyarr->ret = PyArray_Newshape((PyArrayObject*) ret, &npyarr->shape, NPY_ANYORDER);
- Py_DECREF(ret);
- }
+ npyarr->ret = PyArray_Newshape((PyArrayObject*) ret, &npyarr->shape, NPY_ANYORDER);
+ Py_DECREF(ret);
+ }
- ret = Npy_returnLabelled(npyarr);
+ ret = Npy_returnLabelled(npyarr);
- npyarr->ret = NULL;
- Npy_releaseContext(npyarr);
- }
+ npyarr->ret = NULL;
+ Npy_releaseContext(npyarr);
+ }
- return ret;
+ return ret;
}
-int Object_npyArrayAddItem(JSOBJ obj, JSOBJ value)
+int Object_npyArrayAddItem(void *prv, JSOBJ obj, JSOBJ value)
{
- PyObject* type;
- PyArray_Descr* dtype;
- npy_intp i;
- char *new_data, *item;
- NpyArrContext* npyarr = (NpyArrContext*) obj;
- PRINTMARK();
- if (!npyarr)
+ PyObject* type;
+ PyArray_Descr* dtype;
+ npy_intp i;
+ char *new_data, *item;
+ NpyArrContext* npyarr = (NpyArrContext*) obj;
+ PRINTMARK();
+ if (!npyarr)
+ {
+ return 0;
+ }
+
+ i = npyarr->i;
+
+ npyarr->shape.ptr[npyarr->dec->curdim-1]++;
+
+ if (PyArray_Check((PyObject*)value))
+ {
+ // multidimensional array, keep decoding values.
+ return 1;
+ }
+
+ if (!npyarr->ret)
+ {
+ // Array not initialised yet.
+ // We do it here so we can 'sniff' the data type if none was provided
+ if (!npyarr->dec->dtype)
+ {
+ type = PyObject_Type(value);
+ if(!PyArray_DescrConverter(type, &dtype))
+ {
+ Py_DECREF(type);
+ goto fail;
+ }
+ Py_INCREF(dtype);
+ Py_DECREF(type);
+ }
+ else
{
- return 0;
+ dtype = PyArray_DescrNew(npyarr->dec->dtype);
}
- i = npyarr->i;
+ // If it's an object or string then fill a Python list and subsequently
+ // convert. Otherwise we would need to somehow mess about with
+ // reference counts when renewing memory.
+ npyarr->elsize = dtype->elsize;
+ if (PyDataType_REFCHK(dtype) || npyarr->elsize == 0)
+ {
+ Py_XDECREF(dtype);
- npyarr->shape.ptr[npyarr->dec->curdim-1]++;
+ if (npyarr->dec->curdim > 1)
+ {
+ PyErr_SetString(PyExc_ValueError, "Cannot decode multidimensional arrays with variable length elements to numpy");
+ goto fail;
+ }
+ npyarr->elcount = 0;
+ npyarr->ret = PyList_New(0);
+ if (!npyarr->ret)
+ {
+ goto fail;
+ }
+ ((JSONObjectDecoder*)npyarr->dec)->newArray = Object_npyNewArrayList;
+ ((JSONObjectDecoder*)npyarr->dec)->arrayAddItem = Object_npyArrayListAddItem;
+ ((JSONObjectDecoder*)npyarr->dec)->endArray = Object_npyEndArrayList;
+ return Object_npyArrayListAddItem(prv, obj, value);
+ }
+
+ npyarr->ret = PyArray_NewFromDescr(&PyArray_Type, dtype, 1,
+ &npyarr->elcount, NULL,NULL, 0, NULL);
- if (PyArray_Check((PyObject*)value))
+ if (!npyarr->ret)
{
- // multidimensional array, keep decoding values.
- return 1;
+ goto fail;
}
+ }
- if (!npyarr->ret)
+ if (i >= npyarr->elcount) {
+ // Grow PyArray_DATA(ret):
+ // this is similar for the strategy for PyListObject, but we use
+ // 50% overallocation => 0, 4, 8, 14, 23, 36, 56, 86 ...
+ if (npyarr->elsize == 0)
{
- // Array not initialised yet.
- // We do it here so we can 'sniff' the data type if none was provided
- if (!npyarr->dec->dtype)
- {
- type = PyObject_Type(value);
- if(!PyArray_DescrConverter(type, &dtype))
- {
- Py_DECREF(type);
- goto fail;
- }
- Py_INCREF(dtype);
- Py_DECREF(type);
- }
- else
- {
- dtype = PyArray_DescrNew(npyarr->dec->dtype);
- }
-
- // If it's an object or string then fill a Python list and subsequently
- // convert. Otherwise we would need to somehow mess about with
- // reference counts when renewing memory.
- npyarr->elsize = dtype->elsize;
- if (PyDataType_REFCHK(dtype) || npyarr->elsize == 0)
- {
- Py_XDECREF(dtype);
-
- if (npyarr->dec->curdim > 1)
- {
- PyErr_SetString(PyExc_ValueError, "Cannot decode multidimensional arrays with variable length elements to numpy");
- goto fail;
- }
- npyarr->elcount = 0;
- npyarr->ret = PyList_New(0);
- if (!npyarr->ret)
- {
- goto fail;
- }
- ((JSONObjectDecoder*)npyarr->dec)->newArray = Object_npyNewArrayList;
- ((JSONObjectDecoder*)npyarr->dec)->arrayAddItem = Object_npyArrayListAddItem;
- ((JSONObjectDecoder*)npyarr->dec)->endArray = Object_npyEndArrayList;
- return Object_npyArrayListAddItem(obj, value);
- }
-
- npyarr->ret = PyArray_NewFromDescr(&PyArray_Type, dtype, 1,
- &npyarr->elcount, NULL,NULL, 0, NULL);
-
- if (!npyarr->ret)
- {
- goto fail;
- }
+ PyErr_SetString(PyExc_ValueError, "Cannot decode multidimensional arrays with variable length elements to numpy");
+ goto fail;
}
- if (i >= npyarr->elcount) {
- // Grow PyArray_DATA(ret):
- // this is similar for the strategy for PyListObject, but we use
- // 50% overallocation => 0, 4, 8, 14, 23, 36, 56, 86 ...
- if (npyarr->elsize == 0)
- {
- PyErr_SetString(PyExc_ValueError, "Cannot decode multidimensional arrays with variable length elements to numpy");
- goto fail;
- }
-
- npyarr->elcount = (i >> 1) + (i < 4 ? 4 : 2) + i;
- if (npyarr->elcount <= NPY_MAX_INTP/npyarr->elsize) {
- new_data = PyDataMem_RENEW(PyArray_DATA(npyarr->ret), npyarr->elcount * npyarr->elsize);
- }
- else {
- PyErr_NoMemory();
- goto fail;
- }
- ((PyArrayObject*) npyarr->ret)->data = (void*) new_data;
-
- // PyArray_BYTES(npyarr->ret) = new_data;
+ npyarr->elcount = (i >> 1) + (i < 4 ? 4 : 2) + i;
+ if (npyarr->elcount <= NPY_MAX_INTP/npyarr->elsize) {
+ new_data = PyDataMem_RENEW(PyArray_DATA(npyarr->ret), npyarr->elcount * npyarr->elsize);
+ }
+ else {
+ PyErr_NoMemory();
+ goto fail;
}
+ ((PyArrayObject*) npyarr->ret)->data = (void*) new_data;
- PyArray_DIMS(npyarr->ret)[0] = i + 1;
+ // PyArray_BYTES(npyarr->ret) = new_data;
+ }
- if ((item = PyArray_GETPTR1(npyarr->ret, i)) == NULL
- || PyArray_SETITEM(npyarr->ret, item, value) == -1) {
- goto fail;
- }
+ PyArray_DIMS(npyarr->ret)[0] = i + 1;
- Py_DECREF( (PyObject *) value);
- npyarr->i++;
- return 1;
+ if ((item = PyArray_GETPTR1(npyarr->ret, i)) == NULL
+ || PyArray_SETITEM(npyarr->ret, item, value) == -1) {
+ goto fail;
+ }
+
+ Py_DECREF( (PyObject *) value);
+ npyarr->i++;
+ return 1;
fail:
- Npy_releaseContext(npyarr);
- return 0;
+ Npy_releaseContext(npyarr);
+ return 0;
}
-JSOBJ Object_npyNewArrayList(void* _decoder)
+JSOBJ Object_npyNewArrayList(void *prv, void* _decoder)
{
- PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder;
- PRINTMARK();
- PyErr_SetString(PyExc_ValueError, "nesting not supported for object or variable length dtypes");
- Npy_releaseContext(decoder->npyarr);
- return NULL;
+ PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder;
+ PRINTMARK();
+ PyErr_SetString(PyExc_ValueError, "nesting not supported for object or variable length dtypes");
+ Npy_releaseContext(decoder->npyarr);
+ return NULL;
}
-JSOBJ Object_npyEndArrayList(JSOBJ obj)
+JSOBJ Object_npyEndArrayList(void *prv, JSOBJ obj)
{
- PyObject *list, *ret;
- NpyArrContext* npyarr = (NpyArrContext*) obj;
- PRINTMARK();
- if (!npyarr)
- {
- return NULL;
- }
+ PyObject *list, *ret;
+ NpyArrContext* npyarr = (NpyArrContext*) obj;
+ PRINTMARK();
+ if (!npyarr)
+ {
+ return NULL;
+ }
- // convert decoded list to numpy array
- list = (PyObject *) npyarr->ret;
- npyarr->ret = PyArray_FROM_O(list);
+ // convert decoded list to numpy array
+ list = (PyObject *) npyarr->ret;
+ npyarr->ret = PyArray_FROM_O(list);
- ret = Npy_returnLabelled(npyarr);
- npyarr->ret = list;
+ ret = Npy_returnLabelled(npyarr);
+ npyarr->ret = list;
- ((JSONObjectDecoder*)npyarr->dec)->newArray = Object_npyNewArray;
- ((JSONObjectDecoder*)npyarr->dec)->arrayAddItem = Object_npyArrayAddItem;
- ((JSONObjectDecoder*)npyarr->dec)->endArray = Object_npyEndArray;
- Npy_releaseContext(npyarr);
- return ret;
+ ((JSONObjectDecoder*)npyarr->dec)->newArray = Object_npyNewArray;
+ ((JSONObjectDecoder*)npyarr->dec)->arrayAddItem = Object_npyArrayAddItem;
+ ((JSONObjectDecoder*)npyarr->dec)->endArray = Object_npyEndArray;
+ Npy_releaseContext(npyarr);
+ return ret;
}
-int Object_npyArrayListAddItem(JSOBJ obj, JSOBJ value)
+int Object_npyArrayListAddItem(void *prv, JSOBJ obj, JSOBJ value)
{
- NpyArrContext* npyarr = (NpyArrContext*) obj;
- PRINTMARK();
- if (!npyarr)
- {
- return 0;
- }
- PyList_Append((PyObject*) npyarr->ret, value);
- Py_DECREF( (PyObject *) value);
- npyarr->elcount++;
- return 1;
+ NpyArrContext* npyarr = (NpyArrContext*) obj;
+ PRINTMARK();
+ if (!npyarr)
+ {
+ return 0;
+ }
+ PyList_Append((PyObject*) npyarr->ret, value);
+ Py_DECREF( (PyObject *) value);
+ npyarr->elcount++;
+ return 1;
}
-JSOBJ Object_npyNewObject(void* _decoder)
+JSOBJ Object_npyNewObject(void *prv, void* _decoder)
{
- PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder;
- PRINTMARK();
- if (decoder->curdim > 1)
- {
- PyErr_SetString(PyExc_ValueError, "labels only supported up to 2 dimensions");
- return NULL;
- }
+ PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder;
+ PRINTMARK();
+ if (decoder->curdim > 1)
+ {
+ PyErr_SetString(PyExc_ValueError, "labels only supported up to 2 dimensions");
+ return NULL;
+ }
- return ((JSONObjectDecoder*)decoder)->newArray(decoder);
+ return ((JSONObjectDecoder*)decoder)->newArray(prv, decoder);
}
-JSOBJ Object_npyEndObject(JSOBJ obj)
+JSOBJ Object_npyEndObject(void *prv, JSOBJ obj)
{
- PyObject *list;
- npy_intp labelidx;
- NpyArrContext* npyarr = (NpyArrContext*) obj;
- PRINTMARK();
- if (!npyarr)
- {
- return NULL;
- }
+ PyObject *list;
+ npy_intp labelidx;
+ NpyArrContext* npyarr = (NpyArrContext*) obj;
+ PRINTMARK();
+ if (!npyarr)
+ {
+ return NULL;
+ }
- labelidx = npyarr->dec->curdim-1;
+ labelidx = npyarr->dec->curdim-1;
- list = npyarr->labels[labelidx];
- if (list)
- {
- npyarr->labels[labelidx] = PyArray_FROM_O(list);
- Py_DECREF(list);
- }
+ list = npyarr->labels[labelidx];
+ if (list)
+ {
+ npyarr->labels[labelidx] = PyArray_FROM_O(list);
+ Py_DECREF(list);
+ }
- return (PyObject*) ((JSONObjectDecoder*)npyarr->dec)->endArray(obj);
+ return (PyObject*) ((JSONObjectDecoder*)npyarr->dec)->endArray(prv, obj);
}
-int Object_npyObjectAddKey(JSOBJ obj, JSOBJ name, JSOBJ value)
+int Object_npyObjectAddKey(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value)
{
- PyObject *label;
- npy_intp labelidx;
- // add key to label array, value to values array
- NpyArrContext* npyarr = (NpyArrContext*) obj;
- PRINTMARK();
- if (!npyarr)
- {
- return 0;
- }
-
- label = (PyObject*) name;
- labelidx = npyarr->dec->curdim-1;
-
- if (!npyarr->labels[labelidx])
- {
- npyarr->labels[labelidx] = PyList_New(0);
- }
-
- // only fill label array once, assumes all column labels are the same
- // for 2-dimensional arrays.
- if (PyList_GET_SIZE(npyarr->labels[labelidx]) <= npyarr->elcount)
- {
- PyList_Append(npyarr->labels[labelidx], label);
- }
-
- if(((JSONObjectDecoder*)npyarr->dec)->arrayAddItem(obj, value))
- {
- Py_DECREF(label);
- return 1;
- }
+ PyObject *label;
+ npy_intp labelidx;
+ // add key to label array, value to values array
+ NpyArrContext* npyarr = (NpyArrContext*) obj;
+ PRINTMARK();
+ if (!npyarr)
+ {
return 0;
+ }
+
+ label = (PyObject*) name;
+ labelidx = npyarr->dec->curdim-1;
+
+ if (!npyarr->labels[labelidx])
+ {
+ npyarr->labels[labelidx] = PyList_New(0);
+ }
+
+ // only fill label array once, assumes all column labels are the same
+ // for 2-dimensional arrays.
+ if (PyList_GET_SIZE(npyarr->labels[labelidx]) <= npyarr->elcount)
+ {
+ PyList_Append(npyarr->labels[labelidx], label);
+ }
+
+ if(((JSONObjectDecoder*)npyarr->dec)->arrayAddItem(prv, obj, value))
+ {
+ Py_DECREF(label);
+ return 1;
+ }
+ return 0;
}
-int Object_objectAddKey(JSOBJ obj, JSOBJ name, JSOBJ value)
+int Object_objectAddKey(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value)
{
- PyDict_SetItem (obj, name, value);
- Py_DECREF( (PyObject *) name);
- Py_DECREF( (PyObject *) value);
- return 1;
+ PyDict_SetItem (obj, name, value);
+ Py_DECREF( (PyObject *) name);
+ Py_DECREF( (PyObject *) value);
+ return 1;
}
-int Object_arrayAddItem(JSOBJ obj, JSOBJ value)
+int Object_arrayAddItem(void *prv, JSOBJ obj, JSOBJ value)
{
- PyList_Append(obj, value);
- Py_DECREF( (PyObject *) value);
- return 1;
+ PyList_Append(obj, value);
+ Py_DECREF( (PyObject *) value);
+ return 1;
}
-JSOBJ Object_newString(wchar_t *start, wchar_t *end)
+JSOBJ Object_newString(void *prv, wchar_t *start, wchar_t *end)
{
- return PyUnicode_FromWideChar (start, (end - start));
+ return PyUnicode_FromWideChar (start, (end - start));
}
-JSOBJ Object_newTrue(void)
+JSOBJ Object_newTrue(void *prv)
{
- Py_RETURN_TRUE;
+ Py_RETURN_TRUE;
}
-JSOBJ Object_newFalse(void)
+JSOBJ Object_newFalse(void *prv)
{
- Py_RETURN_FALSE;
+ Py_RETURN_FALSE;
}
-JSOBJ Object_newNull(void)
+JSOBJ Object_newNull(void *prv)
{
- Py_RETURN_NONE;
+ Py_RETURN_NONE;
}
-JSOBJ Object_newObject(void* decoder)
+JSOBJ Object_newObject(void *prv, void* decoder)
{
- return PyDict_New();
+ return PyDict_New();
}
-JSOBJ Object_endObject(JSOBJ obj)
+JSOBJ Object_endObject(void *prv, JSOBJ obj)
{
- return obj;
+ return obj;
}
-JSOBJ Object_newArray(void* decoder)
+JSOBJ Object_newArray(void *prv, void* decoder)
{
- return PyList_New(0);
+ return PyList_New(0);
}
-JSOBJ Object_endArray(JSOBJ obj)
+JSOBJ Object_endArray(void *prv, JSOBJ obj)
{
- return obj;
+ return obj;
}
-JSOBJ Object_newInteger(JSINT32 value)
+JSOBJ Object_newInteger(void *prv, JSINT32 value)
{
- return PyInt_FromLong( (long) value);
+ return PyInt_FromLong( (long) value);
}
-JSOBJ Object_newLong(JSINT64 value)
+JSOBJ Object_newLong(void *prv, JSINT64 value)
{
- return PyLong_FromLongLong (value);
+ return PyLong_FromLongLong (value);
}
-JSOBJ Object_newDouble(double value)
+JSOBJ Object_newDouble(void *prv, double value)
{
- return PyFloat_FromDouble(value);
+ return PyFloat_FromDouble(value);
}
-static void Object_releaseObject(JSOBJ obj, void* _decoder)
+static void Object_releaseObject(void *prv, JSOBJ obj, void* _decoder)
{
- PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder;
- if (obj != decoder->npyarr_addr)
- {
- Py_XDECREF( ((PyObject *)obj));
- }
+ PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder;
+ if (obj != decoder->npyarr_addr)
+ {
+ Py_XDECREF( ((PyObject *)obj));
+ }
}
+static char *g_kwlist[] = {"obj", "precise_float", "numpy", "labelled", "dtype", NULL};
PyObject* JSONToObj(PyObject* self, PyObject *args, PyObject *kwargs)
{
- PyObject *ret;
- PyObject *sarg;
- JSONObjectDecoder *decoder;
- PyObjectDecoder pyDecoder;
- PyArray_Descr *dtype = NULL;
- static char *kwlist[] = { "obj", "numpy", "labelled", "dtype", NULL};
- int numpy = 0, labelled = 0, decref = 0;
- // PRINTMARK();
-
- JSONObjectDecoder dec = {
- Object_newString,
- Object_objectAddKey,
- Object_arrayAddItem,
- Object_newTrue,
- Object_newFalse,
- Object_newNull,
- Object_newObject,
- Object_endObject,
- Object_newArray,
- Object_endArray,
- Object_newInteger,
- Object_newLong,
- Object_newDouble,
- Object_releaseObject,
- PyObject_Malloc,
- PyObject_Free,
- PyObject_Realloc,
- };
- pyDecoder.dec = dec;
- pyDecoder.curdim = 0;
- pyDecoder.npyarr = NULL;
- pyDecoder.npyarr_addr = NULL;
-
- decoder = (JSONObjectDecoder*) &pyDecoder;
-
- if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|iiO&", kwlist, &sarg, &numpy, &labelled, PyArray_DescrConverter2, &dtype))
+ PyObject *ret;
+ PyObject *sarg;
+ PyObject *arg;
+ PyObject *opreciseFloat = NULL;
+ JSONObjectDecoder *decoder;
+ PyObjectDecoder pyDecoder;
+ PyArray_Descr *dtype = NULL;
+ int numpy = 0, labelled = 0;
+
+ JSONObjectDecoder dec =
+ {
+ Object_newString,
+ Object_objectAddKey,
+ Object_arrayAddItem,
+ Object_newTrue,
+ Object_newFalse,
+ Object_newNull,
+ Object_newObject,
+ Object_endObject,
+ Object_newArray,
+ Object_endArray,
+ Object_newInteger,
+ Object_newLong,
+ Object_newDouble,
+ Object_releaseObject,
+ PyObject_Malloc,
+ PyObject_Free,
+ PyObject_Realloc
+ };
+
+ dec.preciseFloat = 0;
+ dec.prv = NULL;
+
+ pyDecoder.dec = dec;
+ pyDecoder.curdim = 0;
+ pyDecoder.npyarr = NULL;
+ pyDecoder.npyarr_addr = NULL;
+
+ decoder = (JSONObjectDecoder*) &pyDecoder;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|OiiO&", g_kwlist, &arg, &opreciseFloat, &numpy, &labelled, PyArray_DescrConverter2, &dtype))
+ {
+ Npy_releaseContext(pyDecoder.npyarr);
+ return NULL;
+ }
+
+ if (opreciseFloat && PyObject_IsTrue(opreciseFloat))
+ {
+ decoder->preciseFloat = 1;
+ }
+
+ if (PyString_Check(arg))
+ {
+ sarg = arg;
+ }
+ else
+ if (PyUnicode_Check(arg))
+ {
+ sarg = PyUnicode_AsUTF8String(arg);
+ if (sarg == NULL)
{
- Npy_releaseContext(pyDecoder.npyarr);
- return NULL;
+ //Exception raised above us by codec according to docs
+ return NULL;
}
+ }
+ else
+ {
+ PyErr_Format(PyExc_TypeError, "Expected String or Unicode");
+ return NULL;
+ }
- if (PyUnicode_Check(sarg))
- {
- sarg = PyUnicode_AsUTF8String(sarg);
- if (sarg == NULL)
- {
- //Exception raised above us by codec according to docs
- return NULL;
- }
- decref = 1;
- }
- else
- if (!PyString_Check(sarg))
- {
- PyErr_Format(PyExc_TypeError, "Expected String or Unicode");
- return NULL;
- }
+ decoder->errorStr = NULL;
+ decoder->errorOffset = NULL;
- if (numpy)
+ if (numpy)
+ {
+ pyDecoder.dtype = dtype;
+ decoder->newArray = Object_npyNewArray;
+ decoder->endArray = Object_npyEndArray;
+ decoder->arrayAddItem = Object_npyArrayAddItem;
+
+ if (labelled)
{
- pyDecoder.dtype = dtype;
- decoder->newArray = Object_npyNewArray;
- decoder->endArray = Object_npyEndArray;
- decoder->arrayAddItem = Object_npyArrayAddItem;
-
- if (labelled)
- {
- decoder->newObject = Object_npyNewObject;
- decoder->endObject = Object_npyEndObject;
- decoder->objectAddKey = Object_npyObjectAddKey;
- }
+ decoder->newObject = Object_npyNewObject;
+ decoder->endObject = Object_npyEndObject;
+ decoder->objectAddKey = Object_npyObjectAddKey;
}
+ }
- decoder->errorStr = NULL;
- decoder->errorOffset = NULL;
+ ret = JSON_DecodeObject(decoder, PyString_AS_STRING(sarg), PyString_GET_SIZE(sarg));
- PRINTMARK();
- ret = JSON_DecodeObject(decoder, PyString_AS_STRING(sarg), PyString_GET_SIZE(sarg));
- PRINTMARK();
+ if (sarg != arg)
+ {
+ Py_DECREF(sarg);
+ }
- if (decref)
+ if (PyErr_Occurred())
+ {
+ if (ret)
{
- Py_DECREF(sarg);
+ Py_DECREF( (PyObject *) ret);
}
+ Npy_releaseContext(pyDecoder.npyarr);
+ return NULL;
+ }
- if (PyErr_Occurred())
- {
- return NULL;
- }
+ if (decoder->errorStr)
+ {
+ /*
+ FIXME: It's possible to give a much nicer error message here with actual failing element in input etc*/
- if (decoder->errorStr)
- {
- /*FIXME: It's possible to give a much nicer error message here with actual failing element in input etc*/
- PyErr_Format (PyExc_ValueError, "%s", decoder->errorStr);
- Py_XDECREF( (PyObject *) ret);
- Npy_releaseContext(pyDecoder.npyarr);
+ PyErr_Format (PyExc_ValueError, "%s", decoder->errorStr);
- return NULL;
+ if (ret)
+ {
+ Py_DECREF( (PyObject *) ret);
}
+ Npy_releaseContext(pyDecoder.npyarr);
- return ret;
+ return NULL;
+ }
+
+ return ret;
}
PyObject* JSONFileToObj(PyObject* self, PyObject *args, PyObject *kwargs)
{
- PyObject *file;
- PyObject *read;
- PyObject *string;
- PyObject *result;
- PyObject *argtuple;
-
- if (!PyArg_ParseTuple (args, "O", &file)) {
- return NULL;
- }
-
- if (!PyObject_HasAttrString (file, "read"))
- {
- PyErr_Format (PyExc_TypeError, "expected file");
- return NULL;
- }
+ PyObject *read;
+ PyObject *string;
+ PyObject *result;
+ PyObject *file = NULL;
+ PyObject *argtuple;
+
+ if (!PyArg_ParseTuple (args, "O", &file))
+ {
+ return NULL;
+ }
- read = PyObject_GetAttrString (file, "read");
+ if (!PyObject_HasAttrString (file, "read"))
+ {
+ PyErr_Format (PyExc_TypeError, "expected file");
+ return NULL;
+ }
- if (!PyCallable_Check (read)) {
- Py_XDECREF(read);
- PyErr_Format (PyExc_TypeError, "expected file");
- return NULL;
- }
+ read = PyObject_GetAttrString (file, "read");
- string = PyObject_CallObject (read, NULL);
+ if (!PyCallable_Check (read)) {
Py_XDECREF(read);
+ PyErr_Format (PyExc_TypeError, "expected file");
+ return NULL;
+ }
- if (string == NULL)
- {
- return NULL;
- }
+ string = PyObject_CallObject (read, NULL);
+ Py_XDECREF(read);
- argtuple = PyTuple_Pack(1, string);
+ if (string == NULL)
+ {
+ return NULL;
+ }
- result = JSONToObj (self, argtuple, kwargs);
- Py_XDECREF(string);
- Py_DECREF(argtuple);
+ argtuple = PyTuple_Pack(1, string);
- if (result == NULL) {
- return NULL;
- }
+ result = JSONToObj (self, argtuple, kwargs);
- return result;
-}
+ Py_XDECREF(argtuple);
+ Py_XDECREF(string);
+ if (result == NULL) {
+ return NULL;
+ }
+
+ return result;
+}
diff --git a/pandas/src/ujson/python/objToJSON.c b/pandas/src/ujson/python/objToJSON.c
index 4fdd8dc91ab04..89d3c203fbb7d 100644
--- a/pandas/src/ujson/python/objToJSON.c
+++ b/pandas/src/ujson/python/objToJSON.c
@@ -1,3 +1,39 @@
+/*
+Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the ESN Social Software AB nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
+http://code.google.com/p/stringencoders/
+Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
+
+Numeric decoder derived from from TCL library
+http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
+* Copyright (c) 1988-1993 The Regents of the University of California.
+* Copyright (c) 1994 Sun Microsystems, Inc.
+*/
#define PY_ARRAY_UNIQUE_SYMBOL UJSON_NUMPY
#include "py_defines.h"
@@ -8,6 +44,9 @@
#include <datetime.h>
#include <ultrajson.h>
+#define EPOCH_ORD 719163
+static PyObject* type_decimal;
+
#define NPY_JSON_BUFSIZE 32768
static PyObject* cls_dataframe;
@@ -16,55 +55,54 @@ static PyObject* cls_index;
typedef void *(*PFN_PyTypeToJSON)(JSOBJ obj, JSONTypeContext *ti, void *outValue, size_t *_outLen);
-
#if (PY_VERSION_HEX < 0x02050000)
typedef ssize_t Py_ssize_t;
#endif
typedef struct __NpyArrContext
{
- PyObject *array;
- char* dataptr;
- int was_datetime64;
- int curdim; // current dimension in array's order
- int stridedim; // dimension we are striding over
- int inc; // stride dimension increment (+/- 1)
- npy_intp dim;
- npy_intp stride;
- npy_intp ndim;
- npy_intp index[NPY_MAXDIMS];
- PyArray_GetItemFunc* getitem;
-
- char** rowLabels;
- char** columnLabels;
+ PyObject *array;
+ char* dataptr;
+ int was_datetime64;
+ int curdim; // current dimension in array's order
+ int stridedim; // dimension we are striding over
+ int inc; // stride dimension increment (+/- 1)
+ npy_intp dim;
+ npy_intp stride;
+ npy_intp ndim;
+ npy_intp index[NPY_MAXDIMS];
+ PyArray_GetItemFunc* getitem;
+
+ char** rowLabels;
+ char** columnLabels;
} NpyArrContext;
typedef struct __TypeContext
{
- JSPFN_ITERBEGIN iterBegin;
- JSPFN_ITEREND iterEnd;
- JSPFN_ITERNEXT iterNext;
- JSPFN_ITERGETNAME iterGetName;
- JSPFN_ITERGETVALUE iterGetValue;
- PFN_PyTypeToJSON PyTypeToJSON;
- PyObject *newObj;
- PyObject *dictObj;
- Py_ssize_t index;
- Py_ssize_t size;
- PyObject *itemValue;
- PyObject *itemName;
- PyObject *attrList;
- char *citemName;
-
- JSINT64 longValue;
-
- NpyArrContext *npyarr;
- int transpose;
- char** rowLabels;
- char** columnLabels;
- npy_intp rowLabelsLen;
- npy_intp columnLabelsLen;
-
+ JSPFN_ITERBEGIN iterBegin;
+ JSPFN_ITEREND iterEnd;
+ JSPFN_ITERNEXT iterNext;
+ JSPFN_ITERGETNAME iterGetName;
+ JSPFN_ITERGETVALUE iterGetValue;
+ PFN_PyTypeToJSON PyTypeToJSON;
+ PyObject *newObj;
+ PyObject *dictObj;
+ Py_ssize_t index;
+ Py_ssize_t size;
+ PyObject *itemValue;
+ PyObject *itemName;
+ PyObject *attrList;
+ PyObject *iterator;
+
+ JSINT64 longValue;
+
+ char *citemName;
+ NpyArrContext *npyarr;
+ int transpose;
+ char** rowLabels;
+ char** columnLabels;
+ npy_intp rowLabelsLen;
+ npy_intp columnLabelsLen;
} TypeContext;
typedef struct __PyObjectEncoder
@@ -83,18 +121,18 @@ typedef struct __PyObjectEncoder
struct PyDictIterState
{
- PyObject *keys;
- size_t i;
- size_t sz;
+ PyObject *keys;
+ size_t i;
+ size_t sz;
};
enum PANDAS_FORMAT
{
- SPLIT,
- RECORDS,
- INDEX,
- COLUMNS,
- VALUES
+ SPLIT,
+ RECORDS,
+ INDEX,
+ COLUMNS,
+ VALUES
};
//#define PRINTMARK() fprintf(stderr, "%s: MARK(%d)\n", __FILE__, __LINE__)
@@ -106,40 +144,45 @@ void initObjToJSON(void)
int initObjToJSON(void)
#endif
{
- PyObject *mod_frame;
- PyDateTime_IMPORT;
+ PyObject *mod_frame;
+ PyObject* mod_decimal = PyImport_ImportModule("decimal");
+ type_decimal = PyObject_GetAttrString(mod_decimal, "Decimal");
+ Py_INCREF(type_decimal);
+ Py_DECREF(mod_decimal);
- mod_frame = PyImport_ImportModule("pandas.core.frame");
- if (mod_frame)
- {
- cls_dataframe = PyObject_GetAttrString(mod_frame, "DataFrame");
- cls_index = PyObject_GetAttrString(mod_frame, "Index");
- cls_series = PyObject_GetAttrString(mod_frame, "Series");
- Py_DECREF(mod_frame);
- }
+ PyDateTime_IMPORT;
+
+ mod_frame = PyImport_ImportModule("pandas.core.frame");
+ if (mod_frame)
+ {
+ cls_dataframe = PyObject_GetAttrString(mod_frame, "DataFrame");
+ cls_index = PyObject_GetAttrString(mod_frame, "Index");
+ cls_series = PyObject_GetAttrString(mod_frame, "Series");
+ Py_DECREF(mod_frame);
+ }
- /* Initialise numpy API */
- import_array();
+ /* Initialise numpy API */
+ import_array();
}
static void *PyIntToINT32(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
{
- PyObject *obj = (PyObject *) _obj;
- *((JSINT32 *) outValue) = PyInt_AS_LONG (obj);
- return NULL;
+ PyObject *obj = (PyObject *) _obj;
+ *((JSINT32 *) outValue) = PyInt_AS_LONG (obj);
+ return NULL;
}
static void *PyIntToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
{
- PyObject *obj = (PyObject *) _obj;
- *((JSINT64 *) outValue) = PyInt_AS_LONG (obj);
- return NULL;
+ PyObject *obj = (PyObject *) _obj;
+ *((JSINT64 *) outValue) = PyInt_AS_LONG (obj);
+ return NULL;
}
static void *PyLongToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
{
- *((JSINT64 *) outValue) = GET_TC(tc)->longValue;
- return NULL;
+ *((JSINT64 *) outValue) = GET_TC(tc)->longValue;
+ return NULL;
}
static void *NpyFloatToDOUBLE(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
@@ -151,27 +194,27 @@ static void *NpyFloatToDOUBLE(JSOBJ _obj, JSONTypeContext *tc, void *outValue, s
static void *PyFloatToDOUBLE(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
{
- PyObject *obj = (PyObject *) _obj;
- *((double *) outValue) = PyFloat_AS_DOUBLE (obj);
- return NULL;
+ PyObject *obj = (PyObject *) _obj;
+ *((double *) outValue) = PyFloat_AsDouble (obj);
+ return NULL;
}
static void *PyStringToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
{
- PyObject *obj = (PyObject *) _obj;
- *_outLen = PyString_GET_SIZE(obj);
- return PyString_AS_STRING(obj);
+ PyObject *obj = (PyObject *) _obj;
+ *_outLen = PyString_GET_SIZE(obj);
+ return PyString_AS_STRING(obj);
}
static void *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
{
- PyObject *obj = (PyObject *) _obj;
- PyObject *newObj = PyUnicode_AsUTF8String (obj);
+ PyObject *obj = (PyObject *) _obj;
+ PyObject *newObj = PyUnicode_EncodeUTF8 (PyUnicode_AS_UNICODE(obj), PyUnicode_GET_SIZE(obj), NULL);
- GET_TC(tc)->newObj = newObj;
+ GET_TC(tc)->newObj = newObj;
- *_outLen = PyString_GET_SIZE(newObj);
- return PyString_AS_STRING(newObj);
+ *_outLen = PyString_GET_SIZE(newObj);
+ return PyString_AS_STRING(newObj);
}
static void *NpyDateTimeToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
@@ -183,32 +226,32 @@ static void *NpyDateTimeToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue,
static void *PyDateTimeToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
{
- pandas_datetimestruct dts;
- PyObject *obj = (PyObject *) _obj;
+ pandas_datetimestruct dts;
+ PyObject *obj = (PyObject *) _obj;
- dts.year = PyDateTime_GET_YEAR(obj);
- dts.month = PyDateTime_GET_MONTH(obj);
- dts.day = PyDateTime_GET_DAY(obj);
- dts.hour = PyDateTime_DATE_GET_HOUR(obj);
- dts.min = PyDateTime_DATE_GET_MINUTE(obj);
- dts.sec = PyDateTime_DATE_GET_SECOND(obj);
- dts.us = PyDateTime_DATE_GET_MICROSECOND(obj);
- dts.ps = dts.as = 0;
- *((JSINT64*)outValue) = (JSINT64) pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts);
- return NULL;
+ dts.year = PyDateTime_GET_YEAR(obj);
+ dts.month = PyDateTime_GET_MONTH(obj);
+ dts.day = PyDateTime_GET_DAY(obj);
+ dts.hour = PyDateTime_DATE_GET_HOUR(obj);
+ dts.min = PyDateTime_DATE_GET_MINUTE(obj);
+ dts.sec = PyDateTime_DATE_GET_SECOND(obj);
+ dts.us = PyDateTime_DATE_GET_MICROSECOND(obj);
+ dts.ps = dts.as = 0;
+ *((JSINT64*)outValue) = (JSINT64) pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts);
+ return NULL;
}
static void *PyDateToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
{
- pandas_datetimestruct dts;
- PyObject *obj = (PyObject *) _obj;
+ pandas_datetimestruct dts;
+ PyObject *obj = (PyObject *) _obj;
- dts.year = PyDateTime_GET_YEAR(obj);
- dts.month = PyDateTime_GET_MONTH(obj);
- dts.day = PyDateTime_GET_DAY(obj);
- dts.hour = dts.min = dts.sec = dts.ps = dts.as = 0;
- *((JSINT64*)outValue) = (JSINT64) pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts);
- return NULL;
+ dts.year = PyDateTime_GET_YEAR(obj);
+ dts.month = PyDateTime_GET_MONTH(obj);
+ dts.day = PyDateTime_GET_DAY(obj);
+ dts.hour = dts.min = dts.sec = dts.ps = dts.as = 0;
+ *((JSINT64*)outValue) = (JSINT64) pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts);
+ return NULL;
}
//=============================================================================
@@ -216,200 +259,200 @@ static void *PyDateToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size
//=============================================================================
int NpyArr_iterNextNone(JSOBJ _obj, JSONTypeContext *tc)
{
- return 0;
+ return 0;
}
void NpyArr_iterBegin(JSOBJ _obj, JSONTypeContext *tc)
{
- PyArrayObject *obj;
- PyArray_Descr *dtype;
- NpyArrContext *npyarr;
+ PyArrayObject *obj;
+ PyArray_Descr *dtype;
+ NpyArrContext *npyarr;
+
+ if (GET_TC(tc)->newObj)
+ {
+ obj = (PyArrayObject *) GET_TC(tc)->newObj;
+ }
+ else
+ {
+ obj = (PyArrayObject *) _obj;
+ }
+
+ if (PyArray_SIZE(obj) > 0)
+ {
+ PRINTMARK();
+ npyarr = PyObject_Malloc(sizeof(NpyArrContext));
+ GET_TC(tc)->npyarr = npyarr;
- if (GET_TC(tc)->newObj)
+ if (!npyarr)
{
- obj = (PyArrayObject *) GET_TC(tc)->newObj;
- }
- else
- {
- obj = (PyArrayObject *) _obj;
+ PyErr_NoMemory();
+ GET_TC(tc)->iterNext = NpyArr_iterNextNone;
+ return;
}
- if (PyArray_SIZE(obj) > 0)
- {
- PRINTMARK();
- npyarr = PyObject_Malloc(sizeof(NpyArrContext));
- GET_TC(tc)->npyarr = npyarr;
-
- if (!npyarr)
- {
- PyErr_NoMemory();
- GET_TC(tc)->iterNext = NpyArr_iterNextNone;
- return;
- }
-
- // uber hack to support datetime64[ns] arrays
- if (PyArray_DESCR(obj)->type_num == NPY_DATETIME) {
- npyarr->was_datetime64 = 1;
- dtype = PyArray_DescrFromType(NPY_INT64);
- obj = (PyArrayObject *) PyArray_CastToType(obj, dtype, 0);
- } else {
- npyarr->was_datetime64 = 0;
- }
+ // uber hack to support datetime64[ns] arrays
+ if (PyArray_DESCR(obj)->type_num == NPY_DATETIME) {
+ npyarr->was_datetime64 = 1;
+ dtype = PyArray_DescrFromType(NPY_INT64);
+ obj = (PyArrayObject *) PyArray_CastToType(obj, dtype, 0);
+ } else {
+ npyarr->was_datetime64 = 0;
+ }
- npyarr->array = (PyObject*) obj;
- npyarr->getitem = (PyArray_GetItemFunc*) PyArray_DESCR(obj)->f->getitem;
- npyarr->dataptr = PyArray_DATA(obj);
- npyarr->ndim = PyArray_NDIM(obj) - 1;
- npyarr->curdim = 0;
+ npyarr->array = (PyObject*) obj;
+ npyarr->getitem = (PyArray_GetItemFunc*) PyArray_DESCR(obj)->f->getitem;
+ npyarr->dataptr = PyArray_DATA(obj);
+ npyarr->ndim = PyArray_NDIM(obj) - 1;
+ npyarr->curdim = 0;
- if (GET_TC(tc)->transpose)
- {
- npyarr->dim = PyArray_DIM(obj, npyarr->ndim);
- npyarr->stride = PyArray_STRIDE(obj, npyarr->ndim);
- npyarr->stridedim = npyarr->ndim;
- npyarr->index[npyarr->ndim] = 0;
- npyarr->inc = -1;
- }
- else
- {
- npyarr->dim = PyArray_DIM(obj, 0);
- npyarr->stride = PyArray_STRIDE(obj, 0);
- npyarr->stridedim = 0;
- npyarr->index[0] = 0;
- npyarr->inc = 1;
- }
-
- npyarr->columnLabels = GET_TC(tc)->columnLabels;
- npyarr->rowLabels = GET_TC(tc)->rowLabels;
+ if (GET_TC(tc)->transpose)
+ {
+ npyarr->dim = PyArray_DIM(obj, npyarr->ndim);
+ npyarr->stride = PyArray_STRIDE(obj, npyarr->ndim);
+ npyarr->stridedim = npyarr->ndim;
+ npyarr->index[npyarr->ndim] = 0;
+ npyarr->inc = -1;
}
else
{
- GET_TC(tc)->iterNext = NpyArr_iterNextNone;
+ npyarr->dim = PyArray_DIM(obj, 0);
+ npyarr->stride = PyArray_STRIDE(obj, 0);
+ npyarr->stridedim = 0;
+ npyarr->index[0] = 0;
+ npyarr->inc = 1;
}
- PRINTMARK();
+
+ npyarr->columnLabels = GET_TC(tc)->columnLabels;
+ npyarr->rowLabels = GET_TC(tc)->rowLabels;
+ }
+ else
+ {
+ GET_TC(tc)->iterNext = NpyArr_iterNextNone;
+ }
+ PRINTMARK();
}
void NpyArr_iterEnd(JSOBJ obj, JSONTypeContext *tc)
{
- NpyArrContext *npyarr = GET_TC(tc)->npyarr;
-
- if (npyarr)
- {
- if (npyarr->was_datetime64) {
- Py_XDECREF(npyarr->array);
- }
+ NpyArrContext *npyarr = GET_TC(tc)->npyarr;
- if (GET_TC(tc)->itemValue != npyarr->array)
- {
- Py_XDECREF(GET_TC(tc)->itemValue);
- }
- GET_TC(tc)->itemValue = NULL;
+ if (npyarr)
+ {
+ if (npyarr->was_datetime64) {
+ Py_XDECREF(npyarr->array);
+ }
- PyObject_Free(npyarr);
+ if (GET_TC(tc)->itemValue != npyarr->array)
+ {
+ Py_XDECREF(GET_TC(tc)->itemValue);
}
- PRINTMARK();
+ GET_TC(tc)->itemValue = NULL;
+
+ PyObject_Free(npyarr);
+ }
+ PRINTMARK();
}
void NpyArrPassThru_iterBegin(JSOBJ obj, JSONTypeContext *tc)
{
- PRINTMARK();
+ PRINTMARK();
}
void NpyArrPassThru_iterEnd(JSOBJ obj, JSONTypeContext *tc)
{
- NpyArrContext* npyarr;
- PRINTMARK();
- // finished this dimension, reset the data pointer
- npyarr = GET_TC(tc)->npyarr;
- npyarr->curdim--;
- npyarr->dataptr -= npyarr->stride * npyarr->index[npyarr->stridedim];
- npyarr->stridedim -= npyarr->inc;
- npyarr->dim = PyArray_DIM(npyarr->array, npyarr->stridedim);
- npyarr->stride = PyArray_STRIDE(npyarr->array, npyarr->stridedim);
- npyarr->dataptr += npyarr->stride;
-
- if (GET_TC(tc)->itemValue != npyarr->array)
- {
- Py_XDECREF(GET_TC(tc)->itemValue);
- GET_TC(tc)->itemValue = NULL;
- }
+ NpyArrContext* npyarr;
+ PRINTMARK();
+ // finished this dimension, reset the data pointer
+ npyarr = GET_TC(tc)->npyarr;
+ npyarr->curdim--;
+ npyarr->dataptr -= npyarr->stride * npyarr->index[npyarr->stridedim];
+ npyarr->stridedim -= npyarr->inc;
+ npyarr->dim = PyArray_DIM(npyarr->array, npyarr->stridedim);
+ npyarr->stride = PyArray_STRIDE(npyarr->array, npyarr->stridedim);
+ npyarr->dataptr += npyarr->stride;
+
+ if (GET_TC(tc)->itemValue != npyarr->array)
+ {
+ Py_XDECREF(GET_TC(tc)->itemValue);
+ GET_TC(tc)->itemValue = NULL;
+ }
}
int NpyArr_iterNextItem(JSOBJ _obj, JSONTypeContext *tc)
{
- NpyArrContext* npyarr;
- PRINTMARK();
- npyarr = GET_TC(tc)->npyarr;
+ NpyArrContext* npyarr;
+ PRINTMARK();
+ npyarr = GET_TC(tc)->npyarr;
- if (GET_TC(tc)->itemValue != npyarr->array)
- {
- Py_XDECREF(GET_TC(tc)->itemValue);
- GET_TC(tc)->itemValue = NULL;
- }
+ if (GET_TC(tc)->itemValue != npyarr->array)
+ {
+ Py_XDECREF(GET_TC(tc)->itemValue);
+ GET_TC(tc)->itemValue = NULL;
+ }
- if (npyarr->index[npyarr->stridedim] >= npyarr->dim)
- {
- return 0;
- }
+ if (npyarr->index[npyarr->stridedim] >= npyarr->dim)
+ {
+ return 0;
+ }
- GET_TC(tc)->itemValue = npyarr->getitem(npyarr->dataptr, npyarr->array);
+ GET_TC(tc)->itemValue = npyarr->getitem(npyarr->dataptr, npyarr->array);
- npyarr->dataptr += npyarr->stride;
- npyarr->index[npyarr->stridedim]++;
- return 1;
+ npyarr->dataptr += npyarr->stride;
+ npyarr->index[npyarr->stridedim]++;
+ return 1;
}
int NpyArr_iterNext(JSOBJ _obj, JSONTypeContext *tc)
{
- NpyArrContext* npyarr;
- PRINTMARK();
- npyarr = GET_TC(tc)->npyarr;
+ NpyArrContext* npyarr;
+ PRINTMARK();
+ npyarr = GET_TC(tc)->npyarr;
- if (npyarr->curdim >= npyarr->ndim || npyarr->index[npyarr->stridedim] >= npyarr->dim)
- {
- // innermost dimension, start retrieving item values
- GET_TC(tc)->iterNext = NpyArr_iterNextItem;
- return NpyArr_iterNextItem(_obj, tc);
- }
+ if (npyarr->curdim >= npyarr->ndim || npyarr->index[npyarr->stridedim] >= npyarr->dim)
+ {
+ // innermost dimension, start retrieving item values
+ GET_TC(tc)->iterNext = NpyArr_iterNextItem;
+ return NpyArr_iterNextItem(_obj, tc);
+ }
- // dig a dimension deeper
- npyarr->index[npyarr->stridedim]++;
+ // dig a dimension deeper
+ npyarr->index[npyarr->stridedim]++;
- npyarr->curdim++;
- npyarr->stridedim += npyarr->inc;
- npyarr->dim = PyArray_DIM(npyarr->array, npyarr->stridedim);
- npyarr->stride = PyArray_STRIDE(npyarr->array, npyarr->stridedim);
- npyarr->index[npyarr->stridedim] = 0;
+ npyarr->curdim++;
+ npyarr->stridedim += npyarr->inc;
+ npyarr->dim = PyArray_DIM(npyarr->array, npyarr->stridedim);
+ npyarr->stride = PyArray_STRIDE(npyarr->array, npyarr->stridedim);
+ npyarr->index[npyarr->stridedim] = 0;
- ((PyObjectEncoder*) tc->encoder)->npyCtxtPassthru = npyarr;
- GET_TC(tc)->itemValue = npyarr->array;
- return 1;
+ ((PyObjectEncoder*) tc->encoder)->npyCtxtPassthru = npyarr;
+ GET_TC(tc)->itemValue = npyarr->array;
+ return 1;
}
JSOBJ NpyArr_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
{
- PRINTMARK();
- return GET_TC(tc)->itemValue;
+ PRINTMARK();
+ return GET_TC(tc)->itemValue;
}
char *NpyArr_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
{
- NpyArrContext* npyarr;
- npy_intp idx;
- PRINTMARK();
- npyarr = GET_TC(tc)->npyarr;
- if (GET_TC(tc)->iterNext == NpyArr_iterNextItem)
- {
- idx = npyarr->index[npyarr->stridedim] - 1;
- *outLen = strlen(npyarr->columnLabels[idx]);
- return npyarr->columnLabels[idx];
- }
- else
- {
- idx = npyarr->index[npyarr->stridedim - npyarr->inc] - 1;
- *outLen = strlen(npyarr->rowLabels[idx]);
- return npyarr->rowLabels[idx];
- }
+ NpyArrContext* npyarr;
+ npy_intp idx;
+ PRINTMARK();
+ npyarr = GET_TC(tc)->npyarr;
+ if (GET_TC(tc)->iterNext == NpyArr_iterNextItem)
+ {
+ idx = npyarr->index[npyarr->stridedim] - 1;
+ *outLen = strlen(npyarr->columnLabels[idx]);
+ return npyarr->columnLabels[idx];
+ }
+ else
+ {
+ idx = npyarr->index[npyarr->stridedim - npyarr->inc] - 1;
+ *outLen = strlen(npyarr->rowLabels[idx]);
+ return npyarr->rowLabels[idx];
+ }
}
//=============================================================================
@@ -418,25 +461,25 @@ char *NpyArr_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
//=============================================================================
void Tuple_iterBegin(JSOBJ obj, JSONTypeContext *tc)
{
- GET_TC(tc)->index = 0;
- GET_TC(tc)->size = PyTuple_GET_SIZE( (PyObject *) obj);
- GET_TC(tc)->itemValue = NULL;
+ GET_TC(tc)->index = 0;
+ GET_TC(tc)->size = PyTuple_GET_SIZE( (PyObject *) obj);
+ GET_TC(tc)->itemValue = NULL;
}
int Tuple_iterNext(JSOBJ obj, JSONTypeContext *tc)
{
- PyObject *item;
+ PyObject *item;
- if (GET_TC(tc)->index >= GET_TC(tc)->size)
- {
- return 0;
- }
+ if (GET_TC(tc)->index >= GET_TC(tc)->size)
+ {
+ return 0;
+ }
- item = PyTuple_GET_ITEM (obj, GET_TC(tc)->index);
+ item = PyTuple_GET_ITEM (obj, GET_TC(tc)->index);
- GET_TC(tc)->itemValue = item;
- GET_TC(tc)->index ++;
- return 1;
+ GET_TC(tc)->itemValue = item;
+ GET_TC(tc)->index ++;
+ return 1;
}
void Tuple_iterEnd(JSOBJ obj, JSONTypeContext *tc)
@@ -445,12 +488,68 @@ void Tuple_iterEnd(JSOBJ obj, JSONTypeContext *tc)
JSOBJ Tuple_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
{
- return GET_TC(tc)->itemValue;
+ return GET_TC(tc)->itemValue;
}
char *Tuple_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
{
- return NULL;
+ return NULL;
+}
+
+//=============================================================================
+// Iterator iteration functions
+// itemValue is borrowed reference, no ref counting
+//=============================================================================
+void Iter_iterBegin(JSOBJ obj, JSONTypeContext *tc)
+{
+ GET_TC(tc)->itemValue = NULL;
+ GET_TC(tc)->iterator = PyObject_GetIter(obj);
+}
+
+int Iter_iterNext(JSOBJ obj, JSONTypeContext *tc)
+{
+ PyObject *item;
+
+ if (GET_TC(tc)->itemValue)
+ {
+ Py_DECREF(GET_TC(tc)->itemValue);
+ GET_TC(tc)->itemValue = NULL;
+ }
+
+ item = PyIter_Next(GET_TC(tc)->iterator);
+
+ if (item == NULL)
+ {
+ return 0;
+ }
+
+ GET_TC(tc)->itemValue = item;
+ return 1;
+}
+
+void Iter_iterEnd(JSOBJ obj, JSONTypeContext *tc)
+{
+ if (GET_TC(tc)->itemValue)
+ {
+ Py_DECREF(GET_TC(tc)->itemValue);
+ GET_TC(tc)->itemValue = NULL;
+ }
+
+ if (GET_TC(tc)->iterator)
+ {
+ Py_DECREF(GET_TC(tc)->iterator);
+ GET_TC(tc)->iterator = NULL;
+ }
+}
+
+JSOBJ Iter_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
+{
+ return GET_TC(tc)->itemValue;
+}
+
+char *Iter_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
+{
+ return NULL;
}
//=============================================================================
@@ -460,97 +559,84 @@ char *Tuple_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
//=============================================================================
void Dir_iterBegin(JSOBJ obj, JSONTypeContext *tc)
{
- GET_TC(tc)->attrList = PyObject_Dir(obj);
- GET_TC(tc)->index = 0;
- GET_TC(tc)->size = PyList_GET_SIZE(GET_TC(tc)->attrList);
- PRINTMARK();
+ GET_TC(tc)->attrList = PyObject_Dir(obj);
+ GET_TC(tc)->index = 0;
+ GET_TC(tc)->size = PyList_GET_SIZE(GET_TC(tc)->attrList);
+ PRINTMARK();
}
void Dir_iterEnd(JSOBJ obj, JSONTypeContext *tc)
{
- if (GET_TC(tc)->itemValue)
- {
- Py_DECREF(GET_TC(tc)->itemValue);
- GET_TC(tc)->itemValue = NULL;
- }
+ if (GET_TC(tc)->itemValue)
+ {
+ Py_DECREF(GET_TC(tc)->itemValue);
+ GET_TC(tc)->itemValue = NULL;
+ }
- if (GET_TC(tc)->itemName)
- {
- Py_DECREF(GET_TC(tc)->itemName);
- GET_TC(tc)->itemName = NULL;
- }
+ if (GET_TC(tc)->itemName)
+ {
+ Py_DECREF(GET_TC(tc)->itemName);
+ GET_TC(tc)->itemName = NULL;
+ }
- Py_DECREF( (PyObject *) GET_TC(tc)->attrList);
- PRINTMARK();
+ Py_DECREF( (PyObject *) GET_TC(tc)->attrList);
+ PRINTMARK();
}
int Dir_iterNext(JSOBJ _obj, JSONTypeContext *tc)
{
- PyObject *obj = (PyObject *) _obj;
- PyObject *itemValue = GET_TC(tc)->itemValue;
- PyObject *itemName = GET_TC(tc)->itemName;
- PyObject* attr;
- PyObject* attrName;
- char* attrStr;
-
+ PyObject *obj = (PyObject *) _obj;
+ PyObject *itemValue = GET_TC(tc)->itemValue;
+ PyObject *itemName = GET_TC(tc)->itemName;
+ PyObject* attr;
+ PyObject* attrName;
+ char* attrStr;
+
+ if (itemValue)
+ {
+ Py_DECREF(GET_TC(tc)->itemValue);
+ GET_TC(tc)->itemValue = itemValue = NULL;
+ }
+
+ if (itemName)
+ {
+ Py_DECREF(GET_TC(tc)->itemName);
+ GET_TC(tc)->itemName = itemName = NULL;
+ }
+
+ for (; GET_TC(tc)->index < GET_TC(tc)->size; GET_TC(tc)->index ++)
+ {
+ attrName = PyList_GET_ITEM(GET_TC(tc)->attrList, GET_TC(tc)->index);
+#if PY_MAJOR_VERSION >= 3
+ attr = PyUnicode_AsUTF8String(attrName);
+#else
+ attr = attrName;
+ Py_INCREF(attr);
+#endif
+ attrStr = PyString_AS_STRING(attr);
- if (itemValue)
+ if (attrStr[0] == '_')
{
- Py_DECREF(GET_TC(tc)->itemValue);
- GET_TC(tc)->itemValue = itemValue = NULL;
+ PRINTMARK();
+ Py_DECREF(attr);
+ continue;
}
- if (itemName)
+ itemValue = PyObject_GetAttr(obj, attrName);
+ if (itemValue == NULL)
{
- Py_DECREF(GET_TC(tc)->itemName);
- GET_TC(tc)->itemName = itemName = NULL;
+ PyErr_Clear();
+ Py_DECREF(attr);
+ PRINTMARK();
+ continue;
}
- for (; GET_TC(tc)->index < GET_TC(tc)->size; GET_TC(tc)->index ++)
+ if (PyCallable_Check(itemValue))
{
- attrName = PyList_GET_ITEM(GET_TC(tc)->attrList, GET_TC(tc)->index);
-#if PY_MAJOR_VERSION >= 3
- attr = PyUnicode_AsUTF8String(attrName);
-#else
- attr = attrName;
- Py_INCREF(attr);
-#endif
- attrStr = PyString_AS_STRING(attr);
-
- if (attrStr[0] == '_')
- {
- PRINTMARK();
- Py_DECREF(attr);
- continue;
- }
-
- itemValue = PyObject_GetAttr(obj, attrName);
- if (itemValue == NULL)
- {
- PyErr_Clear();
- Py_DECREF(attr);
- PRINTMARK();
- continue;
- }
-
- if (PyCallable_Check(itemValue))
- {
- Py_DECREF(itemValue);
- Py_DECREF(attr);
- PRINTMARK();
- continue;
- }
-
- PRINTMARK();
- itemName = attr;
- break;
- }
-
- if (itemName == NULL)
- {
- GET_TC(tc)->index = GET_TC(tc)->size;
- GET_TC(tc)->itemValue = NULL;
- return 0;
+ Py_DECREF(itemValue);
+ Py_DECREF(attr);
+ PRINTMARK();
+ continue;
}
GET_TC(tc)->itemName = itemName;
@@ -558,48 +644,60 @@ int Dir_iterNext(JSOBJ _obj, JSONTypeContext *tc)
GET_TC(tc)->index ++;
PRINTMARK();
- return 1;
-}
+ itemName = attr;
+ break;
+ }
+ if (itemName == NULL)
+ {
+ GET_TC(tc)->index = GET_TC(tc)->size;
+ GET_TC(tc)->itemValue = NULL;
+ return 0;
+ }
+
+ GET_TC(tc)->itemName = itemName;
+ GET_TC(tc)->itemValue = itemValue;
+ GET_TC(tc)->index ++;
+ PRINTMARK();
+ return 1;
+}
JSOBJ Dir_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
{
- PRINTMARK();
- return GET_TC(tc)->itemValue;
+ PRINTMARK();
+ return GET_TC(tc)->itemValue;
}
char *Dir_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
{
- PRINTMARK();
- *outLen = PyString_GET_SIZE(GET_TC(tc)->itemName);
- return PyString_AS_STRING(GET_TC(tc)->itemName);
+ PRINTMARK();
+ *outLen = PyString_GET_SIZE(GET_TC(tc)->itemName);
+ return PyString_AS_STRING(GET_TC(tc)->itemName);
}
-
-
//=============================================================================
// List iteration functions
// itemValue is borrowed from object (which is list). No refcounting
//=============================================================================
void List_iterBegin(JSOBJ obj, JSONTypeContext *tc)
{
- GET_TC(tc)->index = 0;
- GET_TC(tc)->size = PyList_GET_SIZE( (PyObject *) obj);
+ GET_TC(tc)->index = 0;
+ GET_TC(tc)->size = PyList_GET_SIZE( (PyObject *) obj);
}
int List_iterNext(JSOBJ obj, JSONTypeContext *tc)
{
- if (GET_TC(tc)->index >= GET_TC(tc)->size)
- {
- PRINTMARK();
- return 0;
- }
+ if (GET_TC(tc)->index >= GET_TC(tc)->size)
+ {
+ PRINTMARK();
+ return 0;
+ }
- GET_TC(tc)->itemValue = PyList_GET_ITEM (obj, GET_TC(tc)->index);
- GET_TC(tc)->index ++;
- return 1;
+ GET_TC(tc)->itemValue = PyList_GET_ITEM (obj, GET_TC(tc)->index);
+ GET_TC(tc)->index ++;
+ return 1;
}
void List_iterEnd(JSOBJ obj, JSONTypeContext *tc)
@@ -608,12 +706,12 @@ void List_iterEnd(JSOBJ obj, JSONTypeContext *tc)
JSOBJ List_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
{
- return GET_TC(tc)->itemValue;
+ return GET_TC(tc)->itemValue;
}
char *List_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
{
- return NULL;
+ return NULL;
}
//=============================================================================
@@ -621,65 +719,65 @@ char *List_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
//=============================================================================
void Index_iterBegin(JSOBJ obj, JSONTypeContext *tc)
{
- GET_TC(tc)->index = 0;
- GET_TC(tc)->citemName = PyObject_Malloc(20 * sizeof(char));
- if (!GET_TC(tc)->citemName)
- {
- PyErr_NoMemory();
- }
- PRINTMARK();
+ GET_TC(tc)->index = 0;
+ GET_TC(tc)->citemName = PyObject_Malloc(20 * sizeof(char));
+ if (!GET_TC(tc)->citemName)
+ {
+ PyErr_NoMemory();
+ }
+ PRINTMARK();
}
int Index_iterNext(JSOBJ obj, JSONTypeContext *tc)
{
- Py_ssize_t index;
- if (!GET_TC(tc)->citemName)
- {
- return 0;
- }
-
- index = GET_TC(tc)->index;
- Py_XDECREF(GET_TC(tc)->itemValue);
- if (index == 0)
- {
- memcpy(GET_TC(tc)->citemName, "name", sizeof(char)*5);
- GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "name");
- }
- else
+ Py_ssize_t index;
+ if (!GET_TC(tc)->citemName)
+ {
+ return 0;
+ }
+
+ index = GET_TC(tc)->index;
+ Py_XDECREF(GET_TC(tc)->itemValue);
+ if (index == 0)
+ {
+ memcpy(GET_TC(tc)->citemName, "name", sizeof(char)*5);
+ GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "name");
+ }
+ else
if (index == 1)
{
- memcpy(GET_TC(tc)->citemName, "data", sizeof(char)*5);
- GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "values");
+ memcpy(GET_TC(tc)->citemName, "data", sizeof(char)*5);
+ GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "values");
}
else
{
- PRINTMARK();
- return 0;
+ PRINTMARK();
+ return 0;
}
- GET_TC(tc)->index++;
- PRINTMARK();
- return 1;
+ GET_TC(tc)->index++;
+ PRINTMARK();
+ return 1;
}
void Index_iterEnd(JSOBJ obj, JSONTypeContext *tc)
{
- if (GET_TC(tc)->citemName)
- {
- PyObject_Free(GET_TC(tc)->citemName);
- }
- PRINTMARK();
+ if (GET_TC(tc)->citemName)
+ {
+ PyObject_Free(GET_TC(tc)->citemName);
+ }
+ PRINTMARK();
}
JSOBJ Index_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
{
- return GET_TC(tc)->itemValue;
+ return GET_TC(tc)->itemValue;
}
char *Index_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
{
- *outLen = strlen(GET_TC(tc)->citemName);
- return GET_TC(tc)->citemName;
+ *outLen = strlen(GET_TC(tc)->citemName);
+ return GET_TC(tc)->citemName;
}
//=============================================================================
@@ -687,75 +785,75 @@ char *Index_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
//=============================================================================
void Series_iterBegin(JSOBJ obj, JSONTypeContext *tc)
{
- PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder;
- GET_TC(tc)->index = 0;
- GET_TC(tc)->citemName = PyObject_Malloc(20 * sizeof(char));
- enc->outputFormat = VALUES; // for contained series
- if (!GET_TC(tc)->citemName)
- {
- PyErr_NoMemory();
- }
- PRINTMARK();
+ PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder;
+ GET_TC(tc)->index = 0;
+ GET_TC(tc)->citemName = PyObject_Malloc(20 * sizeof(char));
+ enc->outputFormat = VALUES; // for contained series
+ if (!GET_TC(tc)->citemName)
+ {
+ PyErr_NoMemory();
+ }
+ PRINTMARK();
}
int Series_iterNext(JSOBJ obj, JSONTypeContext *tc)
{
- Py_ssize_t index;
- if (!GET_TC(tc)->citemName)
- {
- return 0;
- }
-
- index = GET_TC(tc)->index;
- Py_XDECREF(GET_TC(tc)->itemValue);
- if (index == 0)
- {
- memcpy(GET_TC(tc)->citemName, "name", sizeof(char)*5);
- GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "name");
- }
- else
- if (index == 1)
- {
- memcpy(GET_TC(tc)->citemName, "index", sizeof(char)*6);
- GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "index");
- }
- else
- if (index == 2)
- {
- memcpy(GET_TC(tc)->citemName, "data", sizeof(char)*5);
- GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "values");
- }
- else
- {
- PRINTMARK();
- return 0;
- }
-
- GET_TC(tc)->index++;
+ Py_ssize_t index;
+ if (!GET_TC(tc)->citemName)
+ {
+ return 0;
+ }
+
+ index = GET_TC(tc)->index;
+ Py_XDECREF(GET_TC(tc)->itemValue);
+ if (index == 0)
+ {
+ memcpy(GET_TC(tc)->citemName, "name", sizeof(char)*5);
+ GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "name");
+ }
+ else
+ if (index == 1)
+ {
+ memcpy(GET_TC(tc)->citemName, "index", sizeof(char)*6);
+ GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "index");
+ }
+ else
+ if (index == 2)
+ {
+ memcpy(GET_TC(tc)->citemName, "data", sizeof(char)*5);
+ GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "values");
+ }
+ else
+ {
PRINTMARK();
- return 1;
+ return 0;
+ }
+
+ GET_TC(tc)->index++;
+ PRINTMARK();
+ return 1;
}
void Series_iterEnd(JSOBJ obj, JSONTypeContext *tc)
{
- PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder;
- enc->outputFormat = enc->originalOutputFormat;
- if (GET_TC(tc)->citemName)
- {
- PyObject_Free(GET_TC(tc)->citemName);
- }
- PRINTMARK();
+ PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder;
+ enc->outputFormat = enc->originalOutputFormat;
+ if (GET_TC(tc)->citemName)
+ {
+ PyObject_Free(GET_TC(tc)->citemName);
+ }
+ PRINTMARK();
}
JSOBJ Series_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
{
- return GET_TC(tc)->itemValue;
+ return GET_TC(tc)->itemValue;
}
char *Series_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
{
- *outLen = strlen(GET_TC(tc)->citemName);
- return GET_TC(tc)->citemName;
+ *outLen = strlen(GET_TC(tc)->citemName);
+ return GET_TC(tc)->citemName;
}
//=============================================================================
@@ -763,75 +861,75 @@ char *Series_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
//=============================================================================
void DataFrame_iterBegin(JSOBJ obj, JSONTypeContext *tc)
{
- PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder;
- GET_TC(tc)->index = 0;
- GET_TC(tc)->citemName = PyObject_Malloc(20 * sizeof(char));
- enc->outputFormat = VALUES; // for contained series & index
- if (!GET_TC(tc)->citemName)
- {
- PyErr_NoMemory();
- }
- PRINTMARK();
+ PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder;
+ GET_TC(tc)->index = 0;
+ GET_TC(tc)->citemName = PyObject_Malloc(20 * sizeof(char));
+ enc->outputFormat = VALUES; // for contained series & index
+ if (!GET_TC(tc)->citemName)
+ {
+ PyErr_NoMemory();
+ }
+ PRINTMARK();
}
int DataFrame_iterNext(JSOBJ obj, JSONTypeContext *tc)
{
- Py_ssize_t index;
- if (!GET_TC(tc)->citemName)
- {
- return 0;
- }
-
- index = GET_TC(tc)->index;
- Py_XDECREF(GET_TC(tc)->itemValue);
- if (index == 0)
- {
- memcpy(GET_TC(tc)->citemName, "columns", sizeof(char)*8);
- GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "columns");
- }
- else
+ Py_ssize_t index;
+ if (!GET_TC(tc)->citemName)
+ {
+ return 0;
+ }
+
+ index = GET_TC(tc)->index;
+ Py_XDECREF(GET_TC(tc)->itemValue);
+ if (index == 0)
+ {
+ memcpy(GET_TC(tc)->citemName, "columns", sizeof(char)*8);
+ GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "columns");
+ }
+ else
if (index == 1)
{
- memcpy(GET_TC(tc)->citemName, "index", sizeof(char)*6);
- GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "index");
+ memcpy(GET_TC(tc)->citemName, "index", sizeof(char)*6);
+ GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "index");
}
else
- if (index == 2)
- {
+ if (index == 2)
+ {
memcpy(GET_TC(tc)->citemName, "data", sizeof(char)*5);
GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "values");
- }
- else
- {
+ }
+ else
+ {
PRINTMARK();
return 0;
- }
+ }
- GET_TC(tc)->index++;
- PRINTMARK();
- return 1;
+ GET_TC(tc)->index++;
+ PRINTMARK();
+ return 1;
}
void DataFrame_iterEnd(JSOBJ obj, JSONTypeContext *tc)
{
- PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder;
- enc->outputFormat = enc->originalOutputFormat;
- if (GET_TC(tc)->citemName)
- {
- PyObject_Free(GET_TC(tc)->citemName);
- }
- PRINTMARK();
+ PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder;
+ enc->outputFormat = enc->originalOutputFormat;
+ if (GET_TC(tc)->citemName)
+ {
+ PyObject_Free(GET_TC(tc)->citemName);
+ }
+ PRINTMARK();
}
JSOBJ DataFrame_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
{
- return GET_TC(tc)->itemValue;
+ return GET_TC(tc)->itemValue;
}
char *DataFrame_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
{
- *outLen = strlen(GET_TC(tc)->citemName);
- return GET_TC(tc)->citemName;
+ *outLen = strlen(GET_TC(tc)->citemName);
+ return GET_TC(tc)->citemName;
}
//=============================================================================
@@ -841,46 +939,46 @@ char *DataFrame_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
//=============================================================================
void Dict_iterBegin(JSOBJ obj, JSONTypeContext *tc)
{
- GET_TC(tc)->index = 0;
- PRINTMARK();
+ GET_TC(tc)->index = 0;
+ PRINTMARK();
}
int Dict_iterNext(JSOBJ obj, JSONTypeContext *tc)
{
#if PY_MAJOR_VERSION >= 3
- PyObject* itemNameTmp;
+ PyObject* itemNameTmp;
#endif
- if (GET_TC(tc)->itemName)
- {
- Py_DECREF(GET_TC(tc)->itemName);
- GET_TC(tc)->itemName = NULL;
- }
+ if (GET_TC(tc)->itemName)
+ {
+ Py_DECREF(GET_TC(tc)->itemName);
+ GET_TC(tc)->itemName = NULL;
+ }
- if (!PyDict_Next ( (PyObject *)GET_TC(tc)->dictObj, &GET_TC(tc)->index, &GET_TC(tc)->itemName, &GET_TC(tc)->itemValue))
- {
- PRINTMARK();
- return 0;
- }
+ if (!PyDict_Next ( (PyObject *)GET_TC(tc)->dictObj, &GET_TC(tc)->index, &GET_TC(tc)->itemName, &GET_TC(tc)->itemValue))
+ {
+ PRINTMARK();
+ return 0;
+ }
- if (PyUnicode_Check(GET_TC(tc)->itemName))
- {
- GET_TC(tc)->itemName = PyUnicode_AsUTF8String (GET_TC(tc)->itemName);
- }
- else
+ if (PyUnicode_Check(GET_TC(tc)->itemName))
+ {
+ GET_TC(tc)->itemName = PyUnicode_AsUTF8String (GET_TC(tc)->itemName);
+ }
+ else
if (!PyString_Check(GET_TC(tc)->itemName))
{
- GET_TC(tc)->itemName = PyObject_Str(GET_TC(tc)->itemName);
+ GET_TC(tc)->itemName = PyObject_Str(GET_TC(tc)->itemName);
#if PY_MAJOR_VERSION >= 3
- itemNameTmp = GET_TC(tc)->itemName;
- GET_TC(tc)->itemName = PyUnicode_AsUTF8String (GET_TC(tc)->itemName);
- Py_DECREF(itemNameTmp);
+ itemNameTmp = GET_TC(tc)->itemName;
+ GET_TC(tc)->itemName = PyUnicode_AsUTF8String (GET_TC(tc)->itemName);
+ Py_DECREF(itemNameTmp);
#endif
}
else
{
- Py_INCREF(GET_TC(tc)->itemName);
+ Py_INCREF(GET_TC(tc)->itemName);
}
PRINTMARK();
return 1;
@@ -888,24 +986,24 @@ int Dict_iterNext(JSOBJ obj, JSONTypeContext *tc)
void Dict_iterEnd(JSOBJ obj, JSONTypeContext *tc)
{
- if (GET_TC(tc)->itemName)
- {
- Py_DECREF(GET_TC(tc)->itemName);
- GET_TC(tc)->itemName = NULL;
- }
- Py_DECREF(GET_TC(tc)->dictObj);
- PRINTMARK();
+ if (GET_TC(tc)->itemName)
+ {
+ Py_DECREF(GET_TC(tc)->itemName);
+ GET_TC(tc)->itemName = NULL;
+ }
+ Py_DECREF(GET_TC(tc)->dictObj);
+ PRINTMARK();
}
JSOBJ Dict_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
{
- return GET_TC(tc)->itemValue;
+ return GET_TC(tc)->itemValue;
}
char *Dict_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
{
- *outLen = PyString_GET_SIZE(GET_TC(tc)->itemName);
- return PyString_AS_STRING(GET_TC(tc)->itemName);
+ *outLen = PyString_GET_SIZE(GET_TC(tc)->itemName);
+ return PyString_AS_STRING(GET_TC(tc)->itemName);
}
void NpyArr_freeLabels(char** labels, npy_intp len)
@@ -1023,433 +1121,456 @@ char** NpyArr_encodeLabels(PyArrayObject* labels, JSONObjectEncoder* enc, npy_in
void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc)
{
- PyObject *obj, *exc, *toDictFunc;
- TypeContext *pc;
- PyObjectEncoder *enc;
- double val;
- PRINTMARK();
- if (!_obj) {
- tc->type = JT_INVALID;
- return;
- }
+ PyObject *obj, *exc, *toDictFunc;
+ TypeContext *pc;
+ PyObjectEncoder *enc;
+ double val;
+ PRINTMARK();
+ if (!_obj) {
+ tc->type = JT_INVALID;
+ return;
+ }
- obj = (PyObject*) _obj;
- enc = (PyObjectEncoder*) tc->encoder;
+ obj = (PyObject*) _obj;
+ enc = (PyObjectEncoder*) tc->encoder;
- tc->prv = PyObject_Malloc(sizeof(TypeContext));
- pc = (TypeContext *) tc->prv;
- if (!pc)
- {
- tc->type = JT_INVALID;
- PyErr_NoMemory();
- return;
- }
- pc->newObj = NULL;
- pc->dictObj = NULL;
- pc->itemValue = NULL;
- pc->itemName = NULL;
- pc->attrList = NULL;
- pc->citemName = NULL;
- pc->npyarr = NULL;
- pc->rowLabels = NULL;
- pc->columnLabels = NULL;
- pc->index = 0;
- pc->size = 0;
- pc->longValue = 0;
- pc->transpose = 0;
- pc->rowLabelsLen = 0;
- pc->columnLabelsLen = 0;
-
- if (PyIter_Check(obj) || PyArray_Check(obj))
- {
- goto ISITERABLE;
- }
+ tc->prv = PyObject_Malloc(sizeof(TypeContext));
+ pc = (TypeContext *) tc->prv;
+ if (!pc)
+ {
+ tc->type = JT_INVALID;
+ PyErr_NoMemory();
+ return;
+ }
+ pc->newObj = NULL;
+ pc->dictObj = NULL;
+ pc->itemValue = NULL;
+ pc->itemName = NULL;
+ pc->attrList = NULL;
+ pc->index = 0;
+ pc->size = 0;
+ pc->longValue = 0;
+ pc->citemName = NULL;
+ pc->npyarr = NULL;
+ pc->rowLabels = NULL;
+ pc->columnLabels = NULL;
+ pc->transpose = 0;
+ pc->rowLabelsLen = 0;
+ pc->columnLabelsLen = 0;
+
+ if (PyIter_Check(obj))
+ {
+ PRINTMARK();
+ goto ISITERABLE;
+ }
- if (PyBool_Check(obj))
- {
- PRINTMARK();
- tc->type = (obj == Py_True) ? JT_TRUE : JT_FALSE;
- return;
- }
- else
- if (PyLong_Check(obj))
- {
- PRINTMARK();
- pc->PyTypeToJSON = PyLongToINT64;
- tc->type = JT_LONG;
- GET_TC(tc)->longValue = PyLong_AsLongLong(obj);
+ if (PyIter_Check(obj) || PyArray_Check(obj))
+ {
+ goto ISITERABLE;
+ }
- exc = PyErr_Occurred();
+ if (PyBool_Check(obj))
+ {
+ PRINTMARK();
+ tc->type = (obj == Py_True) ? JT_TRUE : JT_FALSE;
+ return;
+ }
+ else
+ if (PyLong_Check(obj))
+ {
+ PRINTMARK();
+ pc->PyTypeToJSON = PyLongToINT64;
+ tc->type = JT_LONG;
+ GET_TC(tc)->longValue = PyLong_AsLongLong(obj);
- if (exc && PyErr_ExceptionMatches(PyExc_OverflowError))
- {
- PRINTMARK();
- goto INVALID;
- }
+ exc = PyErr_Occurred();
- return;
- }
- else
- if (PyInt_Check(obj))
+ if (exc && PyErr_ExceptionMatches(PyExc_OverflowError))
{
- PRINTMARK();
+ PRINTMARK();
+ goto INVALID;
+ }
+
+ return;
+ }
+ else
+ if (PyInt_Check(obj))
+ {
+ PRINTMARK();
#ifdef _LP64
- pc->PyTypeToJSON = PyIntToINT64; tc->type = JT_LONG;
+ pc->PyTypeToJSON = PyIntToINT64; tc->type = JT_LONG;
#else
- pc->PyTypeToJSON = PyIntToINT32; tc->type = JT_INT;
+ pc->PyTypeToJSON = PyIntToINT32; tc->type = JT_INT;
#endif
- return;
- }
- else
- if (PyArray_IsScalar(obj, Integer))
- {
- PRINTMARK();
- pc->PyTypeToJSON = PyLongToINT64;
- tc->type = JT_LONG;
- PyArray_CastScalarToCtype(obj, &(GET_TC(tc)->longValue), PyArray_DescrFromType(NPY_INT64));
-
- exc = PyErr_Occurred();
+ return;
+ }
+ else
+ if (PyArray_IsScalar(obj, Integer))
+ {
+ PRINTMARK();
+ pc->PyTypeToJSON = PyLongToINT64;
+ tc->type = JT_LONG;
+ PyArray_CastScalarToCtype(obj, &(GET_TC(tc)->longValue), PyArray_DescrFromType(NPY_INT64));
- if (exc && PyErr_ExceptionMatches(PyExc_OverflowError))
- {
- PRINTMARK();
- goto INVALID;
- }
+ exc = PyErr_Occurred();
- return;
- }
- else
- if (PyString_Check(obj))
- {
- PRINTMARK();
- pc->PyTypeToJSON = PyStringToUTF8; tc->type = JT_UTF8;
- return;
- }
- else
- if (PyUnicode_Check(obj))
- {
- PRINTMARK();
- pc->PyTypeToJSON = PyUnicodeToUTF8; tc->type = JT_UTF8;
- return;
- }
- else
- if (PyFloat_Check(obj))
- {
- PRINTMARK();
- val = PyFloat_AS_DOUBLE (obj);
- if (npy_isnan(val) || npy_isinf(val))
- {
- tc->type = JT_NULL;
- }
- else
- {
- pc->PyTypeToJSON = PyFloatToDOUBLE; tc->type = JT_DOUBLE;
- }
- return;
- }
- else
- if (PyArray_IsScalar(obj, Float))
- {
- PRINTMARK();
- pc->PyTypeToJSON = NpyFloatToDOUBLE; tc->type = JT_DOUBLE;
- return;
- }
- else
- if (PyArray_IsScalar(obj, Datetime))
+ if (exc && PyErr_ExceptionMatches(PyExc_OverflowError))
{
- PRINTMARK();
- pc->PyTypeToJSON = NpyDateTimeToINT64; tc->type = JT_LONG;
- return;
+ PRINTMARK();
+ goto INVALID;
}
- else
- if (PyDateTime_Check(obj))
- {
- PRINTMARK();
- pc->PyTypeToJSON = PyDateTimeToINT64; tc->type = JT_LONG;
- return;
- }
- else
- if (PyDate_Check(obj))
+
+ return;
+ }
+ else
+ if (PyString_Check(obj))
+ {
+ PRINTMARK();
+ pc->PyTypeToJSON = PyStringToUTF8; tc->type = JT_UTF8;
+ return;
+ }
+ else
+ if (PyUnicode_Check(obj))
+ {
+ PRINTMARK();
+ pc->PyTypeToJSON = PyUnicodeToUTF8; tc->type = JT_UTF8;
+ return;
+ }
+ else
+ if (PyFloat_Check(obj))
+ {
+ PRINTMARK();
+ val = PyFloat_AS_DOUBLE (obj);
+ if (npy_isnan(val) || npy_isinf(val))
{
- PRINTMARK();
- pc->PyTypeToJSON = PyDateToINT64; tc->type = JT_LONG;
- return;
+ tc->type = JT_NULL;
}
else
- if (obj == Py_None)
{
- PRINTMARK();
- tc->type = JT_NULL;
- return;
+ pc->PyTypeToJSON = PyFloatToDOUBLE; tc->type = JT_DOUBLE;
}
+ return;
+ }
+ else
+ if (PyObject_IsInstance(obj, type_decimal))
+ {
+ PRINTMARK();
+ pc->PyTypeToJSON = PyFloatToDOUBLE; tc->type = JT_DOUBLE;
+ return;
+ }
+ else
+ if (PyArray_IsScalar(obj, Float))
+ {
+ PRINTMARK();
+ pc->PyTypeToJSON = NpyFloatToDOUBLE; tc->type = JT_DOUBLE;
+ return;
+ }
+ else
+ if (PyArray_IsScalar(obj, Datetime))
+ {
+ PRINTMARK();
+ pc->PyTypeToJSON = NpyDateTimeToINT64; tc->type = JT_LONG;
+ return;
+ }
+ else
+ if (PyDateTime_Check(obj))
+ {
+ PRINTMARK();
+ pc->PyTypeToJSON = PyDateTimeToINT64; tc->type = JT_LONG;
+ return;
+ }
+ else
+ if (PyDate_Check(obj))
+ {
+ PRINTMARK();
+ pc->PyTypeToJSON = PyDateToINT64; tc->type = JT_LONG;
+ return;
+ }
+ else
+ if (obj == Py_None)
+ {
+ PRINTMARK();
+ tc->type = JT_NULL;
+ return;
+ }
ISITERABLE:
- if (PyDict_Check(obj))
- {
- PRINTMARK();
- tc->type = JT_OBJECT;
- pc->iterBegin = Dict_iterBegin;
- pc->iterEnd = Dict_iterEnd;
- pc->iterNext = Dict_iterNext;
- pc->iterGetValue = Dict_iterGetValue;
- pc->iterGetName = Dict_iterGetName;
- pc->dictObj = obj;
- Py_INCREF(obj);
-
- return;
+ if (PyDict_Check(obj))
+ {
+ PRINTMARK();
+ tc->type = JT_OBJECT;
+ pc->iterBegin = Dict_iterBegin;
+ pc->iterEnd = Dict_iterEnd;
+ pc->iterNext = Dict_iterNext;
+ pc->iterGetValue = Dict_iterGetValue;
+ pc->iterGetName = Dict_iterGetName;
+ pc->dictObj = obj;
+ Py_INCREF(obj);
+
+ return;
+ }
+ else
+ if (PyList_Check(obj))
+ {
+ PRINTMARK();
+ tc->type = JT_ARRAY;
+ pc->iterBegin = List_iterBegin;
+ pc->iterEnd = List_iterEnd;
+ pc->iterNext = List_iterNext;
+ pc->iterGetValue = List_iterGetValue;
+ pc->iterGetName = List_iterGetName;
+ return;
+ }
+ else
+ if (PyTuple_Check(obj))
+ {
+ PRINTMARK();
+ tc->type = JT_ARRAY;
+ pc->iterBegin = Tuple_iterBegin;
+ pc->iterEnd = Tuple_iterEnd;
+ pc->iterNext = Tuple_iterNext;
+ pc->iterGetValue = Tuple_iterGetValue;
+ pc->iterGetName = Tuple_iterGetName;
+ return;
+ }
+ else
+ if (PyAnySet_Check(obj))
+ {
+ PRINTMARK();
+ tc->type = JT_ARRAY;
+ pc->iterBegin = Iter_iterBegin;
+ pc->iterEnd = Iter_iterEnd;
+ pc->iterNext = Iter_iterNext;
+ pc->iterGetValue = Iter_iterGetValue;
+ pc->iterGetName = Iter_iterGetName;
+ return;
+ }
+ else
+ if (PyObject_TypeCheck(obj, (PyTypeObject*) cls_index))
+ {
+ if (enc->outputFormat == SPLIT)
+ {
+ PRINTMARK();
+ tc->type = JT_OBJECT;
+ pc->iterBegin = Index_iterBegin;
+ pc->iterEnd = Index_iterEnd;
+ pc->iterNext = Index_iterNext;
+ pc->iterGetValue = Index_iterGetValue;
+ pc->iterGetName = Index_iterGetName;
+ return;
}
- else
- if (PyList_Check(obj))
- {
- PRINTMARK();
- tc->type = JT_ARRAY;
- pc->iterBegin = List_iterBegin;
- pc->iterEnd = List_iterEnd;
- pc->iterNext = List_iterNext;
- pc->iterGetValue = List_iterGetValue;
- pc->iterGetName = List_iterGetName;
- return;
+
+ PRINTMARK();
+ tc->type = JT_ARRAY;
+ pc->newObj = PyObject_GetAttrString(obj, "values");
+ pc->iterBegin = NpyArr_iterBegin;
+ pc->iterEnd = NpyArr_iterEnd;
+ pc->iterNext = NpyArr_iterNext;
+ pc->iterGetValue = NpyArr_iterGetValue;
+ pc->iterGetName = NpyArr_iterGetName;
+ return;
+ }
+ else
+ if (PyObject_TypeCheck(obj, (PyTypeObject*) cls_series))
+ {
+ if (enc->outputFormat == SPLIT)
+ {
+ PRINTMARK();
+ tc->type = JT_OBJECT;
+ pc->iterBegin = Series_iterBegin;
+ pc->iterEnd = Series_iterEnd;
+ pc->iterNext = Series_iterNext;
+ pc->iterGetValue = Series_iterGetValue;
+ pc->iterGetName = Series_iterGetName;
+ return;
+ }
+
+ if (enc->outputFormat == INDEX || enc->outputFormat == COLUMNS)
+ {
+ PRINTMARK();
+ tc->type = JT_OBJECT;
+ pc->columnLabelsLen = PyArray_SIZE(obj);
+ pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "index"), (JSONObjectEncoder*) enc, pc->columnLabelsLen);
+ if (!pc->columnLabels)
+ {
+ goto INVALID;
+ }
}
else
- if (PyTuple_Check(obj))
{
- PRINTMARK();
- tc->type = JT_ARRAY;
- pc->iterBegin = Tuple_iterBegin;
- pc->iterEnd = Tuple_iterEnd;
- pc->iterNext = Tuple_iterNext;
- pc->iterGetValue = Tuple_iterGetValue;
- pc->iterGetName = Tuple_iterGetName;
- return;
+ PRINTMARK();
+ tc->type = JT_ARRAY;
+ }
+ pc->newObj = PyObject_GetAttrString(obj, "values");
+ pc->iterBegin = NpyArr_iterBegin;
+ pc->iterEnd = NpyArr_iterEnd;
+ pc->iterNext = NpyArr_iterNext;
+ pc->iterGetValue = NpyArr_iterGetValue;
+ pc->iterGetName = NpyArr_iterGetName;
+ return;
+ }
+ else
+ if (PyArray_Check(obj))
+ {
+ if (enc->npyCtxtPassthru)
+ {
+ PRINTMARK();
+ pc->npyarr = enc->npyCtxtPassthru;
+ tc->type = (pc->npyarr->columnLabels ? JT_OBJECT : JT_ARRAY);
+ pc->iterBegin = NpyArrPassThru_iterBegin;
+ pc->iterEnd = NpyArrPassThru_iterEnd;
+ pc->iterNext = NpyArr_iterNext;
+ pc->iterGetValue = NpyArr_iterGetValue;
+ pc->iterGetName = NpyArr_iterGetName;
+ enc->npyCtxtPassthru = NULL;
+ return;
}
- else
- if (PyObject_TypeCheck(obj, (PyTypeObject*) cls_index))
- {
- if (enc->outputFormat == SPLIT)
- {
- PRINTMARK();
- tc->type = JT_OBJECT;
- pc->iterBegin = Index_iterBegin;
- pc->iterEnd = Index_iterEnd;
- pc->iterNext = Index_iterNext;
- pc->iterGetValue = Index_iterGetValue;
- pc->iterGetName = Index_iterGetName;
- return;
- }
- PRINTMARK();
- tc->type = JT_ARRAY;
- pc->newObj = PyObject_GetAttrString(obj, "values");
- pc->iterBegin = NpyArr_iterBegin;
- pc->iterEnd = NpyArr_iterEnd;
- pc->iterNext = NpyArr_iterNext;
- pc->iterGetValue = NpyArr_iterGetValue;
- pc->iterGetName = NpyArr_iterGetName;
- return;
+ PRINTMARK();
+ tc->type = JT_ARRAY;
+ pc->iterBegin = NpyArr_iterBegin;
+ pc->iterEnd = NpyArr_iterEnd;
+ pc->iterNext = NpyArr_iterNext;
+ pc->iterGetValue = NpyArr_iterGetValue;
+ pc->iterGetName = NpyArr_iterGetName;
+ return;
+ }
+ else
+ if (PyObject_TypeCheck(obj, (PyTypeObject*) cls_dataframe))
+ {
+ if (enc->outputFormat == SPLIT)
+ {
+ PRINTMARK();
+ tc->type = JT_OBJECT;
+ pc->iterBegin = DataFrame_iterBegin;
+ pc->iterEnd = DataFrame_iterEnd;
+ pc->iterNext = DataFrame_iterNext;
+ pc->iterGetValue = DataFrame_iterGetValue;
+ pc->iterGetName = DataFrame_iterGetName;
+ return;
}
- else
- if (PyObject_TypeCheck(obj, (PyTypeObject*) cls_series))
- {
- if (enc->outputFormat == SPLIT)
- {
- PRINTMARK();
- tc->type = JT_OBJECT;
- pc->iterBegin = Series_iterBegin;
- pc->iterEnd = Series_iterEnd;
- pc->iterNext = Series_iterNext;
- pc->iterGetValue = Series_iterGetValue;
- pc->iterGetName = Series_iterGetName;
- return;
- }
- if (enc->outputFormat == INDEX || enc->outputFormat == COLUMNS)
- {
- PRINTMARK();
- tc->type = JT_OBJECT;
- pc->columnLabelsLen = PyArray_SIZE(obj);
- pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "index"), (JSONObjectEncoder*) enc, pc->columnLabelsLen);
- if (!pc->columnLabels)
- {
- goto INVALID;
- }
- }
- else
- {
- PRINTMARK();
- tc->type = JT_ARRAY;
- }
- pc->newObj = PyObject_GetAttrString(obj, "values");
- pc->iterBegin = NpyArr_iterBegin;
- pc->iterEnd = NpyArr_iterEnd;
- pc->iterNext = NpyArr_iterNext;
- pc->iterGetValue = NpyArr_iterGetValue;
- pc->iterGetName = NpyArr_iterGetName;
- return;
+ PRINTMARK();
+ pc->newObj = PyObject_GetAttrString(obj, "values");
+ pc->iterBegin = NpyArr_iterBegin;
+ pc->iterEnd = NpyArr_iterEnd;
+ pc->iterNext = NpyArr_iterNext;
+ pc->iterGetValue = NpyArr_iterGetValue;
+ pc->iterGetName = NpyArr_iterGetName;
+ if (enc->outputFormat == VALUES)
+ {
+ PRINTMARK();
+ tc->type = JT_ARRAY;
}
else
- if (PyArray_Check(obj))
+ if (enc->outputFormat == RECORDS)
{
- if (enc->npyCtxtPassthru)
- {
- PRINTMARK();
- pc->npyarr = enc->npyCtxtPassthru;
- tc->type = (pc->npyarr->columnLabels ? JT_OBJECT : JT_ARRAY);
- pc->iterBegin = NpyArrPassThru_iterBegin;
- pc->iterEnd = NpyArrPassThru_iterEnd;
- pc->iterNext = NpyArr_iterNext;
- pc->iterGetValue = NpyArr_iterGetValue;
- pc->iterGetName = NpyArr_iterGetName;
- enc->npyCtxtPassthru = NULL;
- return;
- }
-
- PRINTMARK();
- tc->type = JT_ARRAY;
- pc->iterBegin = NpyArr_iterBegin;
- pc->iterEnd = NpyArr_iterEnd;
- pc->iterNext = NpyArr_iterNext;
- pc->iterGetValue = NpyArr_iterGetValue;
- pc->iterGetName = NpyArr_iterGetName;
- return;
+ PRINTMARK();
+ tc->type = JT_ARRAY;
+ pc->columnLabelsLen = PyArray_DIM(pc->newObj, 1);
+ pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "columns"), (JSONObjectEncoder*) enc, pc->columnLabelsLen);
+ if (!pc->columnLabels)
+ {
+ goto INVALID;
+ }
+ }
+ else
+ if (enc->outputFormat == INDEX)
+ {
+ PRINTMARK();
+ tc->type = JT_OBJECT;
+ pc->rowLabelsLen = PyArray_DIM(pc->newObj, 0);
+ pc->rowLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "index"), (JSONObjectEncoder*) enc, pc->rowLabelsLen);
+ if (!pc->rowLabels)
+ {
+ goto INVALID;
+ }
+ pc->columnLabelsLen = PyArray_DIM(pc->newObj, 1);
+ pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "columns"), (JSONObjectEncoder*) enc, pc->columnLabelsLen);
+ if (!pc->columnLabels)
+ {
+ NpyArr_freeLabels(pc->rowLabels, pc->rowLabelsLen);
+ pc->rowLabels = NULL;
+ goto INVALID;
+ }
}
else
- if (PyObject_TypeCheck(obj, (PyTypeObject*) cls_dataframe))
{
- if (enc->outputFormat == SPLIT)
- {
- PRINTMARK();
- tc->type = JT_OBJECT;
- pc->iterBegin = DataFrame_iterBegin;
- pc->iterEnd = DataFrame_iterEnd;
- pc->iterNext = DataFrame_iterNext;
- pc->iterGetValue = DataFrame_iterGetValue;
- pc->iterGetName = DataFrame_iterGetName;
- return;
- }
-
- PRINTMARK();
- pc->newObj = PyObject_GetAttrString(obj, "values");
- pc->iterBegin = NpyArr_iterBegin;
- pc->iterEnd = NpyArr_iterEnd;
- pc->iterNext = NpyArr_iterNext;
- pc->iterGetValue = NpyArr_iterGetValue;
- pc->iterGetName = NpyArr_iterGetName;
- if (enc->outputFormat == VALUES)
- {
- PRINTMARK();
- tc->type = JT_ARRAY;
- }
- else
- if (enc->outputFormat == RECORDS)
- {
- PRINTMARK();
- tc->type = JT_ARRAY;
- pc->columnLabelsLen = PyArray_DIM(pc->newObj, 1);
- pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "columns"), (JSONObjectEncoder*) enc, pc->columnLabelsLen);
- if (!pc->columnLabels)
- {
- goto INVALID;
- }
- }
- else
- if (enc->outputFormat == INDEX)
- {
- PRINTMARK();
- tc->type = JT_OBJECT;
- pc->rowLabelsLen = PyArray_DIM(pc->newObj, 0);
- pc->rowLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "index"), (JSONObjectEncoder*) enc, pc->rowLabelsLen);
- if (!pc->rowLabels)
- {
- goto INVALID;
- }
- pc->columnLabelsLen = PyArray_DIM(pc->newObj, 1);
- pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "columns"), (JSONObjectEncoder*) enc, pc->columnLabelsLen);
- if (!pc->columnLabels)
- {
- NpyArr_freeLabels(pc->rowLabels, pc->rowLabelsLen);
- pc->rowLabels = NULL;
- goto INVALID;
- }
- }
- else
- {
- PRINTMARK();
- tc->type = JT_OBJECT;
- pc->rowLabelsLen = PyArray_DIM(pc->newObj, 1);
- pc->rowLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "columns"), (JSONObjectEncoder*) enc, pc->rowLabelsLen);
- if (!pc->rowLabels)
- {
- goto INVALID;
- }
- pc->columnLabelsLen = PyArray_DIM(pc->newObj, 0);
- pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "index"), (JSONObjectEncoder*) enc, pc->columnLabelsLen);
- if (!pc->columnLabels)
- {
- NpyArr_freeLabels(pc->rowLabels, pc->rowLabelsLen);
- pc->rowLabels = NULL;
- goto INVALID;
- }
- pc->transpose = 1;
- }
- return;
+ PRINTMARK();
+ tc->type = JT_OBJECT;
+ pc->rowLabelsLen = PyArray_DIM(pc->newObj, 1);
+ pc->rowLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "columns"), (JSONObjectEncoder*) enc, pc->rowLabelsLen);
+ if (!pc->rowLabels)
+ {
+ goto INVALID;
+ }
+ pc->columnLabelsLen = PyArray_DIM(pc->newObj, 0);
+ pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "index"), (JSONObjectEncoder*) enc, pc->columnLabelsLen);
+ if (!pc->columnLabels)
+ {
+ NpyArr_freeLabels(pc->rowLabels, pc->rowLabelsLen);
+ pc->rowLabels = NULL;
+ goto INVALID;
+ }
+ pc->transpose = 1;
}
+ return;
+ }
+ toDictFunc = PyObject_GetAttrString(obj, "toDict");
- toDictFunc = PyObject_GetAttrString(obj, "toDict");
+ if (toDictFunc)
+ {
+ PyObject* tuple = PyTuple_New(0);
+ PyObject* toDictResult = PyObject_Call(toDictFunc, tuple, NULL);
+ Py_DECREF(tuple);
+ Py_DECREF(toDictFunc);
- if (toDictFunc)
+ if (toDictResult == NULL)
{
- PyObject* tuple = PyTuple_New(0);
- PyObject* toDictResult = PyObject_Call(toDictFunc, tuple, NULL);
- Py_DECREF(tuple);
- Py_DECREF(toDictFunc);
-
- if (toDictResult == NULL)
- {
- PyErr_Clear();
- tc->type = JT_NULL;
- return;
- }
-
- if (!PyDict_Check(toDictResult))
- {
- Py_DECREF(toDictResult);
- tc->type = JT_NULL;
- return;
- }
-
- PRINTMARK();
- tc->type = JT_OBJECT;
- pc->iterBegin = Dict_iterBegin;
- pc->iterEnd = Dict_iterEnd;
- pc->iterNext = Dict_iterNext;
- pc->iterGetValue = Dict_iterGetValue;
- pc->iterGetName = Dict_iterGetName;
- pc->dictObj = toDictResult;
- return;
+ PyErr_Clear();
+ tc->type = JT_NULL;
+ return;
}
- PyErr_Clear();
+ if (!PyDict_Check(toDictResult))
+ {
+ Py_DECREF(toDictResult);
+ tc->type = JT_NULL;
+ return;
+ }
+ PRINTMARK();
tc->type = JT_OBJECT;
- pc->iterBegin = Dir_iterBegin;
- pc->iterEnd = Dir_iterEnd;
- pc->iterNext = Dir_iterNext;
- pc->iterGetValue = Dir_iterGetValue;
- pc->iterGetName = Dir_iterGetName;
-
+ pc->iterBegin = Dict_iterBegin;
+ pc->iterEnd = Dict_iterEnd;
+ pc->iterNext = Dict_iterNext;
+ pc->iterGetValue = Dict_iterGetValue;
+ pc->iterGetName = Dict_iterGetName;
+ pc->dictObj = toDictResult;
return;
+ }
+
+ PyErr_Clear();
+
+ PRINTMARK();
+ tc->type = JT_OBJECT;
+ pc->iterBegin = Dir_iterBegin;
+ pc->iterEnd = Dir_iterEnd;
+ pc->iterNext = Dir_iterNext;
+ pc->iterGetValue = Dir_iterGetValue;
+ pc->iterGetName = Dir_iterGetName;
+ return;
INVALID:
- tc->type = JT_INVALID;
- PyObject_Free(tc->prv);
- tc->prv = NULL;
- return;
+ tc->type = JT_INVALID;
+ PyObject_Free(tc->prv);
+ tc->prv = NULL;
+ return;
}
-
void Object_endTypeContext(JSOBJ obj, JSONTypeContext *tc)
{
Py_XDECREF(GET_TC(tc)->newObj);
@@ -1462,244 +1583,244 @@ void Object_endTypeContext(JSOBJ obj, JSONTypeContext *tc)
const char *Object_getStringValue(JSOBJ obj, JSONTypeContext *tc, size_t *_outLen)
{
- return GET_TC(tc)->PyTypeToJSON (obj, tc, NULL, _outLen);
+ return GET_TC(tc)->PyTypeToJSON (obj, tc, NULL, _outLen);
}
JSINT64 Object_getLongValue(JSOBJ obj, JSONTypeContext *tc)
{
- JSINT64 ret;
- GET_TC(tc)->PyTypeToJSON (obj, tc, &ret, NULL);
-
- return ret;
+ JSINT64 ret;
+ GET_TC(tc)->PyTypeToJSON (obj, tc, &ret, NULL);
+ return ret;
}
JSINT32 Object_getIntValue(JSOBJ obj, JSONTypeContext *tc)
{
- JSINT32 ret;
- GET_TC(tc)->PyTypeToJSON (obj, tc, &ret, NULL);
- return ret;
+ JSINT32 ret;
+ GET_TC(tc)->PyTypeToJSON (obj, tc, &ret, NULL);
+ return ret;
}
-
double Object_getDoubleValue(JSOBJ obj, JSONTypeContext *tc)
{
- double ret;
- GET_TC(tc)->PyTypeToJSON (obj, tc, &ret, NULL);
- return ret;
+ double ret;
+ GET_TC(tc)->PyTypeToJSON (obj, tc, &ret, NULL);
+ return ret;
}
static void Object_releaseObject(JSOBJ _obj)
{
- Py_DECREF( (PyObject *) _obj);
+ Py_DECREF( (PyObject *) _obj);
}
-
-
void Object_iterBegin(JSOBJ obj, JSONTypeContext *tc)
{
- GET_TC(tc)->iterBegin(obj, tc);
+ GET_TC(tc)->iterBegin(obj, tc);
}
int Object_iterNext(JSOBJ obj, JSONTypeContext *tc)
{
- return GET_TC(tc)->iterNext(obj, tc);
+ return GET_TC(tc)->iterNext(obj, tc);
}
void Object_iterEnd(JSOBJ obj, JSONTypeContext *tc)
{
- GET_TC(tc)->iterEnd(obj, tc);
+ GET_TC(tc)->iterEnd(obj, tc);
}
JSOBJ Object_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
{
- return GET_TC(tc)->iterGetValue(obj, tc);
+ return GET_TC(tc)->iterGetValue(obj, tc);
}
char *Object_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
{
- return GET_TC(tc)->iterGetName(obj, tc, outLen);
+ return GET_TC(tc)->iterGetName(obj, tc, outLen);
}
-
PyObject* objToJSON(PyObject* self, PyObject *args, PyObject *kwargs)
{
- static char *kwlist[] = { "obj", "ensure_ascii", "double_precision", "orient", NULL};
+ static char *kwlist[] = { "obj", "ensure_ascii", "double_precision", "encode_html_chars", "orient", NULL};
+
+ char buffer[65536];
+ char *ret;
+ PyObject *newobj;
+ PyObject *oinput = NULL;
+ PyObject *oensureAscii = NULL;
+ int idoublePrecision = 10; // default double precision setting
+ PyObject *oencodeHTMLChars = NULL;
+ char *sOrient = NULL;
+
+ PyObjectEncoder pyEncoder =
+ {
+ {
+ Object_beginTypeContext,
+ Object_endTypeContext,
+ Object_getStringValue,
+ Object_getLongValue,
+ Object_getIntValue,
+ Object_getDoubleValue,
+ Object_iterBegin,
+ Object_iterNext,
+ Object_iterEnd,
+ Object_iterGetValue,
+ Object_iterGetName,
+ Object_releaseObject,
+ PyObject_Malloc,
+ PyObject_Realloc,
+ PyObject_Free,
+ -1, //recursionMax
+ idoublePrecision,
+ 1, //forceAscii
+ 0, //encodeHTMLChars
+ }
+ };
+ JSONObjectEncoder* encoder = (JSONObjectEncoder*) &pyEncoder;
+
+ pyEncoder.npyCtxtPassthru = NULL;
+ pyEncoder.outputFormat = COLUMNS;
+
+ PRINTMARK();
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|OiOs", kwlist, &oinput, &oensureAscii, &idoublePrecision, &oencodeHTMLChars, &sOrient))
+ {
+ return NULL;
+ }
- char buffer[65536];
- char *ret;
- PyObject *newobj;
- PyObject *oinput = NULL;
- PyObject *oensureAscii = NULL;
- char *sOrient = NULL;
- int idoublePrecision = 5; // default double precision setting
+ if (oensureAscii != NULL && !PyObject_IsTrue(oensureAscii))
+ {
+ encoder->forceASCII = 0;
+ }
- PyObjectEncoder pyEncoder =
- {
- {
- Object_beginTypeContext, //void (*beginTypeContext)(JSOBJ obj, JSONTypeContext *tc);
- Object_endTypeContext, //void (*endTypeContext)(JSOBJ obj, JSONTypeContext *tc);
- Object_getStringValue, //const char *(*getStringValue)(JSOBJ obj, JSONTypeContext *tc, size_t *_outLen);
- Object_getLongValue, //JSLONG (*getLongValue)(JSOBJ obj, JSONTypeContext *tc);
- Object_getIntValue, //JSLONG (*getLongValue)(JSOBJ obj, JSONTypeContext *tc);
- Object_getDoubleValue, //double (*getDoubleValue)(JSOBJ obj, JSONTypeContext *tc);
- Object_iterBegin, //JSPFN_ITERBEGIN iterBegin;
- Object_iterNext, //JSPFN_ITERNEXT iterNext;
- Object_iterEnd, //JSPFN_ITEREND iterEnd;
- Object_iterGetValue, //JSPFN_ITERGETVALUE iterGetValue;
- Object_iterGetName, //JSPFN_ITERGETNAME iterGetName;
- Object_releaseObject, //void (*releaseValue)(JSONTypeContext *ti);
- PyObject_Malloc, //JSPFN_MALLOC malloc;
- PyObject_Realloc, //JSPFN_REALLOC realloc;
- PyObject_Free, //JSPFN_FREE free;
- -1, //recursionMax
- idoublePrecision,
- 1, //forceAscii
- }
- };
- JSONObjectEncoder* encoder = (JSONObjectEncoder*) &pyEncoder;
+ if (oencodeHTMLChars != NULL && PyObject_IsTrue(oencodeHTMLChars))
+ {
+ encoder->encodeHTMLChars = 1;
+ }
- pyEncoder.npyCtxtPassthru = NULL;
- pyEncoder.outputFormat = COLUMNS;
+ encoder->doublePrecision = idoublePrecision;
- PRINTMARK();
-
- if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|Ois", kwlist, &oinput, &oensureAscii, &idoublePrecision, &sOrient))
+ if (sOrient != NULL)
+ {
+ if (strcmp(sOrient, "records") == 0)
{
- return NULL;
+ pyEncoder.outputFormat = RECORDS;
}
-
- if (sOrient != NULL)
+ else
+ if (strcmp(sOrient, "index") == 0)
{
- if (strcmp(sOrient, "records") == 0)
- {
- pyEncoder.outputFormat = RECORDS;
- }
- else
- if (strcmp(sOrient, "index") == 0)
- {
- pyEncoder.outputFormat = INDEX;
- }
- else
- if (strcmp(sOrient, "split") == 0)
- {
- pyEncoder.outputFormat = SPLIT;
- }
- else
- if (strcmp(sOrient, "values") == 0)
- {
- pyEncoder.outputFormat = VALUES;
- }
- else
- if (strcmp(sOrient, "columns") != 0)
- {
- PyErr_Format (PyExc_ValueError, "Invalid value '%s' for option 'orient'", sOrient);
- return NULL;
- }
+ pyEncoder.outputFormat = INDEX;
}
-
- pyEncoder.originalOutputFormat = pyEncoder.outputFormat;
-
- if (oensureAscii != NULL && !PyObject_IsTrue(oensureAscii))
+ else
+ if (strcmp(sOrient, "split") == 0)
{
- encoder->forceASCII = 0;
+ pyEncoder.outputFormat = SPLIT;
}
-
- encoder->doublePrecision = idoublePrecision;
-
- PRINTMARK();
- ret = JSON_EncodeObject (oinput, encoder, buffer, sizeof (buffer));
- PRINTMARK();
-
- if (PyErr_Occurred())
+ else
+ if (strcmp(sOrient, "values") == 0)
{
- return NULL;
+ pyEncoder.outputFormat = VALUES;
}
-
- if (encoder->errorMsg)
+ else
+ if (strcmp(sOrient, "columns") != 0)
{
- if (ret != buffer)
- {
- encoder->free (ret);
- }
-
- PyErr_Format (PyExc_OverflowError, "%s", encoder->errorMsg);
- return NULL;
+ PyErr_Format (PyExc_ValueError, "Invalid value '%s' for option 'orient'", sOrient);
+ return NULL;
}
+ }
+
+ pyEncoder.originalOutputFormat = pyEncoder.outputFormat;
+ PRINTMARK();
+ ret = JSON_EncodeObject (oinput, encoder, buffer, sizeof (buffer));
+ PRINTMARK();
- newobj = PyString_FromString (ret);
+ if (PyErr_Occurred())
+ {
+ return NULL;
+ }
+ if (encoder->errorMsg)
+ {
if (ret != buffer)
{
- encoder->free (ret);
+ encoder->free (ret);
}
- PRINTMARK();
+ PyErr_Format (PyExc_OverflowError, "%s", encoder->errorMsg);
+ return NULL;
+ }
+
+ newobj = PyString_FromString (ret);
+
+ if (ret != buffer)
+ {
+ encoder->free (ret);
+ }
- return newobj;
+ PRINTMARK();
+
+ return newobj;
}
PyObject* objToJSONFile(PyObject* self, PyObject *args, PyObject *kwargs)
{
- PyObject *data;
- PyObject *file;
- PyObject *string;
- PyObject *write;
- PyObject *argtuple;
-
- PRINTMARK();
+ PyObject *data;
+ PyObject *file;
+ PyObject *string;
+ PyObject *write;
+ PyObject *argtuple;
- if (!PyArg_ParseTuple (args, "OO", &data, &file)) {
- return NULL;
- }
+ PRINTMARK();
- if (!PyObject_HasAttrString (file, "write"))
- {
- PyErr_Format (PyExc_TypeError, "expected file");
- return NULL;
- }
+ if (!PyArg_ParseTuple (args, "OO", &data, &file))
+ {
+ return NULL;
+ }
- write = PyObject_GetAttrString (file, "write");
+ if (!PyObject_HasAttrString (file, "write"))
+ {
+ PyErr_Format (PyExc_TypeError, "expected file");
+ return NULL;
+ }
- if (!PyCallable_Check (write)) {
- Py_XDECREF(write);
- PyErr_Format (PyExc_TypeError, "expected file");
- return NULL;
- }
+ write = PyObject_GetAttrString (file, "write");
- argtuple = PyTuple_Pack(1, data);
+ if (!PyCallable_Check (write))
+ {
+ Py_XDECREF(write);
+ PyErr_Format (PyExc_TypeError, "expected file");
+ return NULL;
+ }
- string = objToJSON (self, argtuple, kwargs);
+ argtuple = PyTuple_Pack(1, data);
- if (string == NULL)
- {
- Py_XDECREF(write);
- Py_XDECREF(argtuple);
- return NULL;
- }
+ string = objToJSON (self, argtuple, kwargs);
+ if (string == NULL)
+ {
+ Py_XDECREF(write);
Py_XDECREF(argtuple);
+ return NULL;
+ }
- argtuple = PyTuple_Pack (1, string);
- if (argtuple == NULL)
- {
- Py_XDECREF(write);
- return NULL;
- }
- if (PyObject_CallObject (write, argtuple) == NULL)
- {
- Py_XDECREF(write);
- Py_XDECREF(argtuple);
- return NULL;
- }
+ Py_XDECREF(argtuple);
+ argtuple = PyTuple_Pack (1, string);
+ if (argtuple == NULL)
+ {
Py_XDECREF(write);
- Py_DECREF(argtuple);
- Py_XDECREF(string);
-
- PRINTMARK();
+ return NULL;
+ }
+ if (PyObject_CallObject (write, argtuple) == NULL)
+ {
+ Py_XDECREF(write);
+ Py_XDECREF(argtuple);
+ return NULL;
+ }
- Py_RETURN_NONE;
+ Py_XDECREF(write);
+ Py_DECREF(argtuple);
+ Py_XDECREF(string);
+ PRINTMARK();
+ Py_RETURN_NONE;
}
-
diff --git a/pandas/src/ujson/python/py_defines.h b/pandas/src/ujson/python/py_defines.h
index 1544c2e3cf34d..312914217d8e3 100644
--- a/pandas/src/ujson/python/py_defines.h
+++ b/pandas/src/ujson/python/py_defines.h
@@ -1,3 +1,40 @@
+/*
+Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the ESN Social Software AB nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
+http://code.google.com/p/stringencoders/
+Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
+
+Numeric decoder derived from from TCL library
+http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
+ * Copyright (c) 1988-1993 The Regents of the University of California.
+ * Copyright (c) 1994 Sun Microsystems, Inc.
+*/
+
#include <Python.h>
#if PY_MAJOR_VERSION >= 3
diff --git a/pandas/src/ujson/python/ujson.c b/pandas/src/ujson/python/ujson.c
index e04309e620a1d..33b01b341c20a 100644
--- a/pandas/src/ujson/python/ujson.c
+++ b/pandas/src/ujson/python/ujson.c
@@ -1,3 +1,40 @@
+/*
+Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of the ESN Social Software AB nor the
+names of its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
+http://code.google.com/p/stringencoders/
+Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
+
+Numeric decoder derived from from TCL library
+http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
+* Copyright (c) 1988-1993 The Regents of the University of California.
+* Copyright (c) 1994 Sun Microsystems, Inc.
+*/
+
#include "py_defines.h"
#include "version.h"
@@ -15,28 +52,30 @@ PyObject* objToJSONFile(PyObject* self, PyObject *args, PyObject *kwargs);
PyObject* JSONFileToObj(PyObject* self, PyObject *args, PyObject *kwargs);
+#define ENCODER_HELP_TEXT "Use ensure_ascii=false to output UTF-8. Pass in double_precision to alter the maximum digit precision of doubles. Set encode_html_chars=True to encode < > & as unicode escape sequences."
+
static PyMethodDef ujsonMethods[] = {
- {"encode", (PyCFunction) objToJSON, METH_VARARGS | METH_KEYWORDS, "Converts arbitrary object recursivly into JSON. Use ensure_ascii=false to output UTF-8. Pass in double_precision to alter the maximum digit precision with doubles"},
- {"decode", (PyCFunction) JSONToObj, METH_VARARGS | METH_KEYWORDS, "Converts JSON as string to dict object structure"},
- {"dumps", (PyCFunction) objToJSON, METH_VARARGS | METH_KEYWORDS, "Converts arbitrary object recursivly into JSON. Use ensure_ascii=false to output UTF-8"},
- {"loads", (PyCFunction) JSONToObj, METH_VARARGS | METH_KEYWORDS, "Converts JSON as string to dict object structure"},
- {"dump", (PyCFunction) objToJSONFile, METH_VARARGS | METH_KEYWORDS, "Converts arbitrary object recursively into JSON file. Use ensure_ascii=false to output UTF-8"},
- {"load", (PyCFunction) JSONFileToObj, METH_VARARGS | METH_KEYWORDS, "Converts JSON as file to dict object structure"},
- {NULL, NULL, 0, NULL} /* Sentinel */
+ {"encode", (PyCFunction) objToJSON, METH_VARARGS | METH_KEYWORDS, "Converts arbitrary object recursivly into JSON. " ENCODER_HELP_TEXT},
+ {"decode", (PyCFunction) JSONToObj, METH_VARARGS | METH_KEYWORDS, "Converts JSON as string to dict object structure. Use precise_float=True to use high precision float decoder."},
+ {"dumps", (PyCFunction) objToJSON, METH_VARARGS | METH_KEYWORDS, "Converts arbitrary object recursivly into JSON. " ENCODER_HELP_TEXT},
+ {"loads", (PyCFunction) JSONToObj, METH_VARARGS | METH_KEYWORDS, "Converts JSON as string to dict object structure. Use precise_float=True to use high precision float decoder."},
+ {"dump", (PyCFunction) objToJSONFile, METH_VARARGS | METH_KEYWORDS, "Converts arbitrary object recursively into JSON file. " ENCODER_HELP_TEXT},
+ {"load", (PyCFunction) JSONFileToObj, METH_VARARGS | METH_KEYWORDS, "Converts JSON as file to dict object structure. Use precise_float=True to use high precision float decoder."},
+ {NULL, NULL, 0, NULL} /* Sentinel */
};
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef moduledef = {
- PyModuleDef_HEAD_INIT,
- "_pandasujson",
- 0, /* m_doc */
- -1, /* m_size */
- ujsonMethods, /* m_methods */
- NULL, /* m_reload */
- NULL, /* m_traverse */
- NULL, /* m_clear */
- NULL /* m_free */
+ PyModuleDef_HEAD_INIT,
+ "_pandasujson",
+ 0, /* m_doc */
+ -1, /* m_size */
+ ujsonMethods, /* m_methods */
+ NULL, /* m_reload */
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL /* m_free */
};
#define PYMODINITFUNC PyObject *PyInit_json(void)
@@ -53,21 +92,21 @@ static struct PyModuleDef moduledef = {
PYMODINITFUNC
{
- PyObject *module;
- PyObject *version_string;
+ PyObject *module;
+ PyObject *version_string;
- initObjToJSON();
- module = PYMODULE_CREATE();
+ initObjToJSON();
+ module = PYMODULE_CREATE();
- if (module == NULL)
- {
- MODINITERROR;
- }
+ if (module == NULL)
+ {
+ MODINITERROR;
+ }
- version_string = PyString_FromString (UJSON_VERSION);
- PyModule_AddObject (module, "__version__", version_string);
+ version_string = PyString_FromString (UJSON_VERSION);
+ PyModule_AddObject (module, "__version__", version_string);
#if PY_MAJOR_VERSION >= 3
- return module;
+ return module;
#endif
}
diff --git a/pandas/src/ujson/python/version.h b/pandas/src/ujson/python/version.h
index 9449441411192..0ccfbfe74521c 100644
--- a/pandas/src/ujson/python/version.h
+++ b/pandas/src/ujson/python/version.h
@@ -1 +1,38 @@
-#define UJSON_VERSION "1.18"
+/*
+Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the ESN Social Software AB nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
+http://code.google.com/p/stringencoders/
+Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
+
+Numeric decoder derived from from TCL library
+http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
+ * Copyright (c) 1988-1993 The Regents of the University of California.
+ * Copyright (c) 1994 Sun Microsystems, Inc.
+*/
+
+#define UJSON_VERSION "1.33"
| This updates pandas JSON to use the latest ujson version. A fair few fixes and enhancements are included. Unfortunately however there were a lot of whitespace changes so it looks like a lot more than it actually is.
All tests pass on py27 and py33. Valgrind run of JSON tests with Python 2.7 is clean.
Also included are two fixes encountered during merging and pushed upstream to ultrajson:
https://github.com/esnme/ultrajson/pull/93
https://github.com/esnme/ultrajson/pull/94
| https://api.github.com/repos/pandas-dev/pandas/pulls/3946 | 2013-06-18T14:12:54Z | 2013-06-19T01:46:00Z | 2013-06-19T01:46:00Z | 2014-07-04T07:35:19Z |
ENH: add last element to repring of sequences | diff --git a/pandas/core/common.py b/pandas/core/common.py
index a31c92caf4343..ebbd87c9bf073 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1941,23 +1941,24 @@ def _pprint_seq(seq, _nest_lvl=0, **kwds):
bounds length of printed sequence, depending on options
"""
- if isinstance(seq,set):
+ if isinstance(seq, set):
fmt = u"set([%s])"
else:
fmt = u"[%s]" if hasattr(seq, '__setitem__') else u"(%s)"
- nitems = get_option("max_seq_items") or len(seq)
+ n = len(seq)
+ nitems = get_option("max_seq_items") or n
s = iter(seq)
r = []
- for i in range(min(nitems,len(seq))): # handle sets, no slicing
+ for i in xrange(min(nitems, n)): # handle sets, no slicing
r.append(pprint_thing(next(s), _nest_lvl + 1, **kwds))
- body = ", ".join(r)
+ body = u", ".join(r)
- if nitems < len(seq):
- body+= ", ..."
- elif isinstance(seq,tuple) and len(seq) == 1:
- body += ','
+ if nitems < n:
+ body += u", ..., {0}".format(pprint_thing(seq[-1], **kwds))
+ elif isinstance(seq, tuple) and n == 1:
+ body += u','
return fmt % body
@@ -2010,10 +2011,10 @@ def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False,
result - unicode object on py2, str on py3. Always Unicode.
"""
- def as_escaped_unicode(thing,escape_chars=escape_chars):
+ def as_escaped_unicode(thing, escape_chars=escape_chars):
# Unicode is fine, else we try to decode using utf-8 and 'replace'
# if that's not it either, we have no way of knowing and the user
- #should deal with it himself.
+ # should deal with it.
try:
result = unicode(thing) # we should try this first
@@ -2043,12 +2044,12 @@ def as_escaped_unicode(thing,escape_chars=escape_chars):
return unicode(thing)
elif (isinstance(thing, dict) and
_nest_lvl < get_option("display.pprint_nest_depth")):
- result = _pprint_dict(thing, _nest_lvl,quote_strings=True)
+ result = _pprint_dict(thing, _nest_lvl, quote_strings=True)
elif _is_sequence(thing) and _nest_lvl < \
get_option("display.pprint_nest_depth"):
result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars,
quote_strings=quote_strings)
- elif isinstance(thing,basestring) and quote_strings:
+ elif isinstance(thing, basestring) and quote_strings:
if py3compat.PY3:
fmt = "'%s'"
else:
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index db01545fb3c9d..8589e09bf038f 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -3,10 +3,12 @@
import re
import nose
+from nose.tools import assert_equal
import unittest
from pandas import Series, DataFrame, date_range, DatetimeIndex
from pandas.core.common import notnull, isnull
+from pandas.util.py3compat import PY3
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.core.config as cf
@@ -266,6 +268,7 @@ def test_ensure_int32():
result = com._ensure_int32(values)
assert(result.dtype == np.int32)
+
def test_ensure_platform_int():
# verify that when we create certain types of indices
@@ -286,6 +289,56 @@ def test_ensure_platform_int():
pi = com._ensure_platform_int(x)
assert(pi.dtype == np.int_)
+
+def test_pprint_max_seq_items():
+ # test with a specific setting
+ with cf.option_context('display.max_seq_items', 3):
+ s = 'Int64Index([0, 1, 2, ..., 4], dtype=int64)'
+ res = repr(tm.makeIntIndex(5))
+ assert_equal(s, res)
+
+ s = 'Int64Index([0, 1, 2, ..., 10], dtype=int64)'
+ res = repr(tm.makeIntIndex(11))
+ assert_equal(s, res)
+
+ s = 'Int64Index([0, 1, 2], dtype=int64)'
+ res = repr(tm.makeIntIndex(3))
+ assert_equal(s, res)
+
+ s = 'Int64Index([0, 1, 2, ..., 3], dtype=int64)'
+ res = repr(tm.makeIntIndex(4))
+ assert_equal(s, res)
+
+ s = 'Int64Index([], dtype=int64)'
+ res = repr(tm.makeIntIndex(0))
+ assert_equal(s, res)
+
+ # test with the default
+ s = 'Int64Index([0, 1, 2, 3, 4], dtype=int64)'
+ res = repr(tm.makeIntIndex(5))
+ assert_equal(s, res)
+
+ s = 'Int64Index([0, 1], dtype=int64)'
+ res = repr(tm.makeIntIndex(2))
+ assert_equal(s, res)
+
+ s = 'Int64Index([], dtype=int64)'
+ res = repr(tm.makeIntIndex(0))
+ assert_equal(s, res)
+
+ # test multiindex
+ with cf.option_context('display.max_seq_items', 2):
+ df = tm.makeCustomDataframe(2, 3, c_idx_nlevels=2)
+ mi = df.columns
+ if PY3:
+ s = ("MultiIndex\n[('C_l0_g0', 'C_l1_g0'), ('C_l0_g1', 'C_l1_g1'),"
+ " ..., ('C_l0_g2', 'C_l1_g2')]")
+ else:
+ s = ("MultiIndex\n[(u'C_l0_g0', u'C_l1_g0'), "
+ "(u'C_l0_g1', u'C_l1_g1'), ..., (u'C_l0_g2', u'C_l1_g2')]")
+ res = repr(mi)
+ assert_equal(s, res)
+
# TODO: fix this broken test
# def test_console_encode():
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index c297cfa554fa5..94631dd894324 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -302,7 +302,7 @@ def makeUnicodeIndex(k):
def makeIntIndex(k):
- return Index(range(k))
+ return Index(np.arange(k))
def makeFloatIndex(k):
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/3941 | 2013-06-18T03:04:35Z | 2013-06-20T21:53:04Z | null | 2014-06-23T18:39:47Z |
DOC: partial string indexing docs in timeseries.rst (GH3938) | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 3dee843e75d3e..f8d1e8323b9f5 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -175,7 +175,7 @@ dates outside of those dates if specified.
.. _timeseries.datetimeindex:
DatetimeIndex
-~~~~~~~~~~~~~
+-------------
One of the main uses for ``DatetimeIndex`` is as an index for pandas objects.
The ``DatetimeIndex`` class contains many timeseries related optimizations:
@@ -189,6 +189,19 @@ The ``DatetimeIndex`` class contains many timeseries related optimizations:
- Quick access to date fields via properties such as ``year``, ``month``, etc.
- Regularization functions like ``snap`` and very fast ``asof`` logic
+DatetimeIndex objects has all the basic functionality of regular Index objects
+and a smorgasbord of advanced timeseries-specific methods for easy frequency
+processing.
+
+.. seealso::
+ :ref:`Reindexing methods <basics.reindexing>`
+
+.. note::
+
+ While pandas does not force you to have a sorted date index, some of these
+ methods may have unexpected or incorrect behavior if the dates are
+ unsorted. So please be careful.
+
``DatetimeIndex`` can be used like a regular index and offers all of its
intelligent functionality like selection, slicing, etc.
@@ -200,7 +213,10 @@ intelligent functionality like selection, slicing, etc.
ts[:5].index
ts[::2].index
-You can pass in dates and strings that parses to dates as indexing parameters:
+Partial String Indexing
+~~~~~~~~~~~~~~~~~~~~~~~
+
+You can pass in dates and strings that parse to dates as indexing parameters:
.. ipython:: python
@@ -210,12 +226,6 @@ You can pass in dates and strings that parses to dates as indexing parameters:
ts['10/31/2011':'12/31/2011']
-A ``truncate`` convenience function is provided that is equivalent to slicing:
-
-.. ipython:: python
-
- ts.truncate(before='10/31/2011', after='12/31/2011')
-
To provide convenience for accessing longer time series, you can also pass in
the year or year and month as strings:
@@ -225,26 +235,72 @@ the year or year and month as strings:
ts['2011-6']
-Even complicated fancy indexing that breaks the DatetimeIndex's frequency
-regularity will result in a ``DatetimeIndex`` (but frequency is lost):
+This type of slicing will work on a DataFrame with a ``DateTimeIndex`` as well. Since the
+partial string selection is a form of label slicing, the endpoints **will be** included. This
+would include matching times on an included date. Here's an example:
.. ipython:: python
- ts[[0, 2, 6]].index
+ dft = DataFrame(randn(100000,1),columns=['A'],index=date_range('20130101',periods=100000,freq='T'))
+ dft
+ dft['2013']
-DatetimeIndex objects has all the basic functionality of regular Index objects
-and a smorgasbord of advanced timeseries-specific methods for easy frequency
-processing.
+This starts on the very first time in the month, and includes the last date & time for the month
-.. seealso::
- :ref:`Reindexing methods <basics.reindexing>`
+.. ipython:: python
-.. note::
+ dft['2013-1':'2013-2']
- While pandas does not force you to have a sorted date index, some of these
- methods may have unexpected or incorrect behavior if the dates are
- unsorted. So please be careful.
+This specifies a stop time **that includes all of the times on the last day**
+.. ipython:: python
+
+ dft['2013-1':'2013-2-28']
+
+This specifies an **exact** stop time (and is not the same as the above)
+
+.. ipython:: python
+
+ dft['2013-1':'2013-2-28 00:00:00']
+
+We are stopping on the included end-point as its part of the index
+
+.. ipython:: python
+
+ dft['2013-1-15':'2013-1-15 12:30:00']
+
+.. warning::
+
+ The following selection will raises a ``KeyError``; otherwise this selection methodology
+ would be inconsistent with other selection methods in pandas (as this is not a *slice*, nor does it
+ resolve to one)
+
+ .. code-block:: python
+
+ dft['2013-1-15 12:30:00']
+
+ To select a single row, use ``.loc``
+
+ .. ipython:: python
+
+ dft.loc['2013-1-15 12:30:00']
+
+
+Truncating & Fancy Indexing
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A ``truncate`` convenience function is provided that is equivalent to slicing:
+
+.. ipython:: python
+
+ ts.truncate(before='10/31/2011', after='12/31/2011')
+
+Even complicated fancy indexing that breaks the DatetimeIndex's frequency
+regularity will result in a ``DatetimeIndex`` (but frequency is lost):
+
+.. ipython:: python
+
+ ts[[0, 2, 6]].index
.. _timeseries.offsets:
| closes #3938
| https://api.github.com/repos/pandas-dev/pandas/pulls/3939 | 2013-06-17T23:41:32Z | 2013-06-18T00:42:28Z | 2013-06-18T00:42:28Z | 2014-06-14T06:04:28Z |
allow HDFStore to remain open when TableIterator is returned from read_hdf | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 62aa1b99dfac0..34dddc03cc072 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -155,7 +155,7 @@ def h5_open(path, mode):
@contextmanager
def get_store(path, mode='a', complevel=None, complib=None,
- fletcher32=False):
+ fletcher32=False, iterator=False):
"""
Creates an HDFStore instance. This function can be used in a with statement
@@ -175,7 +175,7 @@ def get_store(path, mode='a', complevel=None, complib=None,
complib=complib, fletcher32=False)
yield store
finally:
- if store is not None:
+ if store is not None and not iterator:
store.close()
@@ -199,7 +199,7 @@ def read_hdf(path_or_buf, key, **kwargs):
f = lambda store: store.select(key, **kwargs)
if isinstance(path_or_buf, basestring):
- with get_store(path_or_buf) as store:
+ with get_store(path_or_buf, iterator=kwargs.get("iterator")) as store:
return f(store)
f(path_or_buf)
| Hi,
I'm using a TableIterator from pandas.read_hdf function (with the keyword argument iterator=True), I am unable to retrieve any data due to the error "ClosedNodeError: the node object is closed".
For instance:
```
pandas.DataFrame({'a':[1,2,3], 'b':[4,5,6]}).to_hdf("test.h5", "test", append=True)
it = pandas.read_hdf("test.h5","test",iterator=True)
iter(it).next()
Traceback (most recent call last):
File "<ipython-input-22-5634d86698ab>", line 1, in <module>
iter(it).next()
File "/usr/local/lib/python2.7/site-packages/pandas/io/pytables.py", line 912, in __iter__
v = self.func(current, stop)
...
File "/usr/local/lib/python2.7/site-packages/tables/node.py", line 355, in _g_check_open
raise ClosedNodeError("the node object is closed")
ClosedNodeError: the node object is closed
```
I looked through source code of panda.io.pytables and found that in the
get_store function, store.close() is always run when read_hdf returns, even if
it returns an TableIterator. My assumption is that store should remain open in
order for TableIterator to work. Can you please let me know if this fix is
acceptable, or is there an easier way to do this?
Thanks,
Sean
| https://api.github.com/repos/pandas-dev/pandas/pulls/3937 | 2013-06-17T19:45:39Z | 2013-06-18T22:15:20Z | null | 2014-07-16T08:14:42Z |
BUG: fix python3/2 numpy import_array macro build issue with clang | diff --git a/pandas/src/ujson/python/objToJSON.c b/pandas/src/ujson/python/objToJSON.c
index 534d60970dd81..4fdd8dc91ab04 100644
--- a/pandas/src/ujson/python/objToJSON.c
+++ b/pandas/src/ujson/python/objToJSON.c
@@ -100,7 +100,7 @@ enum PANDAS_FORMAT
//#define PRINTMARK() fprintf(stderr, "%s: MARK(%d)\n", __FILE__, __LINE__)
#define PRINTMARK()
-#if (PY_VERSION_HEX >= 0x03000000)
+#if (PY_VERSION_HEX < 0x03000000)
void initObjToJSON(void)
#else
int initObjToJSON(void)
| closes #3872. no really, it does.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3936 | 2013-06-17T19:28:06Z | 2013-06-17T20:22:52Z | 2013-06-17T20:22:51Z | 2014-06-21T19:34:31Z |
FIX: StataReader | diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index ddc9db0b76539..632e97c24721f 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -407,7 +407,7 @@ def _null_terminate(self, s):
def _next(self):
typlist = self.typlist
- if self._has_string_data:
+ if self.has_string_data:
data = [None] * self.nvar
for i in range(len(data)):
if type(typlist[i]) is int:
@@ -523,7 +523,8 @@ def data(self, convert_dates=True, convert_categoricals=True, index=None):
for i in cols_:
if self.dtyplist[i] is not None:
col = data.columns[i]
- data[col] = Series(data[col], data[col].index, self.dtyplist[i])
+ if data[col].dtype is not np.dtype(object):
+ data[col] = Series(data[col], data[col].index, self.dtyplist[i])
if convert_dates:
cols = np.where(map(lambda x: x in _date_formats, self.fmtlist))[0]
@@ -856,7 +857,7 @@ def _write_data_nodates(self):
typ = ord(typlist[i])
if typ <= 244: # we've got a string
if len(var) < typ:
- var = _pad_bytes(self._decode_bytes(var), len(var) + 1)
+ var = _pad_bytes(var, typ)
self._write(var)
else:
try:
@@ -884,15 +885,13 @@ def _write_data_dates(self):
if i in convert_dates:
var = _datetime_to_stata_elapsed(var, self.fmtlist[i])
if typ <= 244: # we've got a string
- if isnull(var):
- var = "" # missing string
if len(var) < typ:
- var = _pad_bytes(var, len(var) + 1)
+ var = _pad_bytes(var, typ)
self._write(var)
else:
if isnull(var): # this only matters for floats
var = MISSING_VALUES[typ]
- self._write(struct.pack(byteorder+TYPE_MAP[typ], var))
+ self._file.write(struct.pack(byteorder+TYPE_MAP[typ], var))
def _null_terminate(self, s, as_string=False):
null_byte = '\x00'
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index 4584976c41383..0e32fb91fc743 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -3,19 +3,19 @@
from datetime import datetime
import os
import unittest
-import sys
import warnings
import nose
import numpy as np
-from pandas.core.frame import DataFrame
+from pandas.core.frame import DataFrame, Series
from pandas.io.parsers import read_csv
from pandas.io.stata import read_stata, StataReader, StataWriter
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.util.misc import is_little_endian
+
class StataTests(unittest.TestCase):
def setUp(self):
@@ -35,6 +35,7 @@ def setUp(self):
self.csv8 = os.path.join(self.dirpath, 'tbl19-3.csv')
self.dta9 = os.path.join(self.dirpath, 'lbw.dta')
self.csv9 = os.path.join(self.dirpath, 'lbw.csv')
+ self.dta10 = os.path.join(self.dirpath, 'stata10.dta')
def read_dta(self, file):
return read_stata(file, convert_dates=True)
@@ -189,9 +190,24 @@ def test_read_dta9(self):
decimal=3
)
+ def test_read_dta10(self):
+ original = DataFrame(
+ data=
+ [
+ ["string", "object", 1, 1.1, np.datetime64('2003-12-25')]
+ ],
+ columns=['string', 'object', 'integer', 'float', 'datetime'])
+ original["object"] = Series(original["object"], dtype=object)
+ original.index.name = 'index'
+
+ with ensure_clean(self.dta10) as path:
+ original.to_stata(path, {'datetime': 'tc'}, False)
+ written_and_read_again = self.read_dta(path)
+ tm.assert_frame_equal(written_and_read_again.set_index('index'), original)
+
def test_stata_doc_examples(self):
with ensure_clean(self.dta5) as path:
- df = DataFrame(np.random.randn(10,2),columns=list('AB'))
+ df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
if __name__ == '__main__':
| Fix for a bug in StataReader resulting in errors when reading Stata file with string columns
| https://api.github.com/repos/pandas-dev/pandas/pulls/3935 | 2013-06-17T14:05:37Z | 2013-06-21T00:11:15Z | 2013-06-21T00:11:15Z | 2014-07-16T08:14:39Z |
CLN: cleaned up _try_cast in core/groupby.py to eliminate cruft (GH3920) | diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 168615c060c2b..d15dcc1510577 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -432,23 +432,13 @@ def picker(arr):
def _try_cast(self, result, obj):
""" try to cast the result to our obj original type,
we may have roundtripped thru object in the mean-time """
- try:
- if obj.ndim > 1:
- dtype = obj.values.dtype
- else:
- dtype = obj.dtype
-
- if _is_numeric_dtype(dtype):
-
- # need to respect a non-number here (e.g. Decimal)
- if len(result) and issubclass(type(result[0]),(np.number,float,int)):
- result = _possibly_downcast_to_dtype(result, dtype)
+ if obj.ndim > 1:
+ dtype = obj.values.dtype
+ else:
+ dtype = obj.dtype
- elif issubclass(dtype.type, np.datetime64):
- if is_datetime64_dtype(obj.dtype):
- result = result.astype(obj.dtype)
- except:
- pass
+ if not np.isscalar(result):
+ result = _possibly_downcast_to_dtype(result, dtype)
return result
| raised on #3920
| https://api.github.com/repos/pandas-dev/pandas/pulls/3934 | 2013-06-17T12:20:16Z | 2013-06-17T12:47:20Z | 2013-06-17T12:47:20Z | 2014-07-16T08:14:37Z |
BUG: (GH3925) partial string selection with seconds resolution | diff --git a/RELEASE.rst b/RELEASE.rst
index 500ba2df1ed47..4e5d340c9ab1d 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -254,6 +254,7 @@ pandas 0.11.1
in the ``to_replace`` argument wasn't working (GH3907_)
- Fixed ``__truediv__`` in Python 2.7 with ``numexpr`` installed to actually do true division when dividing
two integer arrays with at least 10000 cells total (GH3764_)
+ - Indexing with a string with seconds resolution not selecting from a time index (GH3925_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -355,9 +356,13 @@ pandas 0.11.1
.. _GH3907: https://github.com/pydata/pandas/issues/3907
.. _GH3911: https://github.com/pydata/pandas/issues/3911
.. _GH3912: https://github.com/pydata/pandas/issues/3912
+<<<<<<< HEAD
.. _GH3764: https://github.com/pydata/pandas/issues/3764
.. _GH3888: https://github.com/pydata/pandas/issues/3888
+=======
+.. _GH3925: https://github.com/pydata/pandas/issues/3925
+>>>>>>> BUG: (GH3925) Indexing with a string with seconds resolution not selecting from a time index
pandas 0.11.0
=============
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 7a7210c479c67..33f72a0d15415 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1,5 +1,6 @@
# pylint: disable=W0223
+from datetime import datetime
from pandas.core.common import _asarray_tuplesafe
from pandas.core.index import Index, MultiIndex, _ensure_index
import pandas.core.common as com
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 1cb986ee6cd7c..109ceced4fd9d 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1102,6 +1102,13 @@ def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
t1 = Timestamp(st, tz=self.tz)
t2 = Timestamp(Timestamp(st + offsets.Minute(),
tz=self.tz).value - 1)
+ elif (reso == 'second' and (
+ self._resolution == Resolution.RESO_SEC or not is_monotonic)):
+ st = datetime(parsed.year, parsed.month, parsed.day,
+ hour=parsed.hour, minute=parsed.minute, second=parsed.second)
+ t1 = Timestamp(st, tz=self.tz)
+ t2 = Timestamp(Timestamp(st + offsets.Second(),
+ tz=self.tz).value - 1)
else:
raise KeyError
@@ -1110,9 +1117,16 @@ def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
if is_monotonic:
+ # we are out of range
+ if len(stamps) and (
+ (use_lhs and t1.value < stamps[0] and t2.value < stamps[0]) or (
+ (use_rhs and t1.value > stamps[-1] and t2.value > stamps[-1]))):
+ raise KeyError
+
# a monotonic (sorted) series can be sliced
left = stamps.searchsorted(t1.value, side='left') if use_lhs else None
right = stamps.searchsorted(t2.value, side='right') if use_rhs else None
+
return slice(left, right)
lhs_mask = (stamps>=t1.value) if use_lhs else True
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 51097cd157b99..08bcd9cfad8cc 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -251,6 +251,15 @@ def test_indexing(self):
expected = ts['2013']
assert_series_equal(expected,ts)
+ # GH 3925, indexing with a seconds resolution string / datetime object
+ df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
+ expected = df.loc[[df.index[2]]]
+ result = df['2012-01-02 18:01:02']
+ self.assert_(result == expected)
+
+ # this is a single date, so will raise
+ self.assertRaises(KeyError, df.__getitem__, df.index[2],)
+
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
| this no longer has much to do with #3925, and is only fixing a bug
Minor revision to select on second frequency
```
In [11]: df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
In [12]: df
Out[12]:
open high low close volume
2012-01-02 18:01:00-06:00 0.131243 0.301542 0.128027 0.804162 1.296658
2012-01-02 18:01:01-06:00 0.341487 1.548695 0.703234 0.904201 1.422337
2012-01-02 18:01:02-06:00 -1.050453 -1.884035 1.537788 -0.821058 0.558631
2012-01-02 18:01:03-06:00 0.846885 1.045378 -0.722903 -0.613625 -0.476531
2012-01-02 18:01:04-06:00 1.186823 -0.018299 -0.513886 -1.103269 -0.311907
In [14]: df['2012-01-02 18:01:02']
Out[14]:
open high low close volume
2012-01-02 18:01:02-06:00 -1.050453 -1.884035 1.537788 -0.821058 0.558631
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3931 | 2013-06-17T01:37:13Z | 2013-06-19T00:59:39Z | 2013-06-19T00:59:39Z | 2014-06-19T03:37:21Z |
DOC: fix to_json docstring nesting issue | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index bae85aa84a96e..16b3176521e28 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -539,25 +539,27 @@ def to_json(self, path_or_buf=None, orient=None, date_format='epoch',
----------
path_or_buf : the path or buffer to write the result string
if this is None, return a StringIO of the converted string
- orient :
+ orient : string
- Series :
- default is 'index'
- allowed values are: {'split','records','index'}
+ * Series
- DataFrame :
- default is 'columns'
- allowed values are: {'split','records','index','columns','values'}
+ - default is 'index'
+ - allowed values are: {'split','records','index'}
- The format of the JSON string
- split : dict like
- {index -> [index], columns -> [columns], data -> [values]}
- records : list like [{column -> value}, ... , {column -> value}]
- index : dict like {index -> {column -> value}}
- columns : dict like {column -> {index -> value}}
- values : just the values array
+ * DataFrame
- date_format : type of date conversion (epoch = epoch milliseconds, iso = ISO8601),
+ - default is 'columns'
+ - allowed values are: {'split','records','index','columns','values'}
+
+ * The format of the JSON string
+
+ - split : dict like {index -> [index], columns -> [columns], data -> [values]}
+ - records : list like [{column -> value}, ... , {column -> value}]
+ - index : dict like {index -> {column -> value}}
+ - columns : dict like {column -> {index -> value}}
+ - values : just the values array
+
+ date_format : type of date conversion (epoch = epoch milliseconds, iso = ISO8601)
default is epoch
double_precision : The number of decimal places to use when encoding
floating point values, default 10.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3930 | 2013-06-17T01:06:47Z | 2013-06-17T01:42:50Z | 2013-06-17T01:42:50Z | 2014-07-16T08:14:34Z | |
PTF no more | diff --git a/.travis.yml b/.travis.yml
index 8e2bb49d9df93..30f09deefd93a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,14 +5,6 @@ python:
env:
global:
- - secure: "O04RU5QRKEDL/SrIWEsVe8O+1TxZqZQSa28Sd+Fz48NW/XddhefYyxzqcUXh\nk/NjWMqknJRQhApLolBianVpsE577OTllzlcyKn3nUL6hjOXcoszGaYray7S\niNGKGyO8xrtB/ZQDtmupz0ksK8sLoCTscdiGotFulczbx0zt+4g="
- - secure: "PUJ9nC1/v2vpFUtELSoSjI53OHCVXfFTb8+t5lIGIqHtjUBkhiJSNPfCv8Bx\ndsdrx30qP8KsSceYzaa/bog6p8YNU1iih23S0KbjucutvA0LNHBTNvnxmjBR\nSJfKd5FmwnXvizRyghYBzmQ3NmGO7ADw2DBwKOhgGMqCHZ8Tlc8="
- - secure: "IDcMrCCW+6pgJtsI3Q163OPc0iec1ogpitaqiRhHcrEBUCXZgVeclOeiZBlw\n/u+uGyW/O0NhHMaFXKB8BdDVwlQEEHv48syN6npS/A5+O6jriWKL4ozttOhE\npOlu+yLhHnEwx6wZVIHRTVn+t1GkOrjlBcjaQi+Z13G3XmDaSG8="
- - secure: "Zu9aj0dTGpvMqT/HqBGQgDYl/v5ubC7lFwfE8Fqb0N1UVXqbpjXnNH/7oal1\nUsIT7klO++LWm+LxsP/A1FWENTSgdYe99JQtNyauW+0x5YR1JTuDJ8atDgx9\nSq66CaVpS5t+ov7UVm2bKSUX+1S8+8zGbIDADrMxEzYEMF7WoGM="
- - secure: "AfIvLxvCxj22zrqg3ejGf/VePKT2AyGT9erYzlKpBS0H8yi5Pp1MfmJjhaR4\n51zBtzqHPHiIEY6ZdE06o9PioMWkXS+BqJNrxGSbt1ltxgOFrxW5zOpwiFGZ\nZOv1YeFkuPf8PEsWT7615mdydqTQT7B0pqUKK/d6aka4TQ/tg5Q="
- - secure: "EM4ySBUusReNu7H1QHXvjnP/J1QowvfpwEBmjysYxJuq7KcG8HhhlfpUF+Gh\nLBzLak9QBA67k4edhum3qtKuJR5cHuja3+zuV8xmx096B/m96liJFTrwZpea\n58op3W6ZULctEpQNgIkyae20bjxl4f99JhZRUlonoPfx/rBIMFc="
- - secure: "pgMYS/6MQqDGb58qdzTJesvAMmcJWTUEEM8gf9rVbfqfxceOL4Xpx8siR9B2\nC4U4MW1cHMPP3RFEb4Jy0uK49aHH10snwZY1S84YPPllpH5ZFXVdN68OayNj\nh4k5N/2hhaaQuJ6Uh8v8s783ye4oYTOW5RJUFqQu4QdG4IkTIMs="
-
- NOSE_ARGS="not slow" UPLOAD=true
matrix:
@@ -41,7 +33,6 @@ before_install:
# - export APT_ARGS=-qq # comment this to debug travis install issues
# - set -x # enable this to see bash commands
- export ZIP_FLAGS=-q # comment this to debug travis install issues
- - source ci/envars.sh # we need to source this to bring in the envars
- ci/before_install.sh
- python -V
diff --git a/ci/before_install.sh b/ci/before_install.sh
index 677ddfa642f80..e4376e1bf21c2 100755
--- a/ci/before_install.sh
+++ b/ci/before_install.sh
@@ -10,27 +10,4 @@ echo "inside $0"
# overview
sudo apt-get update $APT_ARGS # run apt-get update for all versions
-if $PLEASE_TRAVIS_FASTER ; then
- echo "Faster? well... I'll try."
-
- if $CACHE_FILE_AVAILABLE ; then
- echo retrieving "$CACHE_FILE_URL";
-
- wget -q "$CACHE_FILE_URL" -O "/tmp/_$CYTHON_HASH.zip";
- unzip $ZIP_FLAGS /tmp/_"$CYTHON_HASH.zip" -d "$BUILD_CACHE_DIR";
- rm -f /tmp/_"$CYTHON_HASH.zip"
- # copy cythonized c files over
- cp -R "$BUILD_CACHE_DIR"/pandas/*.c pandas/
- cp -R "$BUILD_CACHE_DIR"/pandas/src/*.c pandas/src/
- fi;
- echo "VENV_FILE_AVAILABLE=$VENV_FILE_AVAILABLE"
- if $VENV_FILE_AVAILABLE ; then
- echo "getting venv"
- wget -q $VENV_FILE_URL -O "/tmp/venv.zip";
- sudo unzip $ZIP_FLAGS -o /tmp/venv.zip -d "/";
- sudo chown travis -R "$VIRTUAL_ENV"
- rm -f /tmp/_"$CYTHON_HASH.zip"
- fi;
-fi
-
true # never fail because bad things happened here
diff --git a/ci/envars.sh b/ci/envars.sh
deleted file mode 100755
index 2b4cacfd96fe4..0000000000000
--- a/ci/envars.sh
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/bin/bash
-
-# This must be sourced by .travis.yml, so any envars exported here will
-# be available to the rest of the build stages
-
-# - computes a hash based on the cython files in the codebade
-# - retrieves the decrypted key if any for all whitelisted forks
-# - checks whether the user optd int to use the cache
-# - if so, check for availablity of cache files on the server, based on hash
-# - set envars to control what the following scripts do
-
-# at most one of these will decrypt, so the end result is that $STORE_KEY
-# either holds a single key or does not
-export STORE_KEY="$STORE_KEY0""$STORE_KEY1""$STORE_KEY2""$STORE_KEY3""$STORE_KEY4"
-export STORE_KEY="$STORE_KEY""$STORE_KEY5""$STORE_KEY6""$STORE_KEY7"
-
-export CYTHON_HASH=$(find pandas | grep -P '\.(pyx|pxd)$' | sort \
- | while read N; do echo $(tail -n+1 $N | md5sum ) ;done | md5sum| cut -d ' ' -f 1)
-
-export CYTHON_HASH=$CYTHON_HASH-$TRAVIS_PYTHON_VERSION
-
-# where the cache files live on the server
-export CACHE_FILE_URL="https://cache27-pypandas.rhcloud.com/static/$STORE_KEY/$CYTHON_HASH.zip"
-export VENV_FILE_URL="https://cache27-pypandas.rhcloud.com/static/$STORE_KEY/venv-$TRAVIS_PYTHON_VERSION.zip"
-export CACHE_FILE_STORE_URL="https://cache27-pypandas.rhcloud.com/store/$STORE_KEY"
-
-echo "Hashing:"
-find pandas | grep -P '\.(pyx|pxd)$'
-echo "Key: $CYTHON_HASH"
-
-export CACHE_FILE_AVAILABLE=false
-export VENV_FILE_AVAILABLE=false
-export PLEASE_TRAVIS_FASTER=false
-
-# check whether the user opted in to use the cache via commit message
-if [ x"$(git log --format='%B' -n 1 | grep PLEASE_TRAVIS_FASTER | wc -l)" != x"0" ]; then
- export PLEASE_TRAVIS_FASTER=true
-fi;
-if [ x"$(git log --format='%B' -n 1 | grep PTF | wc -l)" != x"0" ]; then
- export PLEASE_TRAVIS_FASTER=true
-fi;
-
-if $PLEASE_TRAVIS_FASTER; then
-
- # check whether the files exists on the server
- curl -s -f -I "$CACHE_FILE_URL" # silent, don;t expose key
- if [ x"$?" == x"0" ] ; then
- export CACHE_FILE_AVAILABLE=true;
- fi
-
-
- curl -s -f -I "$VENV_FILE_URL" # silent, don;t expose key
- if [ x"$?" == x"0" ] ; then
- export VENV_FILE_AVAILABLE=true;
- fi
-
- # the pandas build cache machinery needs this set, and the directory created
- export BUILD_CACHE_DIR="/tmp/build_cache"
- mkdir "$BUILD_CACHE_DIR"
-fi;
-
-# debug
-echo "PLEASE_TRAVIS_FASTER=$PLEASE_TRAVIS_FASTER"
-echo "CACHE_FILE_AVAILABLE=$CACHE_FILE_AVAILABLE"
-echo "VENV_FILE_AVAILABLE=$VENV_FILE_AVAILABLE"
-
-true
diff --git a/ci/install.sh b/ci/install.sh
index 294db286a1001..60ea5643c6ad2 100755
--- a/ci/install.sh
+++ b/ci/install.sh
@@ -45,102 +45,60 @@ if [ x"$FULL_DEPS" == x"true" ] ; then
fi
fi
-# Everything installed inside this clause into site-packages
-# will get included in the cached venv downloaded from the net
-# in PTF mode
-if ( ! $VENV_FILE_AVAILABLE ); then
- echo "Running full monty"
- # Hard Deps
- pip install $PIP_ARGS nose python-dateutil pytz
+# Hard Deps
+pip install $PIP_ARGS nose python-dateutil pytz
+pip install $PIP_ARGS cython
+
+if [ ${TRAVIS_PYTHON_VERSION} == "3.3" ]; then # should be >=3,3
+ pip install $PIP_ARGS numpy==1.7.0
+elif [ ${TRAVIS_PYTHON_VERSION} == "3.2" ]; then
+ # sudo apt-get $APT_ARGS install python3-numpy; # 1.6.2 or precise
+ pip install $PIP_ARGS numpy==1.6.1
+else
+ pip install $PIP_ARGS numpy==1.6.1
+fi
+
+# Optional Deps
+if [ x"$FULL_DEPS" == x"true" ]; then
+ echo "Installing FULL_DEPS"
pip install $PIP_ARGS cython
- if [ ${TRAVIS_PYTHON_VERSION} == "3.3" ]; then # should be >=3,3
- pip install $PIP_ARGS numpy==1.7.0
- elif [ ${TRAVIS_PYTHON_VERSION} == "3.2" ]; then
- # sudo apt-get $APT_ARGS install python3-numpy; # 1.6.2 or precise
- pip install $PIP_ARGS numpy==1.6.1
+ if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then
+ pip install $PIP_ARGS xlwt
+ pip install $PIP_ARGS bottleneck
+ pip install $PIP_ARGS numexpr==2.0.1
+ pip install $PIP_ARGS tables==2.3.1
else
- pip install $PIP_ARGS numpy==1.6.1
+ pip install $PIP_ARGS numexpr
+ pip install $PIP_ARGS tables
fi
- # Optional Deps
- if [ x"$FULL_DEPS" == x"true" ]; then
- echo "Installing FULL_DEPS"
- pip install $PIP_ARGS cython
-
- if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then
- pip install $PIP_ARGS xlwt
- pip install $PIP_ARGS bottleneck
- pip install $PIP_ARGS numexpr==2.0.1
- pip install $PIP_ARGS tables==2.3.1
- else
- pip install $PIP_ARGS numexpr
- pip install $PIP_ARGS tables
- fi
-
- pip install $PIP_ARGS matplotlib
- pip install $PIP_ARGS openpyxl
- pip install $PIP_ARGS xlrd>=0.9.0
- pip install $PIP_ARGS 'http://downloads.sourceforge.net/project/pytseries/scikits.timeseries/0.91.3/scikits.timeseries-0.91.3.tar.gz?r='
- pip install $PIP_ARGS patsy
- pip install $PIP_ARGS html5lib
-
- if [ ${TRAVIS_PYTHON_VERSION:0:1} == "3" ]; then
- sudo apt-get $APT_ARGS remove python3-lxml
- elif [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then
- sudo apt-get $APT_ARGS remove python-lxml
- fi
-
- pip install $PIP_ARGS lxml
- # fool statsmodels into thinking pandas was already installed
- # so it won't refuse to install itself. We want it in the zipped venv
-
- mkdir $SITE_PKG_DIR/pandas
- touch $SITE_PKG_DIR/pandas/__init__.py
- echo "version='0.10.0-phony'" > $SITE_PKG_DIR/pandas/version.py
- pip install $PIP_ARGS git+git://github.com/statsmodels/statsmodels@c9062e43b8a5f7385537ca95#egg=statsmodels
-
- rm -Rf $SITE_PKG_DIR/pandas # scrub phoney pandas
+ pip install $PIP_ARGS matplotlib
+ pip install $PIP_ARGS openpyxl
+ pip install $PIP_ARGS xlrd>=0.9.0
+ pip install $PIP_ARGS 'http://downloads.sourceforge.net/project/pytseries/scikits.timeseries/0.91.3/scikits.timeseries-0.91.3.tar.gz?r='
+ pip install $PIP_ARGS patsy
+ pip install $PIP_ARGS html5lib
+
+ if [ ${TRAVIS_PYTHON_VERSION:0:1} == "3" ]; then
+ sudo apt-get $APT_ARGS remove python3-lxml
+ elif [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then
+ sudo apt-get $APT_ARGS remove python-lxml
fi
- # pack up the venv and cache it
- if [ x"$STORE_KEY" != x"" ] && $UPLOAD && $PLEASE_TRAVIS_FASTER ; then
- VENV_FNAME="venv-$TRAVIS_PYTHON_VERSION.zip"
-
- zip $ZIP_FLAGS -r "$HOME/$VENV_FNAME" $SITE_PKG_DIR/
- ls -l "$HOME/$VENV_FNAME"
- echo "posting venv"
- # silent, don't expose key
- curl -s --form upload=@"$HOME/$VENV_FNAME" "$CACHE_FILE_STORE_URL/$VENV_FNAME"
- fi
+ pip install $PIP_ARGS lxml
+ # fool statsmodels into thinking pandas was already installed
+ # so it won't refuse to install itself.
-fi;
+ mkdir $SITE_PKG_DIR/pandas
+ touch $SITE_PKG_DIR/pandas/__init__.py
+ echo "version='0.10.0-phony'" > $SITE_PKG_DIR/pandas/version.py
+ pip install $PIP_ARGS git+git://github.com/statsmodels/statsmodels@c9062e43b8a5f7385537ca95#egg=statsmodels
-#build and install pandas
-if [ x"$BUILD_CACHE_DIR" != x"" ]; then
- scripts/use_build_cache.py -d
- python setup.py install;
-else
- python setup.py build_ext install
+ rm -Rf $SITE_PKG_DIR/pandas # scrub phoney pandas
fi
-# package pandas build artifacts and send them home
-# that's everything the build cache (scripts/use_build_cache.py)
-# stored during the build (.so, pyx->.c and 2to3)
-if (! $CACHE_FILE_AVAILABLE) ; then
- if [ x"$STORE_KEY" != x"" ] && $UPLOAD && $PLEASE_TRAVIS_FASTER ; then
- echo "Posting artifacts"
- strip "$BUILD_CACHE_DIR/*" &> /dev/null
- echo "$BUILD_CACHE_DIR"
- cd "$BUILD_CACHE_DIR"/
- zip -r $ZIP_FLAGS "$HOME/$CYTHON_HASH".zip *
- cd "$TRAVIS_BUILD_DIR"
- pwd
- zip "$HOME/$CYTHON_HASH".zip $(find pandas | grep -P '\.(pyx|pxd)$' | sed -r 's/.(pyx|pxd)$/.c/')
-
- # silent, don't expose key
- curl --connect-timeout 5 -s --form upload=@"$HOME/$CYTHON_HASH".zip "$CACHE_FILE_STORE_URL/$CYTHON_HASH.zip"
- fi
-fi
+# build pandas
+python setup.py build_ext install
true
diff --git a/vb_suite/test_perf.py b/vb_suite/test_perf.py
index 2a2a5c9643c75..d019af3370ba9 100755
--- a/vb_suite/test_perf.py
+++ b/vb_suite/test_perf.py
@@ -37,18 +37,10 @@
import random
import numpy as np
-import pandas as pd
from pandas import DataFrame, Series
-try:
- import git # gitpython
-except Exception:
- print("Error: Please install the `gitpython` package\n")
- sys.exit(1)
-
from suite import REPO_PATH
-VB_DIR = os.path.dirname(os.path.abspath(__file__))
DEFAULT_MIN_DURATION = 0.01
HEAD_COL="head[ms]"
BASE_COL="base[ms]"
@@ -65,14 +57,6 @@
parser.add_argument('-t', '--target-commit',
help='The commit to compare against the baseline (default: HEAD).',
type=str)
-parser.add_argument('--base-pickle',
- help='name of pickle file with timings data generated by a former `-H -d FILE` run. '\
- 'filename must be of the form <hash>-*.* or specify --base-commit seperately',
- type=str)
-parser.add_argument('--target-pickle',
- help='name of pickle file with timings data generated by a former `-H -d FILE` run '\
- 'filename must be of the form <hash>-*.* or specify --target-commit seperately',
- type=str)
parser.add_argument('-m', '--min-duration',
help='Minimum duration (in ms) of baseline test for inclusion in report (default: %.3f).' % DEFAULT_MIN_DURATION,
type=float,
@@ -85,7 +69,7 @@
metavar="FNAME",
dest='outdf',
default=None,
- help='Name of file to df.to_pickle() the result table into. Will overwrite')
+ help='Name of file to df.save() the result table into. Will overwrite')
parser.add_argument('-r', '--regex',
metavar="REGEX",
dest='regex',
@@ -120,7 +104,8 @@
parser.add_argument('-a', '--affinity',
metavar="a",
dest='affinity',
- default=None,
+ default=1,
+ type=int,
help='set processor affinity of processm by default bind to cpu/core #1 only'
'requires the "affinity" python module , will raise Warning otherwise' )
@@ -221,74 +206,30 @@ def profile_comparative(benchmarks):
head_res = get_results_df(db, h_head)
baseline_res = get_results_df(db, h_baseline)
-
- report_comparative(head_res,baseline_res)
-
+ ratio = head_res['timing'] / baseline_res['timing']
+ totals = DataFrame({HEAD_COL:head_res['timing'],
+ BASE_COL:baseline_res['timing'],
+ 'ratio':ratio,
+ 'name':baseline_res.name},
+ columns=[HEAD_COL, BASE_COL, "ratio", "name"])
+ totals = totals.ix[totals[HEAD_COL] > args.min_duration]
+ # ignore below threshold
+ totals = totals.dropna(
+ ).sort("ratio").set_index('name') # sort in ascending order
+
+ h_msg = repo.messages.get(h_head, "")
+ b_msg = repo.messages.get(h_baseline, "")
+
+ print_report(totals,h_head=h_head,h_msg=h_msg,
+ h_baseline=h_baseline,b_msg=b_msg)
+
+ if args.outdf:
+ prprint("The results DataFrame was written to '%s'\n" % args.outdf)
+ totals.save(args.outdf)
finally:
# print("Disposing of TMP_DIR: %s" % TMP_DIR)
shutil.rmtree(TMP_DIR)
-def prep_pickle_for_total(df, agg_name='median'):
- """
- accepts a datafram resulting from invocation with -H -d o.pickle
- If multiple data columns are present (-N was used), the
- `agg_name` attr of the datafram will be used to reduce
- them to a single value per vbench, df.median is used by defa
- ult.
-
- Returns a datadrame of the form expected by prep_totals
- """
- def prep(df):
- agg = getattr(df,agg_name)
- df = DataFrame(agg(1))
- cols = list(df.columns)
- cols[0]='timing'
- df.columns=cols
- df['name'] = list(df.index)
- return df
-
- return prep(df)
-
-def prep_totals(head_res, baseline_res):
- """
- Each argument should be a dataframe with 'timing' and 'name' columns
- where name is the name of the vbench.
-
- returns a 'totals' dataframe, suitable as input for print_report.
- """
- head_res, baseline_res = head_res.align(baseline_res)
- ratio = head_res['timing'] / baseline_res['timing']
- totals = DataFrame({HEAD_COL:head_res['timing'],
- BASE_COL:baseline_res['timing'],
- 'ratio':ratio,
- 'name':baseline_res.name},
- columns=[HEAD_COL, BASE_COL, "ratio", "name"])
- totals = totals.ix[totals[HEAD_COL] > args.min_duration]
- # ignore below threshold
- totals = totals.dropna(
- ).sort("ratio").set_index('name') # sort in ascending order
- return totals
-
-def report_comparative(head_res,baseline_res):
- try:
- r=git.Repo(VB_DIR)
- except:
- import pdb
- pdb.set_trace()
-
- totals = prep_totals(head_res,baseline_res)
-
- h_head = args.target_commit
- h_baseline = args.base_commit
- h_msg = r.commit(h_head).message.strip()
- b_msg = r.commit(h_baseline).message.strip()
-
- print_report(totals,h_head=h_head,h_msg=h_msg,
- h_baseline=h_baseline,b_msg=b_msg)
-
- if args.outdf:
- prprint("The results DataFrame was written to '%s'\n" % args.outdf)
- totals.to_pickle(args.outdf)
def profile_head_single(benchmark):
import gc
@@ -364,7 +305,7 @@ def profile_head(benchmarks):
if args.outdf:
prprint("The results DataFrame was written to '%s'\n" % args.outdf)
- DataFrame(results).to_pickle(args.outdf)
+ DataFrame(results).save(args.outdf)
def print_report(df,h_head=None,h_msg="",h_baseline=None,b_msg=""):
@@ -447,23 +388,18 @@ def main():
random.seed(args.seed)
np.random.seed(args.seed)
- if args.base_pickle and args.target_pickle:
- baseline_res = prep_pickle_for_total(pd.read_pickle(args.base_pickle))
- target_res = prep_pickle_for_total(pd.read_pickle(args.target_pickle))
-
- report_comparative(target_res, baseline_res)
- sys.exit(0)
-
- if args.affinity is not None:
- try:
- import affinity
-
- affinity.set_process_affinity_mask(0,args.affinity)
- assert affinity.get_process_affinity_mask(0) == args.affinity
- print("CPU affinity set to %d" % args.affinity)
- except ImportError:
- print("-a/--afinity specified, but the 'affinity' module is not available, aborting.\n")
- sys.exit(1)
+ try:
+ import affinity
+ affinity.set_process_affinity_mask(0,args.affinity)
+ assert affinity.get_process_affinity_mask(0) == args.affinity
+ print("CPU affinity set to %d" % args.affinity)
+ except ImportError:
+ import warnings
+ print("\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"+
+ "The 'affinity' module is not available, results may be unreliable\n" +
+ "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n"
+ )
+ time.sleep(2)
print("\n")
prprint("LOG_FILE = %s" % args.log_file)
@@ -543,39 +479,10 @@ def inner(repo_path):
if __name__ == '__main__':
args = parser.parse_args()
- if (not args.head
- and not (args.base_commit and args.target_commit)
- and not (args.base_pickle and args.target_pickle)):
+ if not args.head and (not args.base_commit and not args.target_commit):
parser.print_help()
- sys.exit(1)
- elif ((args.base_pickle or args.target_pickle) and not
- (args.base_pickle and args.target_pickle)):
- print("Must specify Both --base-pickle and --target-pickle.")
- sys.exit(1)
-
- if ((args.base_pickle or args.target_pickle) and not
- (args.base_commit and args.target_commit)):
- if not args.base_commit:
- print("base_commit not specified, Assuming base_pickle is named <commit>-foo.*")
- args.base_commit = args.base_pickle.split('-')[0]
- if not args.target_commit:
- print("target_commit not specified, Assuming target_pickle is named <commit>-foo.*")
- args.target_commit = args.target_pickle.split('-')[0]
-
- import warnings
- warnings.filterwarnings('ignore',category=FutureWarning)
- warnings.filterwarnings('ignore',category=DeprecationWarning)
-
- if args.base_commit and args.target_commit:
- print("Verifying specified commits exist in repo...")
- r=git.Repo(VB_DIR)
- for c in [ args.base_commit, args.target_commit ]:
- try:
- msg = r.commit(c).message.strip()
- except git.BadObject:
- print("The commit '%s' was not found, aborting" % c)
- sys.exit(1)
- else:
- print("%s: %s" % (c,msg))
-
- main()
+ else:
+ import warnings
+ warnings.filterwarnings('ignore',category=FutureWarning)
+ warnings.filterwarnings('ignore',category=DeprecationWarning)
+ main()
| The travis network build cache (activated via commit message, for whitelisted commiters) was added a few months back. It worked well for a while, but ultimately
it's seen little use in practive by commiters.
While it was gratifying to see this actually worked, all the additional
magic in ci/ doesn't pay for itself in practice, so I've decided to retire this
unless there are strong objections and get a simpler ci/ in return.
The build cache system got an upgrade as a side-effect of this
work and `scripts/use_build_cache.py` remains a useful bit of kit.
Not entirely wasted effort after all.
xref https://github.com/pydata/pandas/pull/3383
| https://api.github.com/repos/pandas-dev/pandas/pulls/3929 | 2013-06-17T00:41:30Z | 2013-06-17T02:45:06Z | 2013-06-17T02:45:06Z | 2014-07-16T08:14:30Z |
TST: Fix error in assert_produces_warning. | diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 20e59b6d3342a..c297cfa554fa5 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -790,5 +790,5 @@ def assert_produces_warning(expected_warning=Warning, filter_level="always"):
if expected_warning:
assert saw_warning, ("Did not see expected warning of class %r."
% expected_warning.__name__)
- assert not extra_warnings, ("Caused unexpected warning(s): %r."
- % extra_warnings)
+ assert not extra_warnings, ("Caused unexpected warning(s): %r."
+ % extra_warnings)
| When I copied this over and sent to @cpcloud,
I thought the indentation was wrong and changed it.
Turned out I was wrong -- sorry about that!
Now all the doctests pass...
| https://api.github.com/repos/pandas-dev/pandas/pulls/3927 | 2013-06-16T20:25:20Z | 2013-06-17T00:18:26Z | 2013-06-17T00:18:26Z | 2014-07-16T08:14:29Z |
CLN: Replace bare exceptions with more descriptive ones | diff --git a/pandas/core/common.py b/pandas/core/common.py
index a31c92caf4343..e86db79b90350 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -580,7 +580,7 @@ def take_2d_multi(arr, indexer, out=None, fill_value=np.nan,
mask_info = (row_mask, col_mask), (row_needs, col_needs)
if row_needs or col_needs:
if out is not None and out.dtype != dtype:
- raise Exception('Incompatible type for fill_value')
+ raise TypeError('Incompatible type for fill_value')
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
@@ -814,7 +814,7 @@ def changeit():
# if we are trying to do something unsafe
# like put a bigger dtype in a smaller one, use the smaller one
if change.dtype.itemsize < r.dtype.itemsize:
- raise Exception("cannot change dtype of input to smaller size")
+ raise TypeError("cannot change dtype of input to smaller size")
change.dtype = r.dtype
change[:] = r
@@ -1259,7 +1259,7 @@ def ensure_float(arr):
def _mut_exclusive(arg1, arg2):
if arg1 is not None and arg2 is not None:
- raise Exception('mutually exclusive arguments')
+ raise TypeError('mutually exclusive arguments')
elif arg1 is not None:
return arg1
else:
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 40d80e91f0264..f760e6226b454 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -825,7 +825,7 @@ def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None,
# validate mi options
if self.has_mi_columns:
if cols is not None:
- raise Exception("cannot specify cols with a multi_index on the columns")
+ raise TypeError("cannot specify cols with a multi_index on the columns")
if cols is not None:
if isinstance(cols,Index):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f0145364363ac..158543021c4e7 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -933,7 +933,7 @@ def dot(self, other):
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
- raise Exception('Dot product shape mismatch, %s vs %s' %
+ raise ValueError('Dot product shape mismatch, %s vs %s' %
(lvals.shape, rvals.shape))
if isinstance(other, DataFrame):
@@ -2844,7 +2844,7 @@ def set_index(self, keys, drop=True, append=False, inplace=False,
if verify_integrity and not index.is_unique:
duplicates = index.get_duplicates()
- raise Exception('Index has duplicate keys: %s' % duplicates)
+ raise ValueError('Index has duplicate keys: %s' % duplicates)
for c in to_remove:
del frame[c]
@@ -3315,7 +3315,7 @@ def sortlevel(self, level=0, axis=0, ascending=True, inplace=False):
axis = self._get_axis_number(axis)
the_axis = self._get_axis(axis)
if not isinstance(the_axis, MultiIndex):
- raise Exception('can only sort by level with a hierarchical index')
+ raise TypeError('can only sort by level with a hierarchical index')
new_axis, indexer = the_axis.sortlevel(level, ascending=ascending)
@@ -3377,7 +3377,7 @@ def reorder_levels(self, order, axis=0):
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
- raise Exception('Can only reorder levels on a hierarchical axis.')
+ raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
@@ -3789,7 +3789,7 @@ def rename(self, index=None, columns=None, copy=True, inplace=False):
from pandas.core.series import _get_rename_function
if index is None and columns is None:
- raise Exception('must pass either index or columns')
+ raise TypeError('must pass either index or columns')
index_f = _get_rename_function(index)
columns_f = _get_rename_function(columns)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index a5880b9f18670..a9ccda3778780 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -361,7 +361,7 @@ def __hash__(self):
return hash(self.view(np.ndarray))
def __setitem__(self, key, value):
- raise Exception(str(self.__class__) + ' object is immutable')
+ raise TypeError(str(self.__class__) + ' does not support item assignment')
def __getitem__(self, key):
"""Override numpy.ndarray's __getitem__ method to work as desired"""
@@ -547,7 +547,7 @@ def order(self, return_indexer=False, ascending=True):
return sorted_index
def sort(self, *args, **kwargs):
- raise Exception('Cannot sort an Index object')
+ raise TypeError('Cannot sort an %r object' % self.__class__.__name__)
def shift(self, periods=1, freq=None):
"""
@@ -606,7 +606,7 @@ def union(self, other):
union : Index
"""
if not hasattr(other, '__iter__'):
- raise Exception('Input must be iterable!')
+ raise TypeError('Input must be iterable!')
if len(other) == 0 or self.equals(other):
return self
@@ -671,7 +671,7 @@ def intersection(self, other):
intersection : Index
"""
if not hasattr(other, '__iter__'):
- raise Exception('Input must be iterable!')
+ raise TypeError('Input must be iterable!')
self._assert_can_do_setop(other)
@@ -713,7 +713,7 @@ def diff(self, other):
"""
if not hasattr(other, '__iter__'):
- raise Exception('Input must be iterable!')
+ raise TypeError('Input must be iterable!')
if self.equals(other):
return Index([], name=self.name)
@@ -1080,7 +1080,7 @@ def _join_level(self, other, level, how='left', return_indexers=False):
the MultiIndex will not be changed (currently)
"""
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
- raise Exception('Join on level between two MultiIndex objects '
+ raise TypeError('Join on level between two MultiIndex objects '
'is ambiguous')
left, right = self, other
@@ -2298,7 +2298,7 @@ def _partial_tup_index(self, tup, side='left'):
if lab not in lev:
if not lev.is_type_compatible(lib.infer_dtype([lab])):
- raise Exception('Level type mismatch: %s' % lab)
+ raise TypeError('Level type mismatch: %s' % lab)
# short circuit
loc = lev.searchsorted(lab, side=side)
@@ -2738,7 +2738,7 @@ def _ensure_index(index_like):
def _validate_join_method(method):
if method not in ['left', 'right', 'inner', 'outer']:
- raise Exception('do not recognize join method %s' % method)
+ raise ValueError('do not recognize join method %s' % method)
# TODO: handle index names!
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 01e976e397111..0b6de7213ec1f 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1571,7 +1571,7 @@ def xs(self, key, axis=1, copy=True):
new_blocks = []
if len(self.blocks) > 1:
if not copy:
- raise Exception('cannot get view of mixed-type or '
+ raise TypeError('cannot get view of mixed-type or '
'non-consolidated DataFrame')
for blk in self.blocks:
newb = make_block(blk.values[slicer],
@@ -1604,7 +1604,7 @@ def fast_2d_xs(self, loc, copy=False):
return result
if not copy:
- raise Exception('cannot get view of mixed-type or '
+ raise TypeError('cannot get view of mixed-type or '
'non-consolidated DataFrame')
dtype = _interleaved_dtype(self.blocks)
@@ -2093,7 +2093,7 @@ def _maybe_rename_join(self, other, lsuffix, rsuffix, copydata=True):
to_rename = self.items.intersection(other.items)
if len(to_rename) > 0:
if not lsuffix and not rsuffix:
- raise Exception('columns overlap: %s' % to_rename)
+ raise ValueError('columns overlap: %s' % to_rename)
def lrenamer(x):
if x in to_rename:
@@ -2377,7 +2377,7 @@ def _shape_compat(x):
else:
items = _ensure_index([ n for n in names if n in ref_items ])
if len(items) != len(stacked):
- raise Exception("invalid names passed _stack_arrays")
+ raise ValueError("invalid names passed _stack_arrays")
return items, stacked, placement
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 0a099661c58f1..455ab96ac08ba 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -137,7 +137,7 @@ def f(self, other):
if isinstance(other, self._constructor):
return self._compare_constructor(other, func)
elif isinstance(other, (self._constructor_sliced, DataFrame, Series)):
- raise Exception("input needs alignment for this object [%s]" %
+ raise ValueError("input needs alignment for this object [%s]" %
self._constructor)
else:
return self._combine_const(other, na_op)
diff --git a/pandas/core/panelnd.py b/pandas/core/panelnd.py
index 08ff3b70dcb13..4928cb565147a 100644
--- a/pandas/core/panelnd.py
+++ b/pandas/core/panelnd.py
@@ -58,7 +58,7 @@ def create_nd_panel_factory(klass_name, axis_orders, axis_slices, slicer, axis_a
#### define the methods ####
def __init__(self, *args, **kwargs):
if not (kwargs.get('data') or len(args)):
- raise Exception(
+ raise TypeError(
"must supply at least a data argument to [%s]" % klass_name)
if 'copy' not in kwargs:
kwargs['copy'] = False
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 938cd99dcef8d..605f8188ccf4c 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1962,7 +1962,8 @@ def clip(self, lower=None, upper=None, out=None):
clipped : Series
"""
if out is not None: # pragma: no cover
- raise Exception('out argument is not supported yet')
+ # TODO: Support out argument?
+ raise NotImplementedError('out argument is not supported yet')
result = self
if lower is not None:
@@ -2028,7 +2029,7 @@ def dot(self, other):
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[0] != rvals.shape[0]:
- raise Exception('Dot product shape mismatch, %s vs %s' %
+ raise ValueError('Dot product shape mismatch, %s vs %s' %
(lvals.shape, rvals.shape))
if isinstance(other, DataFrame):
@@ -2379,7 +2380,7 @@ def sortlevel(self, level=0, ascending=True):
sorted : Series
"""
if not isinstance(self.index, MultiIndex):
- raise Exception('can only sort by level with a hierarchical index')
+ raise TypeError('can only sort by level with a hierarchical index')
new_index, indexer = self.index.sortlevel(level, ascending=ascending)
new_values = self.values.take(indexer)
@@ -2417,7 +2418,7 @@ def reorder_levels(self, order):
type of caller (new object)
"""
if not isinstance(self.index, MultiIndex): # pragma: no cover
- raise Exception('Can only reorder levels on a hierarchical axis.')
+ raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
result.index = result.index.reorder_levels(order)
@@ -3156,7 +3157,7 @@ def interpolate(self, method='linear'):
"""
if method == 'time':
if not isinstance(self, TimeSeries):
- raise Exception('time-weighted interpolation only works'
+ raise TypeError('time-weighted interpolation only works'
'on TimeSeries')
method = 'values'
# inds = pa.array([d.toordinal() for d in self.index])
@@ -3279,7 +3280,7 @@ def tz_localize(self, tz, copy=True):
if not isinstance(self.index, DatetimeIndex):
if len(self.index) > 0:
- raise Exception('Cannot tz-localize non-time series')
+ raise TypeError('Cannot tz-localize non-time series')
new_index = DatetimeIndex([], tz=tz)
else:
@@ -3413,7 +3414,7 @@ def _try_cast(arr, take_fast_path):
elif subarr.ndim > 1:
if isinstance(data, pa.Array):
- raise Exception('Data must be 1-dimensional')
+ raise ValueError('Data must be 1-dimensional')
else:
subarr = _asarray_tuplesafe(data, dtype=dtype)
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 95702847d9c7f..0a57b13cc10fe 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -368,7 +368,7 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
if sheet_name is None:
sheet_name = self.cur_sheet
if sheet_name is None: # pragma: no cover
- raise Exception('Must pass explicit sheet_name or set '
+ raise TypeError('Must pass explicit sheet_name or set '
'cur_sheet property')
if self.use_xlsx:
self._writecells_xlsx(cells, sheet_name, startrow, startcol)
diff --git a/pandas/rpy/common.py b/pandas/rpy/common.py
index acc562925c925..4d0becdfc2cef 100644
--- a/pandas/rpy/common.py
+++ b/pandas/rpy/common.py
@@ -195,7 +195,7 @@ def convert_robj(obj, use_pandas=True):
if isinstance(obj, rpy_type):
return converter(obj)
- raise Exception('Do not know what to do with %s object' % type(obj))
+ raise TypeError('Do not know what to do with %s object' % type(obj))
def convert_to_r_posixct(obj):
diff --git a/pandas/sparse/array.py b/pandas/sparse/array.py
index 035db279064a0..028cad820abf2 100644
--- a/pandas/sparse/array.py
+++ b/pandas/sparse/array.py
@@ -261,7 +261,7 @@ def _get_val_at(self, loc):
loc += n
if loc >= len(self) or loc < 0:
- raise Exception('Out of bounds access')
+ raise IndexError('Out of bounds access')
sp_loc = self.sp_index.lookup(loc)
if sp_loc == -1:
@@ -283,7 +283,7 @@ def take(self, indices, axis=0):
n = len(self)
if (indices < 0).any() or (indices >= n).any():
- raise Exception('out of bounds access')
+ raise IndexError('out of bounds access')
if self.sp_index.npoints > 0:
locs = np.array([self.sp_index.lookup(loc) for loc in indices])
@@ -296,10 +296,10 @@ def take(self, indices, axis=0):
return result
def __setitem__(self, key, value):
- raise Exception('SparseArray objects are immutable')
+ raise TypeError('%r object does not support item assignment' % self.__class__.__name__)
def __setslice__(self, i, j, value):
- raise Exception('SparseArray objects are immutable')
+ raise TypeError('%r object does not support item assignment' % self.__class__.__name__)
def to_dense(self):
"""
@@ -313,7 +313,7 @@ def astype(self, dtype=None):
"""
dtype = np.dtype(dtype)
if dtype is not None and dtype not in (np.float_, float):
- raise Exception('Can only support floating point data for now')
+ raise TypeError('Can only support floating point data for now')
return self.copy()
def copy(self, deep=True):
diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index 9694cc005d178..bb1f5555f60b9 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -195,10 +195,10 @@ def _init_matrix(self, data, index, columns, dtype=None):
columns = _default_index(K)
if len(columns) != K:
- raise Exception('Column length mismatch: %d vs. %d' %
+ raise ValueError('Column length mismatch: %d vs. %d' %
(len(columns), K))
if len(index) != N:
- raise Exception('Index length mismatch: %d vs. %d' %
+ raise ValueError('Index length mismatch: %d vs. %d' %
(len(index), N))
data = dict([(idx, data[:, i]) for i, idx in enumerate(columns)])
@@ -585,7 +585,7 @@ def _combine_const(self, other, func):
def _reindex_index(self, index, method, copy, level, fill_value=np.nan,
limit=None):
if level is not None:
- raise Exception('Reindex by level not supported for sparse')
+ raise TypeError('Reindex by level not supported for sparse')
if self.index.equals(index):
if copy:
@@ -616,7 +616,7 @@ def _reindex_index(self, index, method, copy, level, fill_value=np.nan,
def _reindex_columns(self, columns, copy, level, fill_value, limit=None):
if level is not None:
- raise Exception('Reindex by level not supported for sparse')
+ raise TypeError('Reindex by level not supported for sparse')
if com.notnull(fill_value):
raise NotImplementedError
@@ -891,7 +891,7 @@ def stack_sparse_frame(frame):
vals_to_concat = []
for _, series in frame.iteritems():
if not np.isnan(series.fill_value):
- raise Exception('This routine assumes NaN fill value')
+ raise TypeError('This routine assumes NaN fill value')
int_index = series.sp_index.to_int_index()
inds_to_concat.append(int_index.indices)
@@ -931,7 +931,7 @@ def homogenize(series_dict):
for _, series in series_dict.iteritems():
if not np.isnan(series.fill_value):
- raise Exception('this method is only valid with NaN fill values')
+ raise TypeError('this method is only valid with NaN fill values')
if index is None:
index = series.sp_index
diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py
index 0b2842155b299..246e6fa93918f 100644
--- a/pandas/sparse/panel.py
+++ b/pandas/sparse/panel.py
@@ -249,7 +249,7 @@ def to_frame(self, filter_observations=True):
frame : DataFrame
"""
if not filter_observations:
- raise Exception('filter_observations=False not supported for '
+ raise TypeError('filter_observations=False not supported for '
'SparsePanel.to_long')
I, N, K = self.shape
@@ -325,7 +325,7 @@ def reindex(self, major=None, items=None, minor=None, major_axis=None,
if item in self._frames:
new_frames[item] = self._frames[item]
else:
- raise Exception('Reindexing with new items not yet '
+ raise NotImplementedError('Reindexing with new items not yet '
'supported')
else:
new_frames = self._frames
@@ -488,7 +488,7 @@ def _stack_sparse_info(frame):
series = frame[col]
if not np.isnan(series.fill_value):
- raise Exception('This routine assumes NaN fill value')
+ raise TypeError('This routine assumes NaN fill value')
int_index = series.sp_index.to_int_index()
inds_to_concat.append(int_index.indices)
diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py
index bd01845a295b6..1b8d3541da289 100644
--- a/pandas/sparse/series.py
+++ b/pandas/sparse/series.py
@@ -133,7 +133,7 @@ def __new__(cls, data, index=None, sparse_index=None, kind='block',
raise AssertionError()
else:
if index is None:
- raise Exception('must pass index!')
+ raise TypeError('must pass index!')
length = len(index)
@@ -388,7 +388,7 @@ def astype(self, dtype=None):
"""
if dtype is not None and dtype not in (np.float_, float):
- raise Exception('Can only support floating point data')
+ raise TypeError('Can only support floating point data')
return self.copy()
diff --git a/pandas/stats/common.py b/pandas/stats/common.py
index c3034dbc390bf..85e8916b30169 100644
--- a/pandas/stats/common.py
+++ b/pandas/stats/common.py
@@ -10,7 +10,7 @@ def _get_cluster_type(cluster_type):
elif cluster_type_up == 'TIME':
return 'time'
else: # pragma: no cover
- raise Exception('Unrecognized cluster type: %s' % cluster_type)
+ raise ValueError('Unrecognized cluster type: %s' % cluster_type)
_CLUSTER_TYPES = {
0: 'time',
@@ -35,7 +35,7 @@ def _get_window_type(window_type):
elif window_type_up == 'EXPANDING':
return 'expanding'
else: # pragma: no cover
- raise Exception('Unrecognized window type: %s' % window_type)
+ raise ValueError('Unrecognized window type: %s' % window_type)
def banner(text, width=80):
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 75e35b403dd78..d11fa4a20b084 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -1039,7 +1039,7 @@ def _concat_blocks(self, blocks):
if self.axis > 0:
# Not safe to remove this check, need to profile
if not _all_indexes_same([b.items for b in blocks]):
- raise Exception('dtypes are not consistent throughout '
+ raise TypeError('dtypes are not consistent throughout '
'DataFrames')
return make_block(concat_values, blocks[0].items, self.new_axes[0])
else:
@@ -1184,7 +1184,7 @@ def _maybe_check_integrity(self, concat_index):
if self.verify_integrity:
if not concat_index.is_unique:
overlap = concat_index.get_duplicates()
- raise Exception('Indexes have overlapping values: %s'
+ raise ValueError('Indexes have overlapping values: %s'
% str(overlap))
diff --git a/pandas/tools/tile.py b/pandas/tools/tile.py
index 4c68594a8a093..ffed6cafc1047 100644
--- a/pandas/tools/tile.py
+++ b/pandas/tools/tile.py
@@ -151,7 +151,7 @@ def _bins_to_cuts(x, bins, right=True, labels=None, retbins=False,
ids = bins.searchsorted(x, side=side)
if len(algos.unique(bins)) < len(bins):
- raise Exception('Bin edges must be unique: %s' % repr(bins))
+ raise ValueError('Bin edges must be unique: %s' % repr(bins))
if include_lowest:
ids[x == bins[0]] = 1
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 5985a8a898b27..28127c34c2e96 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -499,7 +499,7 @@ def get_offset(name):
if offset is not None:
return offset
else:
- raise Exception('Bad rule name requested: %s!' % name)
+ raise ValueError('Bad rule name requested: %s!' % name)
getOffset = get_offset
@@ -522,7 +522,7 @@ def get_offset_name(offset):
if name is not None:
return name
else:
- raise Exception('Bad rule given: %s!' % offset)
+ raise ValueError('Bad rule given: %s!' % offset)
def get_legacy_offset_name(offset):
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 1cb986ee6cd7c..7016a216f5d60 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -432,7 +432,7 @@ def _cached_range(cls, start=None, end=None, periods=None, offset=None,
end = Timestamp(end)
if offset is None:
- raise Exception('Must provide a DateOffset!')
+ raise TypeError('Must provide a DateOffset!')
drc = _daterange_cache
if offset not in _daterange_cache:
@@ -926,10 +926,10 @@ def _maybe_utc_convert(self, other):
if isinstance(other, DatetimeIndex):
if self.tz is not None:
if other.tz is None:
- raise Exception('Cannot join tz-naive with tz-aware '
+ raise TypeError('Cannot join tz-naive with tz-aware '
'DatetimeIndex')
elif other.tz is not None:
- raise Exception('Cannot join tz-naive with tz-aware '
+ raise TypeError('Cannot join tz-naive with tz-aware '
'DatetimeIndex')
if self.tz != other.tz:
@@ -1482,7 +1482,7 @@ def tz_convert(self, tz):
if self.tz is None:
# tz naive, use tz_localize
- raise Exception('Cannot convert tz-naive timestamps, use '
+ raise TypeError('Cannot convert tz-naive timestamps, use '
'tz_localize to localize')
# No conversion since timestamps are all UTC to begin with
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 025a12a17687e..9585d1f81e81d 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -351,7 +351,7 @@ def apply(self, other):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
- raise Exception('Only know how to combine business day with '
+ raise TypeError('Only know how to combine business day with '
'datetime or timedelta!')
@classmethod
@@ -487,7 +487,7 @@ def __init__(self, n=1, **kwds):
if self.weekday is not None:
if self.weekday < 0 or self.weekday > 6:
- raise Exception('Day must be 0<=day<=6, got %d' %
+ raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
self._inc = timedelta(weeks=1)
@@ -562,13 +562,13 @@ def __init__(self, n=1, **kwds):
self.week = kwds['week']
if self.n == 0:
- raise Exception('N cannot be 0')
+ raise ValueError('N cannot be 0')
if self.weekday < 0 or self.weekday > 6:
- raise Exception('Day must be 0<=day<=6, got %d' %
+ raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
if self.week < 0 or self.week > 3:
- raise Exception('Week must be 0<=day<=3, got %d' %
+ raise ValueError('Week must be 0<=day<=3, got %d' %
self.week)
self.kwds = kwds
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 90bc0beb8eb84..de6112f40b4ad 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -354,6 +354,6 @@ def ole2datetime(oledt):
# Excel has a bug where it thinks the date 2/29/1900 exists
# we just reject any date before 3/1/1900.
if val < 61:
- raise Exception("Value is outside of acceptable range: %s " % val)
+ raise ValueError("Value is outside of acceptable range: %s " % val)
return OLE_TIME_ZERO + timedelta(days=val)
| This takes care of the low-hanging fruit (really obvious Exception changes), there are still a bunch more to change, but I thought I would split it up into parts and deal with external exceptions before messing with anything that might potentially be raised internally. [I rebased this slightly to remove a few comments - but it did pass previously so it should be fine]
| https://api.github.com/repos/pandas-dev/pandas/pulls/3924 | 2013-06-16T17:56:52Z | 2013-06-19T01:57:07Z | null | 2014-06-20T16:24:15Z |
CLN: Remove unused Exceptions | diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 0a099661c58f1..92f69a7444aab 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -90,10 +90,6 @@ def panel_index(time, panels, names=['time', 'panel']):
return MultiIndex(levels, labels, sortorder=None, names=names)
-class PanelError(Exception):
- pass
-
-
def _arith_method(func, name):
# work only for scalars
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 37141e37d965c..054363d8cda06 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -24,10 +24,6 @@
import pandas.parser as _parser
from pandas.tseries.period import Period
-
-class DateConversionError(Exception):
- pass
-
_parser_params = """Also supports optionally iterating or breaking of the file
into chunks.
@@ -1752,10 +1748,7 @@ def _try_convert_dates(parser, colspec, data_dict, columns):
new_name = '_'.join([str(x) for x in colnames])
to_parse = [data_dict[c] for c in colnames if c in data_dict]
- try:
- new_col = parser(*to_parse)
- except DateConversionError:
- new_col = parser(_concat_date_cols(to_parse))
+ new_col = parser(*to_parse)
return new_name, new_col, colnames
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 1cb986ee6cd7c..46d8d0cc00795 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -87,10 +87,6 @@ def _ensure_datetime64(other):
raise TypeError('%s type object %s' % (type(other), str(other)))
-class TimeSeriesError(Exception):
- pass
-
-
_midnight = time(0, 0)
class DatetimeIndex(Int64Index):
| There are at least 3 `Exception`s that are _never_ raised anywhere, so they can be removed without issue. (There are other exceptions that probably should be removed or refactored, but this is just a basic commit to make sure that these don't get used going forward.)
CLN: Remove 'DateConversionError' that is never raised anywhere
CLN: Remove TimeSeriesError that was never used
CLN: Remove unused 'PanelError'
| https://api.github.com/repos/pandas-dev/pandas/pulls/3921 | 2013-06-16T04:52:12Z | 2013-06-19T01:13:57Z | 2013-06-19T01:13:57Z | 2014-07-06T05:46:03Z |
BUG: Add import for 'is_datetime64_dtype' | diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 168615c060c2b..92774c915dea7 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -13,7 +13,7 @@
from pandas.util.compat import OrderedDict
import pandas.core.algorithms as algos
import pandas.core.common as com
-from pandas.core.common import _possibly_downcast_to_dtype, notnull
+from pandas.core.common import _possibly_downcast_to_dtype, notnull, is_datetime64_dtype
import pandas.lib as lib
import pandas.algos as _algos
| This import was missing from `core/groupby.py`. This also suggests that there is an issue with the test cases, given that no test triggers this issue (suggesting that there is no current test that triggers the `elif` condition below).
``` python
def _try_cast(self, result, obj):
""" try to cast the result to our obj original type,
we may have roundtripped thru object in the mean-time """
try:
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if _is_numeric_dtype(dtype):
# need to respect a non-number here (e.g. Decimal)
if len(result) and issubclass(type(result[0]),(np.number,float,int)):
result = _possibly_downcast_to_dtype(result, dtype)
elif issubclass(dtype.type, np.datetime64):
if is_datetime64_dtype(obj.dtype):
result = result.astype(obj.dtype)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3920 | 2013-06-16T03:41:25Z | 2013-06-17T12:21:25Z | null | 2014-07-16T08:14:25Z |
CLN: Fix CRLFs in repo + add .gitattributes | diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000..0ef16e42a0660
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,15 @@
+* text=auto
+# enforce text on certain files
+*.py text
+*.pyx text
+*.pyd text
+*.c text
+*.h text
+*.html text
+*.csv text
+*.json text
+*.pickle binary
+*.h5 binary
+*.dta binary
+*.xls binary
+*.xlsx binary
diff --git a/doc/source/10min.rst b/doc/source/10min.rst
index d1246dc223626..af84efb93bb5e 100644
--- a/doc/source/10min.rst
+++ b/doc/source/10min.rst
@@ -1,708 +1,708 @@
-.. _10min:
-
-.. currentmodule:: pandas
-
-.. ipython:: python
- :suppress:
-
- import numpy as np
- import random
- import os
- np.random.seed(123456)
- from pandas import *
- import pandas as pd
- randn = np.random.randn
- randint = np.random.randint
- np.set_printoptions(precision=4, suppress=True)
- options.display.mpl_style='default'
-
- #### portions of this were borrowed from the
- #### Pandas cheatsheet
- #### created during the PyData Workshop-Sprint 2012
- #### Hannah Chen, Henry Chow, Eric Cox, Robert Mauriello
-
-
-********************
-10 Minutes to Pandas
-********************
-
-This is a short introduction to pandas, geared mainly for new users.
-You can see more complex recipes in the :ref:`Cookbook<cookbook>`
-
-Customarily, we import as follows
-
-.. ipython:: python
-
- import pandas as pd
- import numpy as np
-
-Object Creation
----------------
-
-See the :ref:`Data Structure Intro section <dsintro>`
-
-Creating a ``Series`` by passing a list of values, letting pandas create a default
-integer index
-
-.. ipython:: python
-
- s = pd.Series([1,3,5,np.nan,6,8])
- s
-
-Creating a ``DataFrame`` by passing a numpy array, with a datetime index and labeled columns.
-
-.. ipython:: python
-
- dates = pd.date_range('20130101',periods=6)
- dates
- df = pd.DataFrame(np.random.randn(6,4),index=dates,columns=list('ABCD'))
- df
-
-Creating a ``DataFrame`` by passing a dict of objects that can be converted to series-like.
-
-.. ipython:: python
-
- df2 = pd.DataFrame({ 'A' : 1.,
- 'B' : pd.Timestamp('20130102'),
- 'C' : pd.Series(1,index=range(4),dtype='float32'),
- 'D' : np.array([3] * 4,dtype='int32'),
- 'E' : 'foo' })
- df2
-
-Having specific :ref:`dtypes <basics.dtypes>`
-
-.. ipython:: python
-
- df2.dtypes
-
-Viewing Data
-------------
-
-See the :ref:`Basics section <basics>`
-
-See the top & bottom rows of the frame
-
-.. ipython:: python
-
- df.head()
- df.tail(3)
-
-Display the index,columns, and the underlying numpy data
-
-.. ipython:: python
-
- df.index
- df.columns
- df.values
-
-Describe shows a quick statistic summary of your data
-
-.. ipython:: python
-
- df.describe()
-
-Transposing your data
-
-.. ipython:: python
-
- df.T
-
-Sorting by an axis
-
-.. ipython:: python
-
- df.sort_index(axis=1, ascending=False)
-
-Sorting by values
-
-.. ipython:: python
-
- df.sort(columns='B')
-
-Selection
----------
-
-.. note::
-
- While standard Python / Numpy expressions for selecting and setting are
- intuitive and come in handy for interactive work, for production code, we
- recommend the optimized pandas data access methods, ``.at``, ``.iat``,
- ``.loc``, ``.iloc`` and ``.ix``.
-
-See the :ref:`Indexing section <indexing>` and below.
-
-Getting
-~~~~~~~
-
-Selecting a single column, which yields a ``Series``,
-equivalent to ``df.A``
-
-.. ipython:: python
-
- df['A']
-
-Selecting via ``[]``, which slices the rows.
-
-.. ipython:: python
-
- df[0:3]
- df['20130102':'20130104']
-
-Selection by Label
-~~~~~~~~~~~~~~~~~~
-
-See more in :ref:`Selection by Label <indexing.label>`
-
-For getting a cross section using a label
-
-.. ipython:: python
-
- df.loc[dates[0]]
-
-Selecting on a multi-axis by label
-
-.. ipython:: python
-
- df.loc[:,['A','B']]
-
-Showing label slicing, both endpoints are *included*
-
-.. ipython:: python
-
- df.loc['20130102':'20130104',['A','B']]
-
-Reduction in the dimensions of the returned object
-
-.. ipython:: python
-
- df.loc['20130102',['A','B']]
-
-For getting a scalar value
-
-.. ipython:: python
-
- df.loc[dates[0],'A']
-
-For getting fast access to a scalar (equiv to the prior method)
-
-.. ipython:: python
-
- df.at[dates[0],'A']
-
-Selection by Position
-~~~~~~~~~~~~~~~~~~~~~
-
-See more in :ref:`Selection by Position <indexing.integer>`
-
-Select via the position of the passed integers
-
-.. ipython:: python
-
- df.iloc[3]
-
-By integer slices, acting similar to numpy/python
-
-.. ipython:: python
-
- df.iloc[3:5,0:2]
-
-By lists of integer position locations, similar to the numpy/python style
-
-.. ipython:: python
-
- df.iloc[[1,2,4],[0,2]]
-
-For slicing rows explicitly
-
-.. ipython:: python
-
- df.iloc[1:3,:]
-
-For slicing columns explicitly
-
-.. ipython:: python
-
- df.iloc[:,1:3]
-
-For getting a value explicity
-
-.. ipython:: python
-
- df.iloc[1,1]
-
-For getting fast access to a scalar (equiv to the prior method)
-
-.. ipython:: python
-
- df.iat[1,1]
-
-There is one signficant departure from standard python/numpy slicing semantics.
-python/numpy allow slicing past the end of an array without an associated
-error.
-
-.. ipython:: python
-
- # these are allowed in python/numpy.
- x = list('abcdef')
- x[4:10]
- x[8:10]
-
-Pandas will detect this and raise ``IndexError``, rather than return an empty
-structure.
-
-::
-
- >>> df.iloc[:,8:10]
- IndexError: out-of-bounds on slice (end)
-
-Boolean Indexing
-~~~~~~~~~~~~~~~~
-
-Using a single column's values to select data.
-
-.. ipython:: python
-
- df[df.A > 0]
-
-A ``where`` operation for getting.
-
-.. ipython:: python
-
- df[df > 0]
-
-
-Setting
-~~~~~~~
-
-Setting a new column automatically aligns the data
-by the indexes
-
-.. ipython:: python
-
- s1 = pd.Series([1,2,3,4,5,6],index=date_range('20130102',periods=6))
- s1
- df['F'] = s1
-
-Setting values by label
-
-.. ipython:: python
-
- df.at[dates[0],'A'] = 0
-
-Setting values by position
-
-.. ipython:: python
-
- df.iat[0,1] = 0
-
-Setting by assigning with a numpy array
-
-.. ipython:: python
-
- df.loc[:,'D'] = np.array([5] * len(df))
-
-The result of the prior setting operations
-
-.. ipython:: python
-
- df
-
-A ``where`` operation with setting.
-
-.. ipython:: python
-
- df2 = df.copy()
- df2[df2 > 0] = -df2
- df2
-
-
-Missing Data
-------------
-
-Pandas primarily uses the value ``np.nan`` to represent missing data. It is by
-default not included in computations. See the :ref:`Missing Data section
-<missing_data>`
-
-Reindexing allows you to change/add/delete the index on a specified axis. This
-returns a copy of the data.
-
-.. ipython:: python
-
- df1 = df.reindex(index=dates[0:4],columns=list(df.columns) + ['E'])
- df1.loc[dates[0]:dates[1],'E'] = 1
- df1
-
-To drop any rows that have missing data.
-
-.. ipython:: python
-
- df1.dropna(how='any')
-
-Filling missing data
-
-.. ipython:: python
-
- df1.fillna(value=5)
-
-To get the boolean mask where values are ``nan``
-
-.. ipython:: python
-
- pd.isnull(df1)
-
-
-Operations
-----------
-
-See the :ref:`Basic section on Binary Ops <basics.binop>`
-
-Stats
-~~~~~
-
-Operations in general *exclude* missing data.
-
-Performing a descriptive statistic
-
-.. ipython:: python
-
- df.mean()
-
-Same operation on the other axis
-
-.. ipython:: python
-
- df.mean(1)
-
-Operating with objects that have different dimensionality and need alignment.
-In addition, pandas automatically broadcasts along the specified dimension.
-
-.. ipython:: python
-
- s = pd.Series([1,3,5,np.nan,6,8],index=dates).shift(2)
- s
- df.sub(s,axis='index')
-
-
-Apply
-~~~~~
-
-Applying functions to the data
-
-.. ipython:: python
-
- df.apply(np.cumsum)
- df.apply(lambda x: x.max() - x.min())
-
-Histogramming
-~~~~~~~~~~~~~
-
-See more at :ref:`Histogramming and Discretization <basics.discretization>`
-
-.. ipython:: python
-
- s = Series(np.random.randint(0,7,size=10))
- s
- s.value_counts()
-
-String Methods
-~~~~~~~~~~~~~~
-
-See more at :ref:`Vectorized String Methods <basics.string_methods>`
-
-.. ipython:: python
-
- s = Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'])
- s.str.lower()
-
-Merge
------
-
-Concat
-~~~~~~
-
-Pandas provides various facilities for easily combining together Series,
-DataFrame, and Panel objects with various kinds of set logic for the indexes
-and relational algebra functionality in the case of join / merge-type
-operations.
-
-See the :ref:`Merging section <merging>`
-
-Concatenating pandas objects together
-
-.. ipython:: python
-
- df = pd.DataFrame(np.random.randn(10, 4))
- df
-
- # break it into pieces
- pieces = [df[:3], df[3:7], df[7:]]
-
- concat(pieces)
-
-Join
-~~~~
-
-SQL style merges. See the :ref:`Database style joining <merging.join>`
-
-.. ipython:: python
-
- left = pd.DataFrame({'key': ['foo', 'foo'], 'lval': [1, 2]})
- right = pd.DataFrame({'key': ['foo', 'foo'], 'rval': [4, 5]})
- left
- right
- merge(left, right, on='key')
-
-Append
-~~~~~~
-
-Append rows to a dataframe. See the :ref:`Appending <merging.concatenation>`
-
-.. ipython:: python
-
- df = pd.DataFrame(np.random.randn(8, 4), columns=['A','B','C','D'])
- df
- s = df.iloc[3]
- df.append(s, ignore_index=True)
-
-
-Grouping
---------
-
-By "group by" we are referring to a process involving one or more of the
-following steps
-
- - **Splitting** the data into groups based on some criteria
- - **Applying** a function to each group independently
- - **Combining** the results into a data structure
-
-See the :ref:`Grouping section <groupby>`
-
-.. ipython:: python
-
- df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
- 'foo', 'bar', 'foo', 'foo'],
- 'B' : ['one', 'one', 'two', 'three',
- 'two', 'two', 'one', 'three'],
- 'C' : randn(8), 'D' : randn(8)})
- df
-
-Grouping and then applying a function ``sum`` to the resulting groups.
-
-.. ipython:: python
-
- df.groupby('A').sum()
-
-Grouping by multiple columns forms a hierarchical index, which we then apply
-the function.
-
-.. ipython:: python
-
- df.groupby(['A','B']).sum()
-
-Reshaping
----------
-
-See the section on :ref:`Hierarchical Indexing <indexing.hierarchical>` and
-see the section on :ref:`Reshaping <reshaping.stacking>`).
-
-Stack
-~~~~~
-
-.. ipython:: python
-
- tuples = zip(*[['bar', 'bar', 'baz', 'baz',
- 'foo', 'foo', 'qux', 'qux'],
- ['one', 'two', 'one', 'two',
- 'one', 'two', 'one', 'two']])
- index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
- df = pd.DataFrame(randn(8, 2), index=index, columns=['A', 'B'])
- df2 = df[:4]
- df2
-
-The ``stack`` function "compresses" a level in the DataFrame's columns.
-
-.. ipython:: python
-
- stacked = df2.stack()
- stacked
-
-With a "stacked" DataFrame or Series (having a ``MultiIndex`` as the
-``index``), the inverse operation of ``stack`` is ``unstack``, which by default
-unstacks the **last level**:
-
-.. ipython:: python
-
- stacked.unstack()
- stacked.unstack(1)
- stacked.unstack(0)
-
-Pivot Tables
-~~~~~~~~~~~~
-See the section on :ref:`Pivot Tables <reshaping.pivot>`.
-
-.. ipython:: python
-
- df = DataFrame({'A' : ['one', 'one', 'two', 'three'] * 3,
- 'B' : ['A', 'B', 'C'] * 4,
- 'C' : ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2,
- 'D' : np.random.randn(12),
- 'E' : np.random.randn(12)})
- df
-
-We can produce pivot tables from this data very easily:
-
-.. ipython:: python
-
- pivot_table(df, values='D', rows=['A', 'B'], cols=['C'])
-
-
-Time Series
------------
-
-Pandas has simple, powerful, and efficient functionality for performing
-resampling operations during frequency conversion (e.g., converting secondly
-data into 5-minutely data). This is extremely common in, but not limited to,
-financial applications. See the :ref:`Time Series section <timeseries>`
-
-.. ipython:: python
-
- rng = pd.date_range('1/1/2012', periods=100, freq='S')
- ts = pd.Series(randint(0, 500, len(rng)), index=rng)
- ts.resample('5Min', how='sum')
-
-Time zone representation
-
-.. ipython:: python
-
- rng = pd.date_range('3/6/2012 00:00', periods=5, freq='D')
- ts = pd.Series(randn(len(rng)), rng)
- ts_utc = ts.tz_localize('UTC')
- ts_utc
-
-Convert to another time zone
-
-.. ipython:: python
-
- ts_utc.tz_convert('US/Eastern')
-
-Converting between time span representations
-
-.. ipython:: python
-
- rng = pd.date_range('1/1/2012', periods=5, freq='M')
- ts = pd.Series(randn(len(rng)), index=rng)
- ts
- ps = ts.to_period()
- ps
- ps.to_timestamp()
-
-Converting between period and timestamp enables some convenient arithmetic
-functions to be used. In the following example, we convert a quarterly
-frequency with year ending in November to 9am of the end of the month following
-the quarter end:
-
-.. ipython:: python
-
- prng = period_range('1990Q1', '2000Q4', freq='Q-NOV')
- ts = Series(randn(len(prng)), prng)
- ts.index = (prng.asfreq('M', 'e') + 1).asfreq('H', 's') + 9
- ts.head()
-
-
-Plotting
---------
-
-:ref:`Plotting <visualization>` docs.
-
-.. ipython:: python
- :suppress:
-
- import matplotlib.pyplot as plt
- plt.close('all')
- options.display.mpl_style='default'
-
-.. ipython:: python
-
- ts = pd.Series(randn(1000), index=pd.date_range('1/1/2000', periods=1000))
- ts = ts.cumsum()
-
- @savefig series_plot_basic.png width=6in
- ts.plot()
-
-On DataFrame, ``plot`` is a convenience to plot all of the columns with labels:
-
-.. ipython:: python
-
- df = pd.DataFrame(randn(1000, 4), index=ts.index,
- columns=['A', 'B', 'C', 'D'])
- df = df.cumsum()
-
- @savefig frame_plot_basic.png width=6in
- plt.figure(); df.plot(); plt.legend(loc='best')
-
-Getting Data In/Out
--------------------
-
-CSV
-~~~
-
-:ref:`Writing to a csv file <io.store_in_csv>`
-
-.. ipython:: python
-
- df.to_csv('foo.csv')
-
-:ref:`Reading from a csv file <io.read_csv_table>`
-
-.. ipython:: python
-
- pd.read_csv('foo.csv')
-
-.. ipython:: python
- :suppress:
-
- os.remove('foo.csv')
-
-HDF5
-~~~~
-
-Reading and writing to :ref:`HDFStores <io.hdf5>`
-
-Writing to a HDF5 Store
-
-.. ipython:: python
-
- df.to_hdf('foo.h5','df')
-
-Reading from a HDF5 Store
-
-.. ipython:: python
-
- read_hdf('foo.h5','df')
-
-.. ipython:: python
- :suppress:
-
- os.remove('foo.h5')
-
-Excel
-~~~~~
-
-Reading and writing to :ref:`MS Excel <io.excel>`
-
-Writing to an excel file
-
-.. ipython:: python
-
- df.to_excel('foo.xlsx', sheet_name='sheet1')
-
-Reading from an excel file
-
-.. ipython:: python
-
- read_excel('foo.xlsx', 'sheet1', index_col=None, na_values=['NA'])
-
-.. ipython:: python
- :suppress:
-
- os.remove('foo.xlsx')
+.. _10min:
+
+.. currentmodule:: pandas
+
+.. ipython:: python
+ :suppress:
+
+ import numpy as np
+ import random
+ import os
+ np.random.seed(123456)
+ from pandas import *
+ import pandas as pd
+ randn = np.random.randn
+ randint = np.random.randint
+ np.set_printoptions(precision=4, suppress=True)
+ options.display.mpl_style='default'
+
+ #### portions of this were borrowed from the
+ #### Pandas cheatsheet
+ #### created during the PyData Workshop-Sprint 2012
+ #### Hannah Chen, Henry Chow, Eric Cox, Robert Mauriello
+
+
+********************
+10 Minutes to Pandas
+********************
+
+This is a short introduction to pandas, geared mainly for new users.
+You can see more complex recipes in the :ref:`Cookbook<cookbook>`
+
+Customarily, we import as follows
+
+.. ipython:: python
+
+ import pandas as pd
+ import numpy as np
+
+Object Creation
+---------------
+
+See the :ref:`Data Structure Intro section <dsintro>`
+
+Creating a ``Series`` by passing a list of values, letting pandas create a default
+integer index
+
+.. ipython:: python
+
+ s = pd.Series([1,3,5,np.nan,6,8])
+ s
+
+Creating a ``DataFrame`` by passing a numpy array, with a datetime index and labeled columns.
+
+.. ipython:: python
+
+ dates = pd.date_range('20130101',periods=6)
+ dates
+ df = pd.DataFrame(np.random.randn(6,4),index=dates,columns=list('ABCD'))
+ df
+
+Creating a ``DataFrame`` by passing a dict of objects that can be converted to series-like.
+
+.. ipython:: python
+
+ df2 = pd.DataFrame({ 'A' : 1.,
+ 'B' : pd.Timestamp('20130102'),
+ 'C' : pd.Series(1,index=range(4),dtype='float32'),
+ 'D' : np.array([3] * 4,dtype='int32'),
+ 'E' : 'foo' })
+ df2
+
+Having specific :ref:`dtypes <basics.dtypes>`
+
+.. ipython:: python
+
+ df2.dtypes
+
+Viewing Data
+------------
+
+See the :ref:`Basics section <basics>`
+
+See the top & bottom rows of the frame
+
+.. ipython:: python
+
+ df.head()
+ df.tail(3)
+
+Display the index, columns, and the underlying numpy data
+
+.. ipython:: python
+
+ df.index
+ df.columns
+ df.values
+
+Describe shows a quick statistic summary of your data
+
+.. ipython:: python
+
+ df.describe()
+
+Transposing your data
+
+.. ipython:: python
+
+ df.T
+
+Sorting by an axis
+
+.. ipython:: python
+
+ df.sort_index(axis=1, ascending=False)
+
+Sorting by values
+
+.. ipython:: python
+
+ df.sort(columns='B')
+
+Selection
+---------
+
+.. note::
+
+ While standard Python / Numpy expressions for selecting and setting are
+ intuitive and come in handy for interactive work, for production code, we
+ recommend the optimized pandas data access methods, ``.at``, ``.iat``,
+ ``.loc``, ``.iloc`` and ``.ix``.
+
+See the :ref:`Indexing section <indexing>` and below.
+
+Getting
+~~~~~~~
+
+Selecting a single column, which yields a ``Series``,
+equivalent to ``df.A``
+
+.. ipython:: python
+
+ df['A']
+
+Selecting via ``[]``, which slices the rows.
+
+.. ipython:: python
+
+ df[0:3]
+ df['20130102':'20130104']
+
+Selection by Label
+~~~~~~~~~~~~~~~~~~
+
+See more in :ref:`Selection by Label <indexing.label>`
+
+For getting a cross section using a label
+
+.. ipython:: python
+
+ df.loc[dates[0]]
+
+Selecting on a multi-axis by label
+
+.. ipython:: python
+
+ df.loc[:,['A','B']]
+
+Showing label slicing, both endpoints are *included*
+
+.. ipython:: python
+
+ df.loc['20130102':'20130104',['A','B']]
+
+Reduction in the dimensions of the returned object
+
+.. ipython:: python
+
+ df.loc['20130102',['A','B']]
+
+For getting a scalar value
+
+.. ipython:: python
+
+ df.loc[dates[0],'A']
+
+For getting fast access to a scalar (equiv to the prior method)
+
+.. ipython:: python
+
+ df.at[dates[0],'A']
+
+Selection by Position
+~~~~~~~~~~~~~~~~~~~~~
+
+See more in :ref:`Selection by Position <indexing.integer>`
+
+Select via the position of the passed integers
+
+.. ipython:: python
+
+ df.iloc[3]
+
+By integer slices, acting similar to numpy/python
+
+.. ipython:: python
+
+ df.iloc[3:5,0:2]
+
+By lists of integer position locations, similar to the numpy/python style
+
+.. ipython:: python
+
+ df.iloc[[1,2,4],[0,2]]
+
+For slicing rows explicitly
+
+.. ipython:: python
+
+ df.iloc[1:3,:]
+
+For slicing columns explicitly
+
+.. ipython:: python
+
+ df.iloc[:,1:3]
+
+For getting a value explicitly
+
+.. ipython:: python
+
+ df.iloc[1,1]
+
+For getting fast access to a scalar (equiv to the prior method)
+
+.. ipython:: python
+
+ df.iat[1,1]
+
+There is one significant departure from standard python/numpy slicing semantics.
+python/numpy allow slicing past the end of an array without an associated
+error.
+
+.. ipython:: python
+
+ # these are allowed in python/numpy.
+ x = list('abcdef')
+ x[4:10]
+ x[8:10]
+
+Pandas will detect this and raise ``IndexError``, rather than return an empty
+structure.
+
+::
+
+ >>> df.iloc[:,8:10]
+ IndexError: out-of-bounds on slice (end)
+
+Boolean Indexing
+~~~~~~~~~~~~~~~~
+
+Using a single column's values to select data.
+
+.. ipython:: python
+
+ df[df.A > 0]
+
+A ``where`` operation for getting.
+
+.. ipython:: python
+
+ df[df > 0]
+
+
+Setting
+~~~~~~~
+
+Setting a new column automatically aligns the data
+by the indexes
+
+.. ipython:: python
+
+ s1 = pd.Series([1,2,3,4,5,6],index=date_range('20130102',periods=6))
+ s1
+ df['F'] = s1
+
+Setting values by label
+
+.. ipython:: python
+
+ df.at[dates[0],'A'] = 0
+
+Setting values by position
+
+.. ipython:: python
+
+ df.iat[0,1] = 0
+
+Setting by assigning with a numpy array
+
+.. ipython:: python
+
+ df.loc[:,'D'] = np.array([5] * len(df))
+
+The result of the prior setting operations
+
+.. ipython:: python
+
+ df
+
+A ``where`` operation with setting.
+
+.. ipython:: python
+
+ df2 = df.copy()
+ df2[df2 > 0] = -df2
+ df2
+
+
+Missing Data
+------------
+
+Pandas primarily uses the value ``np.nan`` to represent missing data. It is by
+default not included in computations. See the :ref:`Missing Data section
+<missing_data>`
+
+Reindexing allows you to change/add/delete the index on a specified axis. This
+returns a copy of the data.
+
+.. ipython:: python
+
+ df1 = df.reindex(index=dates[0:4],columns=list(df.columns) + ['E'])
+ df1.loc[dates[0]:dates[1],'E'] = 1
+ df1
+
+To drop any rows that have missing data.
+
+.. ipython:: python
+
+ df1.dropna(how='any')
+
+Filling missing data
+
+.. ipython:: python
+
+ df1.fillna(value=5)
+
+To get the boolean mask where values are ``nan``
+
+.. ipython:: python
+
+ pd.isnull(df1)
+
+
+Operations
+----------
+
+See the :ref:`Basic section on Binary Ops <basics.binop>`
+
+Stats
+~~~~~
+
+Operations in general *exclude* missing data.
+
+Performing a descriptive statistic
+
+.. ipython:: python
+
+ df.mean()
+
+Same operation on the other axis
+
+.. ipython:: python
+
+ df.mean(1)
+
+Operating with objects that have different dimensionality and need alignment.
+In addition, pandas automatically broadcasts along the specified dimension.
+
+.. ipython:: python
+
+ s = pd.Series([1,3,5,np.nan,6,8],index=dates).shift(2)
+ s
+ df.sub(s,axis='index')
+
+
+Apply
+~~~~~
+
+Applying functions to the data
+
+.. ipython:: python
+
+ df.apply(np.cumsum)
+ df.apply(lambda x: x.max() - x.min())
+
+Histogramming
+~~~~~~~~~~~~~
+
+See more at :ref:`Histogramming and Discretization <basics.discretization>`
+
+.. ipython:: python
+
+ s = Series(np.random.randint(0,7,size=10))
+ s
+ s.value_counts()
+
+String Methods
+~~~~~~~~~~~~~~
+
+See more at :ref:`Vectorized String Methods <basics.string_methods>`
+
+.. ipython:: python
+
+ s = Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'])
+ s.str.lower()
+
+Merge
+-----
+
+Concat
+~~~~~~
+
+Pandas provides various facilities for easily combining together Series,
+DataFrame, and Panel objects with various kinds of set logic for the indexes
+and relational algebra functionality in the case of join / merge-type
+operations.
+
+See the :ref:`Merging section <merging>`
+
+Concatenating pandas objects together
+
+.. ipython:: python
+
+ df = pd.DataFrame(np.random.randn(10, 4))
+ df
+
+ # break it into pieces
+ pieces = [df[:3], df[3:7], df[7:]]
+
+ concat(pieces)
+
+Join
+~~~~
+
+SQL style merges. See the :ref:`Database style joining <merging.join>`
+
+.. ipython:: python
+
+ left = pd.DataFrame({'key': ['foo', 'foo'], 'lval': [1, 2]})
+ right = pd.DataFrame({'key': ['foo', 'foo'], 'rval': [4, 5]})
+ left
+ right
+ merge(left, right, on='key')
+
+Append
+~~~~~~
+
+Append rows to a dataframe. See the :ref:`Appending <merging.concatenation>`
+
+.. ipython:: python
+
+ df = pd.DataFrame(np.random.randn(8, 4), columns=['A','B','C','D'])
+ df
+ s = df.iloc[3]
+ df.append(s, ignore_index=True)
+
+
+Grouping
+--------
+
+By "group by" we are referring to a process involving one or more of the
+following steps
+
+ - **Splitting** the data into groups based on some criteria
+ - **Applying** a function to each group independently
+ - **Combining** the results into a data structure
+
+See the :ref:`Grouping section <groupby>`
+
+.. ipython:: python
+
+ df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
+ 'foo', 'bar', 'foo', 'foo'],
+ 'B' : ['one', 'one', 'two', 'three',
+ 'two', 'two', 'one', 'three'],
+ 'C' : randn(8), 'D' : randn(8)})
+ df
+
+Grouping and then applying a function ``sum`` to the resulting groups.
+
+.. ipython:: python
+
+ df.groupby('A').sum()
+
+Grouping by multiple columns forms a hierarchical index, which we then apply
+the function.
+
+.. ipython:: python
+
+ df.groupby(['A','B']).sum()
+
+Reshaping
+---------
+
+See the section on :ref:`Hierarchical Indexing <indexing.hierarchical>` and
+see the section on :ref:`Reshaping <reshaping.stacking>`).
+
+Stack
+~~~~~
+
+.. ipython:: python
+
+ tuples = zip(*[['bar', 'bar', 'baz', 'baz',
+ 'foo', 'foo', 'qux', 'qux'],
+ ['one', 'two', 'one', 'two',
+ 'one', 'two', 'one', 'two']])
+ index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
+ df = pd.DataFrame(randn(8, 2), index=index, columns=['A', 'B'])
+ df2 = df[:4]
+ df2
+
+The ``stack`` function "compresses" a level in the DataFrame's columns.
+
+.. ipython:: python
+
+ stacked = df2.stack()
+ stacked
+
+With a "stacked" DataFrame or Series (having a ``MultiIndex`` as the
+``index``), the inverse operation of ``stack`` is ``unstack``, which by default
+unstacks the **last level**:
+
+.. ipython:: python
+
+ stacked.unstack()
+ stacked.unstack(1)
+ stacked.unstack(0)
+
+Pivot Tables
+~~~~~~~~~~~~
+See the section on :ref:`Pivot Tables <reshaping.pivot>`.
+
+.. ipython:: python
+
+ df = DataFrame({'A' : ['one', 'one', 'two', 'three'] * 3,
+ 'B' : ['A', 'B', 'C'] * 4,
+ 'C' : ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2,
+ 'D' : np.random.randn(12),
+ 'E' : np.random.randn(12)})
+ df
+
+We can produce pivot tables from this data very easily:
+
+.. ipython:: python
+
+ pivot_table(df, values='D', rows=['A', 'B'], cols=['C'])
+
+
+Time Series
+-----------
+
+Pandas has simple, powerful, and efficient functionality for performing
+resampling operations during frequency conversion (e.g., converting secondly
+data into 5-minutely data). This is extremely common in, but not limited to,
+financial applications. See the :ref:`Time Series section <timeseries>`
+
+.. ipython:: python
+
+ rng = pd.date_range('1/1/2012', periods=100, freq='S')
+ ts = pd.Series(randint(0, 500, len(rng)), index=rng)
+ ts.resample('5Min', how='sum')
+
+Time zone representation
+
+.. ipython:: python
+
+ rng = pd.date_range('3/6/2012 00:00', periods=5, freq='D')
+ ts = pd.Series(randn(len(rng)), rng)
+ ts_utc = ts.tz_localize('UTC')
+ ts_utc
+
+Convert to another time zone
+
+.. ipython:: python
+
+ ts_utc.tz_convert('US/Eastern')
+
+Converting between time span representations
+
+.. ipython:: python
+
+ rng = pd.date_range('1/1/2012', periods=5, freq='M')
+ ts = pd.Series(randn(len(rng)), index=rng)
+ ts
+ ps = ts.to_period()
+ ps
+ ps.to_timestamp()
+
+Converting between period and timestamp enables some convenient arithmetic
+functions to be used. In the following example, we convert a quarterly
+frequency with year ending in November to 9am of the end of the month following
+the quarter end:
+
+.. ipython:: python
+
+ prng = period_range('1990Q1', '2000Q4', freq='Q-NOV')
+ ts = Series(randn(len(prng)), prng)
+ ts.index = (prng.asfreq('M', 'e') + 1).asfreq('H', 's') + 9
+ ts.head()
+
+
+Plotting
+--------
+
+:ref:`Plotting <visualization>` docs.
+
+.. ipython:: python
+ :suppress:
+
+ import matplotlib.pyplot as plt
+ plt.close('all')
+ options.display.mpl_style='default'
+
+.. ipython:: python
+
+ ts = pd.Series(randn(1000), index=pd.date_range('1/1/2000', periods=1000))
+ ts = ts.cumsum()
+
+ @savefig series_plot_basic.png width=6in
+ ts.plot()
+
+On DataFrame, ``plot`` is a convenience to plot all of the columns with labels:
+
+.. ipython:: python
+
+ df = pd.DataFrame(randn(1000, 4), index=ts.index,
+ columns=['A', 'B', 'C', 'D'])
+ df = df.cumsum()
+
+ @savefig frame_plot_basic.png width=6in
+ plt.figure(); df.plot(); plt.legend(loc='best')
+
+Getting Data In/Out
+-------------------
+
+CSV
+~~~
+
+:ref:`Writing to a csv file <io.store_in_csv>`
+
+.. ipython:: python
+
+ df.to_csv('foo.csv')
+
+:ref:`Reading from a csv file <io.read_csv_table>`
+
+.. ipython:: python
+
+ pd.read_csv('foo.csv')
+
+.. ipython:: python
+ :suppress:
+
+ os.remove('foo.csv')
+
+HDF5
+~~~~
+
+Reading and writing to :ref:`HDFStores <io.hdf5>`
+
+Writing to a HDF5 Store
+
+.. ipython:: python
+
+ df.to_hdf('foo.h5','df')
+
+Reading from a HDF5 Store
+
+.. ipython:: python
+
+ read_hdf('foo.h5','df')
+
+.. ipython:: python
+ :suppress:
+
+ os.remove('foo.h5')
+
+Excel
+~~~~~
+
+Reading and writing to :ref:`MS Excel <io.excel>`
+
+Writing to an excel file
+
+.. ipython:: python
+
+ df.to_excel('foo.xlsx', sheet_name='sheet1')
+
+Reading from an excel file
+
+.. ipython:: python
+
+ read_excel('foo.xlsx', 'sheet1', index_col=None, na_values=['NA'])
+
+.. ipython:: python
+ :suppress:
+
+ os.remove('foo.xlsx')
diff --git a/pandas/core/expressions.py b/pandas/core/expressions.py
index de93394872e12..34e56fe576a07 100644
--- a/pandas/core/expressions.py
+++ b/pandas/core/expressions.py
@@ -1,180 +1,180 @@
-"""
-Expressions
------------
-
-Offer fast expression evaluation thru numexpr
-
-"""
-import numpy as np
-
-try:
- import numexpr as ne
- _NUMEXPR_INSTALLED = True
-except ImportError: # pragma: no cover
- _NUMEXPR_INSTALLED = False
-
-_USE_NUMEXPR = _NUMEXPR_INSTALLED
-_evaluate = None
-_where = None
-
-# the set of dtypes that we will allow pass to numexpr
-_ALLOWED_DTYPES = dict(evaluate = set(['int64','int32','float64','float32','bool']),
- where = set(['int64','float64','bool']))
-
-# the minimum prod shape that we will use numexpr
-_MIN_ELEMENTS = 10000
-
-def set_use_numexpr(v = True):
- # set/unset to use numexpr
- global _USE_NUMEXPR
- if _NUMEXPR_INSTALLED:
- _USE_NUMEXPR = v
-
- # choose what we are going to do
- global _evaluate, _where
- if not _USE_NUMEXPR:
- _evaluate = _evaluate_standard
- _where = _where_standard
- else:
- _evaluate = _evaluate_numexpr
- _where = _where_numexpr
-
-def set_numexpr_threads(n = None):
- # if we are using numexpr, set the threads to n
- # otherwise reset
- try:
- if _NUMEXPR_INSTALLED and _USE_NUMEXPR:
- if n is None:
- n = ne.detect_number_of_cores()
- ne.set_num_threads(n)
- except:
- pass
-
-
-def _evaluate_standard(op, op_str, a, b, raise_on_error=True):
- """ standard evaluation """
- return op(a,b)
-
-def _can_use_numexpr(op, op_str, a, b, dtype_check):
- """ return a boolean if we WILL be using numexpr """
- if op_str is not None:
-
- # required min elements (otherwise we are adding overhead)
- if np.prod(a.shape) > _MIN_ELEMENTS:
-
- # check for dtype compatiblity
- dtypes = set()
- for o in [ a, b ]:
- if hasattr(o,'get_dtype_counts'):
- s = o.get_dtype_counts()
- if len(s) > 1:
- return False
- dtypes |= set(s.index)
- elif isinstance(o,np.ndarray):
- dtypes |= set([o.dtype.name])
-
- # allowed are a superset
- if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:
- return True
-
- return False
-
-def _evaluate_numexpr(op, op_str, a, b, raise_on_error = False):
- result = None
-
- if _can_use_numexpr(op, op_str, a, b, 'evaluate'):
- try:
- a_value, b_value = a, b
- if hasattr(a_value,'values'):
- a_value = a_value.values
- if hasattr(b_value,'values'):
- b_value = b_value.values
- result = ne.evaluate('a_value %s b_value' % op_str,
- local_dict={ 'a_value' : a_value,
- 'b_value' : b_value },
- casting='safe')
- except (ValueError), detail:
- if 'unknown type object' in str(detail):
- pass
- except (Exception), detail:
- if raise_on_error:
- raise TypeError(str(detail))
-
- if result is None:
- result = _evaluate_standard(op,op_str,a,b,raise_on_error)
-
- return result
-
-def _where_standard(cond, a, b, raise_on_error=True):
- return np.where(cond, a, b)
-
-def _where_numexpr(cond, a, b, raise_on_error = False):
- result = None
-
- if _can_use_numexpr(None, 'where', a, b, 'where'):
-
- try:
- cond_value, a_value, b_value = cond, a, b
- if hasattr(cond_value,'values'):
- cond_value = cond_value.values
- if hasattr(a_value,'values'):
- a_value = a_value.values
- if hasattr(b_value,'values'):
- b_value = b_value.values
- result = ne.evaluate('where(cond_value,a_value,b_value)',
- local_dict={ 'cond_value' : cond_value,
- 'a_value' : a_value,
- 'b_value' : b_value },
- casting='safe')
- except (ValueError), detail:
- if 'unknown type object' in str(detail):
- pass
- except (Exception), detail:
- if raise_on_error:
- raise TypeError(str(detail))
-
- if result is None:
- result = _where_standard(cond,a,b,raise_on_error)
-
- return result
-
-
-# turn myself on
-set_use_numexpr(True)
-
-def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True):
- """ evaluate and return the expression of the op on a and b
-
- Parameters
- ----------
-
- op : the actual operand
- op_str: the string version of the op
- a : left operand
- b : right operand
- raise_on_error : pass the error to the higher level if indicated (default is False),
- otherwise evaluate the op with and return the results
- use_numexpr : whether to try to use numexpr (default True)
- """
-
- if use_numexpr:
- return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error)
- return _evaluate_standard(op, op_str, a, b, raise_on_error=raise_on_error)
-
-def where(cond, a, b, raise_on_error=False, use_numexpr=True):
- """ evaluate the where condition cond on a and b
-
- Parameters
- ----------
-
- cond : a boolean array
- a : return if cond is True
- b : return if cond is False
- raise_on_error : pass the error to the higher level if indicated (default is False),
- otherwise evaluate the op with and return the results
- use_numexpr : whether to try to use numexpr (default True)
- """
-
- if use_numexpr:
- return _where(cond, a, b, raise_on_error=raise_on_error)
- return _where_standard(cond, a, b, raise_on_error=raise_on_error)
+"""
+Expressions
+-----------
+
+Offer fast expression evaluation thru numexpr
+
+"""
+import numpy as np
+
+try:
+ import numexpr as ne
+ _NUMEXPR_INSTALLED = True
+except ImportError: # pragma: no cover
+ _NUMEXPR_INSTALLED = False
+
+_USE_NUMEXPR = _NUMEXPR_INSTALLED
+_evaluate = None
+_where = None
+
+# the set of dtypes that we will allow pass to numexpr
+_ALLOWED_DTYPES = dict(evaluate = set(['int64','int32','float64','float32','bool']),
+ where = set(['int64','float64','bool']))
+
+# the minimum prod shape that we will use numexpr
+_MIN_ELEMENTS = 10000
+
+def set_use_numexpr(v = True):
+ # set/unset to use numexpr
+ global _USE_NUMEXPR
+ if _NUMEXPR_INSTALLED:
+ _USE_NUMEXPR = v
+
+ # choose what we are going to do
+ global _evaluate, _where
+ if not _USE_NUMEXPR:
+ _evaluate = _evaluate_standard
+ _where = _where_standard
+ else:
+ _evaluate = _evaluate_numexpr
+ _where = _where_numexpr
+
+def set_numexpr_threads(n = None):
+ # if we are using numexpr, set the threads to n
+ # otherwise reset
+ try:
+ if _NUMEXPR_INSTALLED and _USE_NUMEXPR:
+ if n is None:
+ n = ne.detect_number_of_cores()
+ ne.set_num_threads(n)
+ except:
+ pass
+
+
+def _evaluate_standard(op, op_str, a, b, raise_on_error=True):
+ """ standard evaluation """
+ return op(a,b)
+
+def _can_use_numexpr(op, op_str, a, b, dtype_check):
+ """ return a boolean if we WILL be using numexpr """
+ if op_str is not None:
+
+ # required min elements (otherwise we are adding overhead)
+ if np.prod(a.shape) > _MIN_ELEMENTS:
+
+            # check for dtype compatibility
+ dtypes = set()
+ for o in [ a, b ]:
+ if hasattr(o,'get_dtype_counts'):
+ s = o.get_dtype_counts()
+ if len(s) > 1:
+ return False
+ dtypes |= set(s.index)
+ elif isinstance(o,np.ndarray):
+ dtypes |= set([o.dtype.name])
+
+ # allowed are a superset
+ if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:
+ return True
+
+ return False
+
+def _evaluate_numexpr(op, op_str, a, b, raise_on_error = False):
+ result = None
+
+ if _can_use_numexpr(op, op_str, a, b, 'evaluate'):
+ try:
+ a_value, b_value = a, b
+ if hasattr(a_value,'values'):
+ a_value = a_value.values
+ if hasattr(b_value,'values'):
+ b_value = b_value.values
+ result = ne.evaluate('a_value %s b_value' % op_str,
+ local_dict={ 'a_value' : a_value,
+ 'b_value' : b_value },
+ casting='safe')
+ except (ValueError), detail:
+ if 'unknown type object' in str(detail):
+ pass
+ except (Exception), detail:
+ if raise_on_error:
+ raise TypeError(str(detail))
+
+ if result is None:
+ result = _evaluate_standard(op,op_str,a,b,raise_on_error)
+
+ return result
+
+def _where_standard(cond, a, b, raise_on_error=True):
+ return np.where(cond, a, b)
+
+def _where_numexpr(cond, a, b, raise_on_error = False):
+ result = None
+
+ if _can_use_numexpr(None, 'where', a, b, 'where'):
+
+ try:
+ cond_value, a_value, b_value = cond, a, b
+ if hasattr(cond_value,'values'):
+ cond_value = cond_value.values
+ if hasattr(a_value,'values'):
+ a_value = a_value.values
+ if hasattr(b_value,'values'):
+ b_value = b_value.values
+ result = ne.evaluate('where(cond_value,a_value,b_value)',
+ local_dict={ 'cond_value' : cond_value,
+ 'a_value' : a_value,
+ 'b_value' : b_value },
+ casting='safe')
+ except (ValueError), detail:
+ if 'unknown type object' in str(detail):
+ pass
+ except (Exception), detail:
+ if raise_on_error:
+ raise TypeError(str(detail))
+
+ if result is None:
+ result = _where_standard(cond,a,b,raise_on_error)
+
+ return result
+
+
+# turn myself on
+set_use_numexpr(True)
+
+def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True):
+ """ evaluate and return the expression of the op on a and b
+
+ Parameters
+ ----------
+
+ op : the actual operand
+ op_str: the string version of the op
+ a : left operand
+ b : right operand
+ raise_on_error : pass the error to the higher level if indicated (default is False),
+ otherwise evaluate the op with and return the results
+ use_numexpr : whether to try to use numexpr (default True)
+ """
+
+ if use_numexpr:
+ return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error)
+ return _evaluate_standard(op, op_str, a, b, raise_on_error=raise_on_error)
+
+def where(cond, a, b, raise_on_error=False, use_numexpr=True):
+ """ evaluate the where condition cond on a and b
+
+ Parameters
+ ----------
+
+ cond : a boolean array
+ a : return if cond is True
+ b : return if cond is False
+ raise_on_error : pass the error to the higher level if indicated (default is False),
+ otherwise evaluate the op with and return the results
+ use_numexpr : whether to try to use numexpr (default True)
+ """
+
+ if use_numexpr:
+ return _where(cond, a, b, raise_on_error=raise_on_error)
+ return _where_standard(cond, a, b, raise_on_error=raise_on_error)
diff --git a/pandas/io/tests/data/banklist.csv b/pandas/io/tests/data/banklist.csv
index 85cebb56f6adf..e7900830140d2 100644
--- a/pandas/io/tests/data/banklist.csv
+++ b/pandas/io/tests/data/banklist.csv
@@ -1,507 +1,507 @@
-Bank Name,City,ST,CERT,Acquiring Institution,Closing Date,Updated Date
-Banks of Wisconsin d/b/a Bank of Kenosha,Kenosha,WI,35386,"North Shore Bank, FSB",31-May-13,31-May-13
-Central Arizona Bank,Scottsdale,AZ,34527,Western State Bank,14-May-13,20-May-13
-Sunrise Bank,Valdosta,GA,58185,Synovus Bank,10-May-13,21-May-13
-Pisgah Community Bank,Asheville,NC,58701,"Capital Bank, N.A.",10-May-13,14-May-13
-Douglas County Bank,Douglasville,GA,21649,Hamilton State Bank,26-Apr-13,16-May-13
-Parkway Bank,Lenoir,NC,57158,"CertusBank, National Association",26-Apr-13,17-May-13
-Chipola Community Bank,Marianna,FL,58034,First Federal Bank of Florida,19-Apr-13,16-May-13
-Heritage Bank of North Florida,Orange Park,FL,26680,FirstAtlantic Bank,19-Apr-13,16-May-13
-First Federal Bank,Lexington,KY,29594,Your Community Bank,19-Apr-13,23-Apr-13
-Gold Canyon Bank,Gold Canyon,AZ,58066,"First Scottsdale Bank, National Association",5-Apr-13,9-Apr-13
-Frontier Bank,LaGrange,GA,16431,HeritageBank of the South,8-Mar-13,26-Mar-13
-Covenant Bank,Chicago,IL,22476,Liberty Bank and Trust Company,15-Feb-13,4-Mar-13
-1st Regents Bank,Andover,MN,57157,First Minnesota Bank,18-Jan-13,28-Feb-13
-Westside Community Bank,University Place,WA,33997,Sunwest Bank,11-Jan-13,24-Jan-13
-Community Bank of the Ozarks,Sunrise Beach,MO,27331,Bank of Sullivan,14-Dec-12,24-Jan-13
-Hometown Community Bank,Braselton,GA,57928,"CertusBank, National Association",16-Nov-12,24-Jan-13
-Citizens First National Bank,Princeton,IL,3731,Heartland Bank and Trust Company,2-Nov-12,24-Jan-13
-Heritage Bank of Florida,Lutz,FL,35009,Centennial Bank,2-Nov-12,24-Jan-13
-NOVA Bank,Berwyn,PA,27148,No Acquirer,26-Oct-12,24-Jan-13
-Excel Bank,Sedalia,MO,19189,Simmons First National Bank,19-Oct-12,24-Jan-13
-First East Side Savings Bank,Tamarac,FL,28144,Stearns Bank N.A.,19-Oct-12,24-Jan-13
-GulfSouth Private Bank,Destin,FL,58073,SmartBank,19-Oct-12,24-Jan-13
-First United Bank,Crete,IL,20685,"Old Plank Trail Community Bank, National Association",28-Sep-12,15-Nov-12
-Truman Bank,St. Louis,MO,27316,Simmons First National Bank,14-Sep-12,17-Dec-12
-First Commercial Bank,Bloomington,MN,35246,Republic Bank & Trust Company,7-Sep-12,17-Dec-12
-Waukegan Savings Bank,Waukegan,IL,28243,First Midwest Bank,3-Aug-12,11-Oct-12
-Jasper Banking Company,Jasper,GA,16240,Stearns Bank N.A.,27-Jul-12,17-Dec-12
-Second Federal Savings and Loan Association of Chicago,Chicago,IL,27986,Hinsdale Bank & Trust Company,20-Jul-12,14-Jan-13
-Heartland Bank,Leawood,KS,1361,Metcalf Bank,20-Jul-12,17-Dec-12
-First Cherokee State Bank,Woodstock,GA,32711,Community & Southern Bank,20-Jul-12,31-Oct-12
-Georgia Trust Bank,Buford,GA,57847,Community & Southern Bank,20-Jul-12,17-Dec-12
-The Royal Palm Bank of Florida,Naples,FL,57096,First National Bank of the Gulf Coast,20-Jul-12,7-Jan-13
-Glasgow Savings Bank,Glasgow,MO,1056,Regional Missouri Bank,13-Jul-12,11-Oct-12
-Montgomery Bank & Trust,Ailey,GA,19498,Ameris Bank,6-Jul-12,31-Oct-12
-The Farmers Bank of Lynchburg,Lynchburg,TN,1690,Clayton Bank and Trust,15-Jun-12,31-Oct-12
-Security Exchange Bank,Marietta,GA,35299,Fidelity Bank,15-Jun-12,10-Oct-12
-Putnam State Bank,Palatka,FL,27405,Harbor Community Bank,15-Jun-12,10-Oct-12
-Waccamaw Bank,Whiteville,NC,34515,First Community Bank,8-Jun-12,8-Nov-12
-Farmers' and Traders' State Bank,Shabbona,IL,9257,First State Bank,8-Jun-12,10-Oct-12
-Carolina Federal Savings Bank,Charleston,SC,35372,Bank of North Carolina,8-Jun-12,31-Oct-12
-First Capital Bank,Kingfisher,OK,416,F & M Bank,8-Jun-12,10-Oct-12
-"Alabama Trust Bank, National Association",Sylacauga,AL,35224,Southern States Bank,18-May-12,20-May-13
-"Security Bank, National Association",North Lauderdale,FL,23156,Banesco USA,4-May-12,31-Oct-12
-Palm Desert National Bank,Palm Desert,CA,23632,Pacific Premier Bank,27-Apr-12,17-May-13
-Plantation Federal Bank,Pawleys Island,SC,32503,First Federal Bank,27-Apr-12,17-May-13
-"Inter Savings Bank, fsb D/B/A InterBank, fsb",Maple Grove,MN,31495,Great Southern Bank,27-Apr-12,17-May-13
-HarVest Bank of Maryland,Gaithersburg,MD,57766,Sonabank,27-Apr-12,17-May-13
-Bank of the Eastern Shore,Cambridge,MD,26759,No Acquirer,27-Apr-12,17-Oct-12
-"Fort Lee Federal Savings Bank, FSB",Fort Lee,NJ,35527,Alma Bank,20-Apr-12,17-May-13
-Fidelity Bank,Dearborn,MI,33883,The Huntington National Bank,30-Mar-12,16-May-13
-Premier Bank,Wilmette,IL,35419,International Bank of Chicago,23-Mar-12,17-Oct-12
-Covenant Bank & Trust,Rock Spring,GA,58068,"Stearns Bank, N.A.",23-Mar-12,31-Oct-12
-New City Bank,Chicago,IL,57597,No Acquirer,9-Mar-12,29-Oct-12
-Global Commerce Bank,Doraville,GA,34046,Metro City Bank,2-Mar-12,31-Oct-12
-Home Savings of America,Little Falls,MN,29178,No Acquirer,24-Feb-12,17-Dec-12
-Central Bank of Georgia,Ellaville,GA,5687,Ameris Bank,24-Feb-12,9-Aug-12
-SCB Bank,Shelbyville,IN,29761,"First Merchants Bank, National Association",10-Feb-12,25-Mar-13
-Charter National Bank and Trust,Hoffman Estates,IL,23187,"Barrington Bank & Trust Company, National Association",10-Feb-12,25-Mar-13
-BankEast,Knoxville,TN,19869,U.S.Bank National Association,27-Jan-12,8-Mar-13
-Patriot Bank Minnesota,Forest Lake,MN,34823,First Resource Bank,27-Jan-12,12-Sep-12
-Tennessee Commerce Bank,Franklin,TN,35296,Republic Bank & Trust Company,27-Jan-12,20-Nov-12
-First Guaranty Bank and Trust Company of Jacksonville,Jacksonville,FL,16579,"CenterState Bank of Florida, N.A.",27-Jan-12,12-Sep-12
-American Eagle Savings Bank,Boothwyn,PA,31581,"Capital Bank, N.A.",20-Jan-12,25-Jan-13
-The First State Bank,Stockbridge,GA,19252,Hamilton State Bank,20-Jan-12,25-Jan-13
-Central Florida State Bank,Belleview,FL,57186,"CenterState Bank of Florida, N.A.",20-Jan-12,25-Jan-13
-Western National Bank,Phoenix,AZ,57917,Washington Federal,16-Dec-11,13-Aug-12
-Premier Community Bank of the Emerald Coast,Crestview,FL,58343,Summit Bank,16-Dec-11,12-Sep-12
-Central Progressive Bank,Lacombe,LA,19657,First NBC Bank,18-Nov-11,13-Aug-12
-Polk County Bank,Johnston,IA,14194,Grinnell State Bank,18-Nov-11,15-Aug-12
-Community Bank of Rockmart,Rockmart,GA,57860,Century Bank of Georgia,10-Nov-11,13-Aug-12
-SunFirst Bank,Saint George,UT,57087,Cache Valley Bank,4-Nov-11,16-Nov-12
-"Mid City Bank, Inc.",Omaha,NE,19397,Premier Bank,4-Nov-11,15-Aug-12
-All American Bank,Des Plaines,IL,57759,International Bank of Chicago,28-Oct-11,15-Aug-12
-Community Banks of Colorado,Greenwood Village,CO,21132,"Bank Midwest, N.A.",21-Oct-11,2-Jan-13
-Community Capital Bank,Jonesboro,GA,57036,State Bank and Trust Company,21-Oct-11,8-Nov-12
-Decatur First Bank,Decatur,GA,34392,Fidelity Bank,21-Oct-11,8-Nov-12
-Old Harbor Bank,Clearwater,FL,57537,1st United Bank,21-Oct-11,8-Nov-12
-Country Bank,Aledo,IL,35395,Blackhawk Bank & Trust,14-Oct-11,15-Aug-12
-First State Bank,Cranford,NJ,58046,Northfield Bank,14-Oct-11,8-Nov-12
-"Blue Ridge Savings Bank, Inc.",Asheville,NC,32347,Bank of North Carolina,14-Oct-11,8-Nov-12
-Piedmont Community Bank,Gray,GA,57256,State Bank and Trust Company,14-Oct-11,22-Jan-13
-Sun Security Bank,Ellington,MO,20115,Great Southern Bank,7-Oct-11,7-Nov-12
-The RiverBank,Wyoming,MN,10216,Central Bank,7-Oct-11,7-Nov-12
-First International Bank,Plano,TX,33513,American First National Bank,30-Sep-11,9-Oct-12
-Citizens Bank of Northern California,Nevada City,CA,33983,Tri Counties Bank,23-Sep-11,9-Oct-12
-Bank of the Commonwealth,Norfolk,VA,20408,Southern Bank and Trust Company,23-Sep-11,9-Oct-12
-The First National Bank of Florida,Milton,FL,25155,CharterBank,9-Sep-11,6-Sep-12
-CreekSide Bank,Woodstock,GA,58226,Georgia Commerce Bank,2-Sep-11,6-Sep-12
-Patriot Bank of Georgia,Cumming,GA,58273,Georgia Commerce Bank,2-Sep-11,2-Nov-12
-First Choice Bank,Geneva,IL,57212,Inland Bank & Trust,19-Aug-11,15-Aug-12
-First Southern National Bank,Statesboro,GA,57239,Heritage Bank of the South,19-Aug-11,2-Nov-12
-Lydian Private Bank,Palm Beach,FL,35356,"Sabadell United Bank, N.A.",19-Aug-11,2-Nov-12
-Public Savings Bank,Huntingdon Valley,PA,34130,"Capital Bank, N.A.",18-Aug-11,15-Aug-12
-The First National Bank of Olathe,Olathe,KS,4744,Enterprise Bank & Trust,12-Aug-11,23-Aug-12
-Bank of Whitman,Colfax,WA,22528,Columbia State Bank,5-Aug-11,16-Aug-12
-Bank of Shorewood,Shorewood,IL,22637,Heartland Bank and Trust Company,5-Aug-11,16-Aug-12
-Integra Bank National Association,Evansville,IN,4392,Old National Bank,29-Jul-11,16-Aug-12
-"BankMeridian, N.A.",Columbia,SC,58222,SCBT National Association,29-Jul-11,2-Nov-12
-Virginia Business Bank,Richmond,VA,58283,Xenith Bank,29-Jul-11,9-Oct-12
-Bank of Choice,Greeley,CO,2994,"Bank Midwest, N.A.",22-Jul-11,12-Sep-12
-LandMark Bank of Florida,Sarasota,FL,35244,American Momentum Bank,22-Jul-11,2-Nov-12
-Southshore Community Bank,Apollo Beach,FL,58056,American Momentum Bank,22-Jul-11,2-Nov-12
-Summit Bank,Prescott,AZ,57442,The Foothills Bank,15-Jul-11,16-Aug-12
-First Peoples Bank,Port St. Lucie,FL,34870,"Premier American Bank, N.A.",15-Jul-11,2-Nov-12
-High Trust Bank,Stockbridge,GA,19554,Ameris Bank,15-Jul-11,2-Nov-12
-One Georgia Bank,Atlanta,GA,58238,Ameris Bank,15-Jul-11,2-Nov-12
-Signature Bank,Windsor,CO,57835,Points West Community Bank,8-Jul-11,26-Oct-12
-Colorado Capital Bank,Castle Rock,CO,34522,First-Citizens Bank & Trust Company,8-Jul-11,15-Jan-13
-First Chicago Bank & Trust,Chicago,IL,27935,Northbrook Bank & Trust Company,8-Jul-11,9-Sep-12
-Mountain Heritage Bank,Clayton,GA,57593,First American Bank and Trust Company,24-Jun-11,2-Nov-12
-First Commercial Bank of Tampa Bay,Tampa,FL,27583,Stonegate Bank,17-Jun-11,2-Nov-12
-McIntosh State Bank,Jackson,GA,19237,Hamilton State Bank,17-Jun-11,2-Nov-12
-Atlantic Bank and Trust,Charleston,SC,58420,"First Citizens Bank and Trust Company, Inc.",3-Jun-11,31-Oct-12
-First Heritage Bank,Snohomish,WA,23626,Columbia State Bank,27-May-11,28-Jan-13
-Summit Bank,Burlington,WA,513,Columbia State Bank,20-May-11,22-Jan-13
-First Georgia Banking Company,Franklin,GA,57647,"CertusBank, National Association",20-May-11,13-Nov-12
-Atlantic Southern Bank,Macon,GA,57213,"CertusBank, National Association",20-May-11,31-Oct-12
-Coastal Bank,Cocoa Beach,FL,34898,"Florida Community Bank, a division of Premier American Bank, N.A.",6-May-11,30-Nov-12
-Community Central Bank,Mount Clemens,MI,34234,Talmer Bank & Trust,29-Apr-11,16-Aug-12
-The Park Avenue Bank,Valdosta,GA,19797,Bank of the Ozarks,29-Apr-11,30-Nov-12
-First Choice Community Bank,Dallas,GA,58539,Bank of the Ozarks,29-Apr-11,22-Jan-13
-Cortez Community Bank,Brooksville,FL,57625,"Florida Community Bank, a division of Premier American Bank, N.A.",29-Apr-11,30-Nov-12
-First National Bank of Central Florida,Winter Park,FL,26297,"Florida Community Bank, a division of Premier American Bank, N.A.",29-Apr-11,30-Nov-12
-Heritage Banking Group,Carthage,MS,14273,Trustmark National Bank,15-Apr-11,30-Nov-12
-Rosemount National Bank,Rosemount,MN,24099,Central Bank,15-Apr-11,16-Aug-12
-Superior Bank,Birmingham,AL,17750,"Superior Bank, National Association",15-Apr-11,30-Nov-12
-Nexity Bank,Birmingham,AL,19794,AloStar Bank of Commerce,15-Apr-11,4-Sep-12
-New Horizons Bank,East Ellijay,GA,57705,Citizens South Bank,15-Apr-11,16-Aug-12
-Bartow County Bank,Cartersville,GA,21495,Hamilton State Bank,15-Apr-11,22-Jan-13
-Nevada Commerce Bank,Las Vegas,NV,35418,City National Bank,8-Apr-11,9-Sep-12
-Western Springs National Bank and Trust,Western Springs,IL,10086,Heartland Bank and Trust Company,8-Apr-11,22-Jan-13
-The Bank of Commerce,Wood Dale,IL,34292,Advantage National Bank Group,25-Mar-11,22-Jan-13
-Legacy Bank,Milwaukee,WI,34818,Seaway Bank and Trust Company,11-Mar-11,12-Sep-12
-First National Bank of Davis,Davis,OK,4077,The Pauls Valley National Bank,11-Mar-11,20-Aug-12
-Valley Community Bank,St. Charles,IL,34187,First State Bank,25-Feb-11,12-Sep-12
-"San Luis Trust Bank, FSB",San Luis Obispo,CA,34783,First California Bank,18-Feb-11,20-Aug-12
-Charter Oak Bank,Napa,CA,57855,Bank of Marin,18-Feb-11,12-Sep-12
-Citizens Bank of Effingham,Springfield,GA,34601,Heritage Bank of the South,18-Feb-11,2-Nov-12
-Habersham Bank,Clarkesville,GA,151,SCBT National Association,18-Feb-11,2-Nov-12
-Canyon National Bank,Palm Springs,CA,34692,Pacific Premier Bank,11-Feb-11,12-Sep-12
-Badger State Bank,Cassville,WI,13272,Royal Bank,11-Feb-11,12-Sep-12
-Peoples State Bank,Hamtramck,MI,14939,First Michigan Bank,11-Feb-11,22-Jan-13
-Sunshine State Community Bank,Port Orange,FL,35478,"Premier American Bank, N.A.",11-Feb-11,2-Nov-12
-Community First Bank Chicago,Chicago,IL,57948,Northbrook Bank & Trust Company,4-Feb-11,20-Aug-12
-North Georgia Bank,Watkinsville,GA,35242,BankSouth,4-Feb-11,2-Nov-12
-American Trust Bank,Roswell,GA,57432,Renasant Bank,4-Feb-11,31-Oct-12
-First Community Bank,Taos,NM,12261,"U.S. Bank, N.A.",28-Jan-11,12-Sep-12
-FirsTier Bank,Louisville,CO,57646,No Acquirer,28-Jan-11,12-Sep-12
-Evergreen State Bank,Stoughton,WI,5328,McFarland State Bank,28-Jan-11,12-Sep-12
-The First State Bank,Camargo,OK,2303,Bank 7,28-Jan-11,12-Sep-12
-United Western Bank,Denver,CO,31293,First-Citizens Bank & Trust Company,21-Jan-11,12-Sep-12
-The Bank of Asheville,Asheville,NC,34516,First Bank,21-Jan-11,2-Nov-12
-CommunitySouth Bank & Trust,Easley,SC,57868,"CertusBank, National Association",21-Jan-11,2-Nov-12
-Enterprise Banking Company,McDonough,GA,19758,No Acquirer,21-Jan-11,2-Nov-12
-Oglethorpe Bank,Brunswick,GA,57440,Bank of the Ozarks,14-Jan-11,2-Nov-12
-Legacy Bank,Scottsdale,AZ,57820,Enterprise Bank & Trust,7-Jan-11,12-Sep-12
-First Commercial Bank of Florida,Orlando,FL,34965,First Southern Bank,7-Jan-11,2-Nov-12
-Community National Bank,Lino Lakes,MN,23306,Farmers & Merchants Savings Bank,17-Dec-10,20-Aug-12
-First Southern Bank,Batesville,AR,58052,Southern Bank,17-Dec-10,20-Aug-12
-"United Americas Bank, N.A.",Atlanta,GA,35065,State Bank and Trust Company,17-Dec-10,2-Nov-12
-"Appalachian Community Bank, FSB",McCaysville,GA,58495,Peoples Bank of East Tennessee,17-Dec-10,31-Oct-12
-Chestatee State Bank,Dawsonville,GA,34578,Bank of the Ozarks,17-Dec-10,2-Nov-12
-"The Bank of Miami,N.A.",Coral Gables,FL,19040,1st United Bank,17-Dec-10,2-Nov-12
-Earthstar Bank,Southampton,PA,35561,Polonia Bank,10-Dec-10,20-Aug-12
-Paramount Bank,Farmington Hills,MI,34673,Level One Bank,10-Dec-10,20-Aug-12
-First Banking Center,Burlington,WI,5287,First Michigan Bank,19-Nov-10,20-Aug-12
-Allegiance Bank of North America,Bala Cynwyd,PA,35078,VIST Bank,19-Nov-10,20-Aug-12
-Gulf State Community Bank,Carrabelle,FL,20340,Centennial Bank,19-Nov-10,2-Nov-12
-Copper Star Bank,Scottsdale,AZ,35463,"Stearns Bank, N.A.",12-Nov-10,20-Aug-12
-Darby Bank & Trust Co.,Vidalia,GA,14580,Ameris Bank,12-Nov-10,15-Jan-13
-Tifton Banking Company,Tifton,GA,57831,Ameris Bank,12-Nov-10,2-Nov-12
-First Vietnamese American Bank,Westminster,CA,57885,Grandpoint Bank,5-Nov-10,12-Sep-12
-Pierce Commercial Bank,Tacoma,WA,34411,Heritage Bank,5-Nov-10,20-Aug-12
-Western Commercial Bank,Woodland Hills,CA,58087,First California Bank,5-Nov-10,12-Sep-12
-K Bank,Randallstown,MD,31263,Manufacturers and Traders Trust Company (M&T Bank),5-Nov-10,20-Aug-12
-"First Arizona Savings, A FSB",Scottsdale,AZ,32582,No Acquirer,22-Oct-10,20-Aug-12
-Hillcrest Bank,Overland Park,KS,22173,"Hillcrest Bank, N.A.",22-Oct-10,20-Aug-12
-First Suburban National Bank,Maywood,IL,16089,Seaway Bank and Trust Company,22-Oct-10,20-Aug-12
-The First National Bank of Barnesville,Barnesville,GA,2119,United Bank,22-Oct-10,2-Nov-12
-The Gordon Bank,Gordon,GA,33904,Morris Bank,22-Oct-10,2-Nov-12
-Progress Bank of Florida,Tampa,FL,32251,Bay Cities Bank,22-Oct-10,2-Nov-12
-First Bank of Jacksonville,Jacksonville,FL,27573,Ameris Bank,22-Oct-10,2-Nov-12
-Premier Bank,Jefferson City,MO,34016,Providence Bank,15-Oct-10,20-Aug-12
-WestBridge Bank and Trust Company,Chesterfield,MO,58205,Midland States Bank,15-Oct-10,20-Aug-12
-"Security Savings Bank, F.S.B.",Olathe,KS,30898,Simmons First National Bank,15-Oct-10,20-Aug-12
-Shoreline Bank,Shoreline,WA,35250,GBC International Bank,1-Oct-10,20-Aug-12
-Wakulla Bank,Crawfordville,FL,21777,Centennial Bank,1-Oct-10,2-Nov-12
-North County Bank,Arlington,WA,35053,Whidbey Island Bank,24-Sep-10,20-Aug-12
-Haven Trust Bank Florida,Ponte Vedra Beach,FL,58308,First Southern Bank,24-Sep-10,5-Nov-12
-Maritime Savings Bank,West Allis,WI,28612,"North Shore Bank, FSB",17-Sep-10,20-Aug-12
-Bramble Savings Bank,Milford,OH,27808,Foundation Bank,17-Sep-10,20-Aug-12
-The Peoples Bank,Winder,GA,182,Community & Southern Bank,17-Sep-10,5-Nov-12
-First Commerce Community Bank,Douglasville,GA,57448,Community & Southern Bank,17-Sep-10,15-Jan-13
-Bank of Ellijay,Ellijay,GA,58197,Community & Southern Bank,17-Sep-10,15-Jan-13
-ISN Bank,Cherry Hill,NJ,57107,Customers Bank,17-Sep-10,22-Aug-12
-Horizon Bank,Bradenton,FL,35061,Bank of the Ozarks,10-Sep-10,5-Nov-12
-Sonoma Valley Bank,Sonoma,CA,27259,Westamerica Bank,20-Aug-10,12-Sep-12
-Los Padres Bank,Solvang,CA,32165,Pacific Western Bank,20-Aug-10,12-Sep-12
-Butte Community Bank,Chico,CA,33219,"Rabobank, N.A.",20-Aug-10,12-Sep-12
-Pacific State Bank,Stockton,CA,27090,"Rabobank, N.A.",20-Aug-10,12-Sep-12
-ShoreBank,Chicago,IL,15640,Urban Partnership Bank,20-Aug-10,16-May-13
-Imperial Savings and Loan Association,Martinsville,VA,31623,"River Community Bank, N.A.",20-Aug-10,24-Aug-12
-Independent National Bank,Ocala,FL,27344,"CenterState Bank of Florida, N.A.",20-Aug-10,5-Nov-12
-Community National Bank at Bartow,Bartow,FL,25266,"CenterState Bank of Florida, N.A.",20-Aug-10,5-Nov-12
-Palos Bank and Trust Company,Palos Heights,IL,17599,First Midwest Bank,13-Aug-10,22-Aug-12
-Ravenswood Bank,Chicago,IL,34231,Northbrook Bank & Trust Company,6-Aug-10,22-Aug-12
-LibertyBank,Eugene,OR,31964,Home Federal Bank,30-Jul-10,22-Aug-12
-The Cowlitz Bank,Longview,WA,22643,Heritage Bank,30-Jul-10,22-Aug-12
-Coastal Community Bank,Panama City Beach,FL,9619,Centennial Bank,30-Jul-10,5-Nov-12
-Bayside Savings Bank,Port Saint Joe,FL,57669,Centennial Bank,30-Jul-10,5-Nov-12
-Northwest Bank & Trust,Acworth,GA,57658,State Bank and Trust Company,30-Jul-10,5-Nov-12
-Home Valley Bank,Cave Junction,OR,23181,South Valley Bank & Trust,23-Jul-10,12-Sep-12
-SouthwestUSA Bank,Las Vegas,NV,35434,Plaza Bank,23-Jul-10,22-Aug-12
-Community Security Bank,New Prague,MN,34486,Roundbank,23-Jul-10,12-Sep-12
-Thunder Bank,Sylvan Grove,KS,10506,The Bennington State Bank,23-Jul-10,13-Sep-12
-Williamsburg First National Bank,Kingstree,SC,17837,"First Citizens Bank and Trust Company, Inc.",23-Jul-10,5-Nov-12
-Crescent Bank and Trust Company,Jasper,GA,27559,Renasant Bank,23-Jul-10,5-Nov-12
-Sterling Bank,Lantana,FL,32536,IBERIABANK,23-Jul-10,5-Nov-12
-"Mainstreet Savings Bank, FSB",Hastings,MI,28136,Commercial Bank,16-Jul-10,13-Sep-12
-Olde Cypress Community Bank,Clewiston,FL,28864,"CenterState Bank of Florida, N.A.",16-Jul-10,5-Nov-12
-Turnberry Bank,Aventura,FL,32280,NAFH National Bank,16-Jul-10,5-Nov-12
-Metro Bank of Dade County,Miami,FL,25172,NAFH National Bank,16-Jul-10,5-Nov-12
-First National Bank of the South,Spartanburg,SC,35383,NAFH National Bank,16-Jul-10,5-Nov-12
-Woodlands Bank,Bluffton,SC,32571,Bank of the Ozarks,16-Jul-10,5-Nov-12
-Home National Bank,Blackwell,OK,11636,RCB Bank,9-Jul-10,10-Dec-12
-USA Bank,Port Chester,NY,58072,New Century Bank,9-Jul-10,14-Sep-12
-Ideal Federal Savings Bank,Baltimore,MD,32456,No Acquirer,9-Jul-10,14-Sep-12
-Bay National Bank,Baltimore,MD,35462,"Bay Bank, FSB",9-Jul-10,15-Jan-13
-High Desert State Bank,Albuquerque,NM,35279,First American Bank,25-Jun-10,14-Sep-12
-First National Bank,Savannah,GA,34152,"The Savannah Bank, N.A.",25-Jun-10,5-Nov-12
-Peninsula Bank,Englewood,FL,26563,"Premier American Bank, N.A.",25-Jun-10,5-Nov-12
-Nevada Security Bank,Reno,NV,57110,Umpqua Bank,18-Jun-10,23-Aug-12
-Washington First International Bank,Seattle,WA,32955,East West Bank,11-Jun-10,14-Sep-12
-TierOne Bank,Lincoln,NE,29341,Great Western Bank,4-Jun-10,14-Sep-12
-Arcola Homestead Savings Bank,Arcola,IL,31813,No Acquirer,4-Jun-10,14-Sep-12
-First National Bank,Rosedale,MS,15814,The Jefferson Bank,4-Jun-10,5-Nov-12
-Sun West Bank,Las Vegas,NV,34785,City National Bank,28-May-10,14-Sep-12
-"Granite Community Bank, NA",Granite Bay,CA,57315,Tri Counties Bank,28-May-10,14-Sep-12
-Bank of Florida - Tampa,Tampa,FL,57814,EverBank,28-May-10,5-Nov-12
-Bank of Florida - Southwest,Naples,FL,35106,EverBank,28-May-10,5-Nov-12
-Bank of Florida - Southeast,Fort Lauderdale,FL,57360,EverBank,28-May-10,5-Nov-12
-Pinehurst Bank,Saint Paul,MN,57735,Coulee Bank,21-May-10,26-Oct-12
-Midwest Bank and Trust Company,Elmwood Park,IL,18117,"FirstMerit Bank, N.A.",14-May-10,23-Aug-12
-Southwest Community Bank,Springfield,MO,34255,Simmons First National Bank,14-May-10,23-Aug-12
-New Liberty Bank,Plymouth,MI,35586,Bank of Ann Arbor,14-May-10,23-Aug-12
-Satilla Community Bank,Saint Marys,GA,35114,Ameris Bank,14-May-10,5-Nov-12
-1st Pacific Bank of California,San Diego,CA,35517,City National Bank,7-May-10,13-Dec-12
-Towne Bank of Arizona,Mesa,AZ,57697,Commerce Bank of Arizona,7-May-10,23-Aug-12
-Access Bank,Champlin,MN,16476,PrinsBank,7-May-10,23-Aug-12
-The Bank of Bonifay,Bonifay,FL,14246,First Federal Bank of Florida,7-May-10,5-Nov-12
-Frontier Bank,Everett,WA,22710,"Union Bank, N.A.",30-Apr-10,15-Jan-13
-BC National Banks,Butler,MO,17792,Community First Bank,30-Apr-10,23-Aug-12
-Champion Bank,Creve Coeur,MO,58362,BankLiberty,30-Apr-10,23-Aug-12
-CF Bancorp,Port Huron,MI,30005,First Michigan Bank,30-Apr-10,15-Jan-13
-Westernbank Puerto Rico,Mayaguez,PR,31027,Banco Popular de Puerto Rico,30-Apr-10,5-Nov-12
-R-G Premier Bank of Puerto Rico,Hato Rey,PR,32185,Scotiabank de Puerto Rico,30-Apr-10,5-Nov-12
-Eurobank,San Juan,PR,27150,Oriental Bank and Trust,30-Apr-10,5-Nov-12
-Wheatland Bank,Naperville,IL,58429,Wheaton Bank & Trust,23-Apr-10,23-Aug-12
-Peotone Bank and Trust Company,Peotone,IL,10888,First Midwest Bank,23-Apr-10,23-Aug-12
-Lincoln Park Savings Bank,Chicago,IL,30600,Northbrook Bank & Trust Company,23-Apr-10,23-Aug-12
-New Century Bank,Chicago,IL,34821,"MB Financial Bank, N.A.",23-Apr-10,23-Aug-12
-Citizens Bank and Trust Company of Chicago,Chicago,IL,34658,Republic Bank of Chicago,23-Apr-10,23-Aug-12
-Broadway Bank,Chicago,IL,22853,"MB Financial Bank, N.A.",23-Apr-10,23-Aug-12
-"Amcore Bank, National Association",Rockford,IL,3735,Harris N.A.,23-Apr-10,23-Aug-12
-City Bank,Lynnwood,WA,21521,Whidbey Island Bank,16-Apr-10,14-Sep-12
-Tamalpais Bank,San Rafael,CA,33493,"Union Bank, N.A.",16-Apr-10,23-Aug-12
-Innovative Bank,Oakland,CA,23876,Center Bank,16-Apr-10,23-Aug-12
-Butler Bank,Lowell,MA,26619,People's United Bank,16-Apr-10,23-Aug-12
-Riverside National Bank of Florida,Fort Pierce,FL,24067,"TD Bank, N.A.",16-Apr-10,5-Nov-12
-AmericanFirst Bank,Clermont,FL,57724,"TD Bank, N.A.",16-Apr-10,31-Oct-12
-First Federal Bank of North Florida,Palatka,FL,28886,"TD Bank, N.A.",16-Apr-10,15-Jan-13
-Lakeside Community Bank,Sterling Heights,MI,34878,No Acquirer,16-Apr-10,23-Aug-12
-Beach First National Bank,Myrtle Beach,SC,34242,Bank of North Carolina,9-Apr-10,5-Nov-12
-Desert Hills Bank,Phoenix,AZ,57060,New York Community Bank,26-Mar-10,23-Aug-12
-Unity National Bank,Cartersville,GA,34678,Bank of the Ozarks,26-Mar-10,14-Sep-12
-Key West Bank,Key West,FL,34684,Centennial Bank,26-Mar-10,23-Aug-12
-McIntosh Commercial Bank,Carrollton,GA,57399,CharterBank,26-Mar-10,23-Aug-12
-State Bank of Aurora,Aurora,MN,8221,Northern State Bank,19-Mar-10,23-Aug-12
-First Lowndes Bank,Fort Deposit,AL,24957,First Citizens Bank,19-Mar-10,23-Aug-12
-Bank of Hiawassee,Hiawassee,GA,10054,Citizens South Bank,19-Mar-10,23-Aug-12
-Appalachian Community Bank,Ellijay,GA,33989,Community & Southern Bank,19-Mar-10,31-Oct-12
-Advanta Bank Corp.,Draper,UT,33535,No Acquirer,19-Mar-10,14-Sep-12
-Century Security Bank,Duluth,GA,58104,Bank of Upson,19-Mar-10,23-Aug-12
-American National Bank,Parma,OH,18806,The National Bank and Trust Company,19-Mar-10,23-Aug-12
-Statewide Bank,Covington,LA,29561,Home Bank,12-Mar-10,23-Aug-12
-Old Southern Bank,Orlando,FL,58182,Centennial Bank,12-Mar-10,23-Aug-12
-The Park Avenue Bank,New York,NY,27096,Valley National Bank,12-Mar-10,23-Aug-12
-LibertyPointe Bank,New York,NY,58071,Valley National Bank,11-Mar-10,23-Aug-12
-Centennial Bank,Ogden,UT,34430,No Acquirer,5-Mar-10,14-Sep-12
-Waterfield Bank,Germantown,MD,34976,No Acquirer,5-Mar-10,23-Aug-12
-Bank of Illinois,Normal,IL,9268,Heartland Bank and Trust Company,5-Mar-10,23-Aug-12
-Sun American Bank,Boca Raton,FL,27126,First-Citizens Bank & Trust Company,5-Mar-10,23-Aug-12
-Rainier Pacific Bank,Tacoma,WA,38129,Umpqua Bank,26-Feb-10,23-Aug-12
-Carson River Community Bank,Carson City,NV,58352,Heritage Bank of Nevada,26-Feb-10,15-Jan-13
-"La Jolla Bank, FSB",La Jolla,CA,32423,"OneWest Bank, FSB",19-Feb-10,24-Aug-12
-George Washington Savings Bank,Orland Park,IL,29952,"FirstMerit Bank, N.A.",19-Feb-10,24-Aug-12
-The La Coste National Bank,La Coste,TX,3287,Community National Bank,19-Feb-10,14-Sep-12
-Marco Community Bank,Marco Island,FL,57586,Mutual of Omaha Bank,19-Feb-10,24-Aug-12
-1st American State Bank of Minnesota,Hancock,MN,15448,"Community Development Bank, FSB",5-Feb-10,24-Aug-12
-American Marine Bank,Bainbridge Island,WA,16730,Columbia State Bank,29-Jan-10,24-Aug-12
-First Regional Bank,Los Angeles,CA,23011,First-Citizens Bank & Trust Company,29-Jan-10,24-Aug-12
-Community Bank and Trust,Cornelia,GA,5702,SCBT National Association,29-Jan-10,15-Jan-13
-"Marshall Bank, N.A.",Hallock,MN,16133,United Valley Bank,29-Jan-10,23-Aug-12
-Florida Community Bank,Immokalee,FL,5672,"Premier American Bank, N.A.",29-Jan-10,15-Jan-13
-First National Bank of Georgia,Carrollton,GA,16480,Community & Southern Bank,29-Jan-10,13-Dec-12
-Columbia River Bank,The Dalles,OR,22469,Columbia State Bank,22-Jan-10,14-Sep-12
-Evergreen Bank,Seattle,WA,20501,Umpqua Bank,22-Jan-10,15-Jan-13
-Charter Bank,Santa Fe,NM,32498,Charter Bank,22-Jan-10,23-Aug-12
-Bank of Leeton,Leeton,MO,8265,"Sunflower Bank, N.A.",22-Jan-10,15-Jan-13
-Premier American Bank,Miami,FL,57147,"Premier American Bank, N.A.",22-Jan-10,13-Dec-12
-Barnes Banking Company,Kaysville,UT,1252,No Acquirer,15-Jan-10,23-Aug-12
-St. Stephen State Bank,St. Stephen,MN,17522,First State Bank of St. Joseph,15-Jan-10,23-Aug-12
-Town Community Bank & Trust,Antioch,IL,34705,First American Bank,15-Jan-10,23-Aug-12
-Horizon Bank,Bellingham,WA,22977,Washington Federal Savings and Loan Association,8-Jan-10,23-Aug-12
-"First Federal Bank of California, F.S.B.",Santa Monica,CA,28536,"OneWest Bank, FSB",18-Dec-09,23-Aug-12
-Imperial Capital Bank,La Jolla,CA,26348,City National Bank,18-Dec-09,5-Sep-12
-Independent Bankers' Bank,Springfield,IL,26820,The Independent BankersBank (TIB),18-Dec-09,23-Aug-12
-New South Federal Savings Bank,Irondale,AL,32276,Beal Bank,18-Dec-09,23-Aug-12
-Citizens State Bank,New Baltimore,MI,1006,No Acquirer,18-Dec-09,5-Nov-12
-Peoples First Community Bank,Panama City,FL,32167,Hancock Bank,18-Dec-09,5-Nov-12
-RockBridge Commercial Bank,Atlanta,GA,58315,No Acquirer,18-Dec-09,5-Nov-12
-SolutionsBank,Overland Park,KS,4731,Arvest Bank,11-Dec-09,23-Aug-12
-"Valley Capital Bank, N.A.",Mesa,AZ,58399,Enterprise Bank & Trust,11-Dec-09,23-Aug-12
-"Republic Federal Bank, N.A.",Miami,FL,22846,1st United Bank,11-Dec-09,5-Nov-12
-Greater Atlantic Bank,Reston,VA,32583,Sonabank,4-Dec-09,5-Nov-12
-Benchmark Bank,Aurora,IL,10440,"MB Financial Bank, N.A.",4-Dec-09,23-Aug-12
-AmTrust Bank,Cleveland,OH,29776,New York Community Bank,4-Dec-09,5-Nov-12
-The Tattnall Bank,Reidsville,GA,12080,Heritage Bank of the South,4-Dec-09,5-Nov-12
-First Security National Bank,Norcross,GA,26290,State Bank and Trust Company,4-Dec-09,5-Nov-12
-The Buckhead Community Bank,Atlanta,GA,34663,State Bank and Trust Company,4-Dec-09,5-Nov-12
-Commerce Bank of Southwest Florida,Fort Myers,FL,58016,Central Bank,20-Nov-09,5-Nov-12
-Pacific Coast National Bank,San Clemente,CA,57914,Sunwest Bank,13-Nov-09,22-Aug-12
-Orion Bank,Naples,FL,22427,IBERIABANK,13-Nov-09,5-Nov-12
-"Century Bank, F.S.B.",Sarasota,FL,32267,IBERIABANK,13-Nov-09,22-Aug-12
-United Commercial Bank,San Francisco,CA,32469,East West Bank,6-Nov-09,5-Nov-12
-Gateway Bank of St. Louis,St. Louis,MO,19450,Central Bank of Kansas City,6-Nov-09,22-Aug-12
-Prosperan Bank,Oakdale,MN,35074,"Alerus Financial, N.A.",6-Nov-09,22-Aug-12
-Home Federal Savings Bank,Detroit,MI,30329,Liberty Bank and Trust Company,6-Nov-09,22-Aug-12
-United Security Bank,Sparta,GA,22286,Ameris Bank,6-Nov-09,15-Jan-13
-North Houston Bank,Houston,TX,18776,U.S. Bank N.A.,30-Oct-09,22-Aug-12
-Madisonville State Bank,Madisonville,TX,33782,U.S. Bank N.A.,30-Oct-09,22-Aug-12
-Citizens National Bank,Teague,TX,25222,U.S. Bank N.A.,30-Oct-09,22-Aug-12
-Park National Bank,Chicago,IL,11677,U.S. Bank N.A.,30-Oct-09,22-Aug-12
-Pacific National Bank,San Francisco,CA,30006,U.S. Bank N.A.,30-Oct-09,22-Aug-12
-California National Bank,Los Angeles,CA,34659,U.S. Bank N.A.,30-Oct-09,5-Sep-12
-San Diego National Bank,San Diego,CA,23594,U.S. Bank N.A.,30-Oct-09,22-Aug-12
-Community Bank of Lemont,Lemont,IL,35291,U.S. Bank N.A.,30-Oct-09,15-Jan-13
-"Bank USA, N.A.",Phoenix,AZ,32218,U.S. Bank N.A.,30-Oct-09,22-Aug-12
-First DuPage Bank,Westmont,IL,35038,First Midwest Bank,23-Oct-09,22-Aug-12
-Riverview Community Bank,Otsego,MN,57525,Central Bank,23-Oct-09,22-Aug-12
-Bank of Elmwood,Racine,WI,18321,Tri City National Bank,23-Oct-09,22-Aug-12
-Flagship National Bank,Bradenton,FL,35044,First Federal Bank of Florida,23-Oct-09,22-Aug-12
-Hillcrest Bank Florida,Naples,FL,58336,Stonegate Bank,23-Oct-09,22-Aug-12
-American United Bank,Lawrenceville,GA,57794,Ameris Bank,23-Oct-09,5-Sep-12
-Partners Bank,Naples,FL,57959,Stonegate Bank,23-Oct-09,15-Jan-13
-San Joaquin Bank,Bakersfield,CA,23266,Citizens Business Bank,16-Oct-09,22-Aug-12
-Southern Colorado National Bank,Pueblo,CO,57263,Legacy Bank,2-Oct-09,5-Sep-12
-Jennings State Bank,Spring Grove,MN,11416,Central Bank,2-Oct-09,21-Aug-12
-Warren Bank,Warren,MI,34824,The Huntington National Bank,2-Oct-09,21-Aug-12
-Georgian Bank,Atlanta,GA,57151,"First Citizens Bank and Trust Company, Inc.",25-Sep-09,21-Aug-12
-"Irwin Union Bank, F.S.B.",Louisville,KY,57068,"First Financial Bank, N.A.",18-Sep-09,5-Sep-12
-Irwin Union Bank and Trust Company,Columbus,IN,10100,"First Financial Bank, N.A.",18-Sep-09,21-Aug-12
-Venture Bank,Lacey,WA,22868,First-Citizens Bank & Trust Company,11-Sep-09,21-Aug-12
-Brickwell Community Bank,Woodbury,MN,57736,CorTrust Bank N.A.,11-Sep-09,15-Jan-13
-"Corus Bank, N.A.",Chicago,IL,13693,"MB Financial Bank, N.A.",11-Sep-09,21-Aug-12
-First State Bank,Flagstaff,AZ,34875,Sunwest Bank,4-Sep-09,15-Jan-13
-Platinum Community Bank,Rolling Meadows,IL,35030,No Acquirer,4-Sep-09,21-Aug-12
-Vantus Bank,Sioux City,IN,27732,Great Southern Bank,4-Sep-09,21-Aug-12
-InBank,Oak Forest,IL,20203,"MB Financial Bank, N.A.",4-Sep-09,21-Aug-12
-First Bank of Kansas City,Kansas City,MO,25231,Great American Bank,4-Sep-09,21-Aug-12
-Affinity Bank,Ventura,CA,27197,Pacific Western Bank,28-Aug-09,21-Aug-12
-Mainstreet Bank,Forest Lake,MN,1909,Central Bank,28-Aug-09,21-Aug-12
-Bradford Bank,Baltimore,MD,28312,Manufacturers and Traders Trust Company (M&T Bank),28-Aug-09,15-Jan-13
-Guaranty Bank,Austin,TX,32618,BBVA Compass,21-Aug-09,21-Aug-12
-CapitalSouth Bank,Birmingham,AL,22130,IBERIABANK,21-Aug-09,15-Jan-13
-First Coweta Bank,Newnan,GA,57702,United Bank,21-Aug-09,15-Jan-13
-ebank,Atlanta,GA,34682,"Stearns Bank, N.A.",21-Aug-09,21-Aug-12
-Community Bank of Nevada,Las Vegas,NV,34043,No Acquirer,14-Aug-09,21-Aug-12
-Community Bank of Arizona,Phoenix,AZ,57645,MidFirst Bank,14-Aug-09,21-Aug-12
-"Union Bank, National Association",Gilbert,AZ,34485,MidFirst Bank,14-Aug-09,21-Aug-12
-Colonial Bank,Montgomery,AL,9609,"Branch Banking & Trust Company, (BB&T)",14-Aug-09,5-Sep-12
-Dwelling House Savings and Loan Association,Pittsburgh,PA,31559,"PNC Bank, N.A.",14-Aug-09,15-Jan-13
-Community First Bank,Prineville,OR,23268,Home Federal Bank,7-Aug-09,15-Jan-13
-Community National Bank of Sarasota County,Venice,FL,27183,"Stearns Bank, N.A.",7-Aug-09,20-Aug-12
-First State Bank,Sarasota,FL,27364,"Stearns Bank, N.A.",7-Aug-09,20-Aug-12
-Mutual Bank,Harvey,IL,18659,United Central Bank,31-Jul-09,20-Aug-12
-First BankAmericano,Elizabeth,NJ,34270,Crown Bank,31-Jul-09,20-Aug-12
-Peoples Community Bank,West Chester,OH,32288,"First Financial Bank, N.A.",31-Jul-09,20-Aug-12
-Integrity Bank,Jupiter,FL,57604,Stonegate Bank,31-Jul-09,20-Aug-12
-First State Bank of Altus,Altus,OK,9873,Herring Bank,31-Jul-09,20-Aug-12
-Security Bank of Jones County,Gray,GA,8486,State Bank and Trust Company,24-Jul-09,20-Aug-12
-Security Bank of Houston County,Perry,GA,27048,State Bank and Trust Company,24-Jul-09,20-Aug-12
-Security Bank of Bibb County,Macon,GA,27367,State Bank and Trust Company,24-Jul-09,20-Aug-12
-Security Bank of North Metro,Woodstock,GA,57105,State Bank and Trust Company,24-Jul-09,20-Aug-12
-Security Bank of North Fulton,Alpharetta,GA,57430,State Bank and Trust Company,24-Jul-09,20-Aug-12
-Security Bank of Gwinnett County,Suwanee,GA,57346,State Bank and Trust Company,24-Jul-09,20-Aug-12
-Waterford Village Bank,Williamsville,NY,58065,"Evans Bank, N.A.",24-Jul-09,20-Aug-12
-Temecula Valley Bank,Temecula,CA,34341,First-Citizens Bank & Trust Company,17-Jul-09,20-Aug-12
-Vineyard Bank,Rancho Cucamonga,CA,23556,California Bank & Trust,17-Jul-09,20-Aug-12
-BankFirst,Sioux Falls,SD,34103,"Alerus Financial, N.A.",17-Jul-09,20-Aug-12
-First Piedmont Bank,Winder,GA,34594,First American Bank and Trust Company,17-Jul-09,15-Jan-13
-Bank of Wyoming,Thermopolis,WY,22754,Central Bank & Trust,10-Jul-09,20-Aug-12
-Founders Bank,Worth,IL,18390,The PrivateBank and Trust Company,2-Jul-09,20-Aug-12
-Millennium State Bank of Texas,Dallas,TX,57667,State Bank of Texas,2-Jul-09,26-Oct-12
-First National Bank of Danville,Danville,IL,3644,"First Financial Bank, N.A.",2-Jul-09,20-Aug-12
-Elizabeth State Bank,Elizabeth,IL,9262,Galena State Bank and Trust Company,2-Jul-09,20-Aug-12
-Rock River Bank,Oregon,IL,15302,The Harvard State Bank,2-Jul-09,20-Aug-12
-First State Bank of Winchester,Winchester,IL,11710,The First National Bank of Beardstown,2-Jul-09,20-Aug-12
-John Warner Bank,Clinton,IL,12093,State Bank of Lincoln,2-Jul-09,20-Aug-12
-Mirae Bank,Los Angeles,CA,57332,Wilshire State Bank,26-Jun-09,20-Aug-12
-MetroPacific Bank,Irvine,CA,57893,Sunwest Bank,26-Jun-09,20-Aug-12
-Horizon Bank,Pine City,MN,9744,"Stearns Bank, N.A.",26-Jun-09,20-Aug-12
-Neighborhood Community Bank,Newnan,GA,35285,CharterBank,26-Jun-09,20-Aug-12
-Community Bank of West Georgia,Villa Rica,GA,57436,No Acquirer,26-Jun-09,17-Aug-12
-First National Bank of Anthony,Anthony,KS,4614,Bank of Kansas,19-Jun-09,17-Aug-12
-Cooperative Bank,Wilmington,NC,27837,First Bank,19-Jun-09,17-Aug-12
-Southern Community Bank,Fayetteville,GA,35251,United Community Bank,19-Jun-09,17-Aug-12
-Bank of Lincolnwood,Lincolnwood,IL,17309,Republic Bank of Chicago,5-Jun-09,17-Aug-12
-Citizens National Bank,Macomb,IL,5757,Morton Community Bank,22-May-09,4-Sep-12
-Strategic Capital Bank,Champaign,IL,35175,Midland States Bank,22-May-09,4-Sep-12
-"BankUnited, FSB",Coral Gables,FL,32247,BankUnited,21-May-09,17-Aug-12
-Westsound Bank,Bremerton,WA,34843,Kitsap Bank,8-May-09,4-Sep-12
-America West Bank,Layton,UT,35461,Cache Valley Bank,1-May-09,17-Aug-12
-Citizens Community Bank,Ridgewood,NJ,57563,North Jersey Community Bank,1-May-09,4-Sep-12
-"Silverton Bank, NA",Atlanta,GA,26535,No Acquirer,1-May-09,17-Aug-12
-First Bank of Idaho,Ketchum,ID,34396,"U.S. Bank, N.A.",24-Apr-09,17-Aug-12
-First Bank of Beverly Hills,Calabasas,CA,32069,No Acquirer,24-Apr-09,4-Sep-12
-Michigan Heritage Bank,Farmington Hills,MI,34369,Level One Bank,24-Apr-09,17-Aug-12
-American Southern Bank,Kennesaw,GA,57943,Bank of North Georgia,24-Apr-09,17-Aug-12
-Great Basin Bank of Nevada,Elko,NV,33824,Nevada State Bank,17-Apr-09,4-Sep-12
-American Sterling Bank,Sugar Creek,MO,8266,Metcalf Bank,17-Apr-09,31-Aug-12
-New Frontier Bank,Greeley,CO,34881,No Acquirer,10-Apr-09,4-Sep-12
-Cape Fear Bank,Wilmington,NC,34639,First Federal Savings and Loan Association,10-Apr-09,17-Aug-12
-Omni National Bank,Atlanta,GA,22238,No Acquirer,27-Mar-09,17-Aug-12
-"TeamBank, NA",Paola,KS,4754,Great Southern Bank,20-Mar-09,17-Aug-12
-Colorado National Bank,Colorado Springs,CO,18896,Herring Bank,20-Mar-09,17-Aug-12
-FirstCity Bank,Stockbridge,GA,18243,No Acquirer,20-Mar-09,17-Aug-12
-Freedom Bank of Georgia,Commerce,GA,57558,Northeast Georgia Bank,6-Mar-09,17-Aug-12
-Security Savings Bank,Henderson,NV,34820,Bank of Nevada,27-Feb-09,7-Sep-12
-Heritage Community Bank,Glenwood,IL,20078,"MB Financial Bank, N.A.",27-Feb-09,17-Aug-12
-Silver Falls Bank,Silverton,OR,35399,Citizens Bank,20-Feb-09,17-Aug-12
-Pinnacle Bank of Oregon,Beaverton,OR,57342,Washington Trust Bank of Spokane,13-Feb-09,17-Aug-12
-Corn Belt Bank & Trust Co.,Pittsfield,IL,16500,The Carlinville National Bank,13-Feb-09,17-Aug-12
-Riverside Bank of the Gulf Coast,Cape Coral,FL,34563,TIB Bank,13-Feb-09,17-Aug-12
-Sherman County Bank,Loup City,NE,5431,Heritage Bank,13-Feb-09,17-Aug-12
-County Bank,Merced,CA,22574,Westamerica Bank,6-Feb-09,4-Sep-12
-Alliance Bank,Culver City,CA,23124,California Bank & Trust,6-Feb-09,16-Aug-12
-FirstBank Financial Services,McDonough,GA,57017,Regions Bank,6-Feb-09,16-Aug-12
-Ocala National Bank,Ocala,FL,26538,"CenterState Bank of Florida, N.A.",30-Jan-09,4-Sep-12
-Suburban FSB,Crofton,MD,30763,Bank of Essex,30-Jan-09,16-Aug-12
-MagnetBank,Salt Lake City,UT,58001,No Acquirer,30-Jan-09,16-Aug-12
-1st Centennial Bank,Redlands,CA,33025,First California Bank,23-Jan-09,16-Aug-12
-Bank of Clark County,Vancouver,WA,34959,Umpqua Bank,16-Jan-09,16-Aug-12
-National Bank of Commerce,Berkeley,IL,19733,Republic Bank of Chicago,16-Jan-09,16-Aug-12
-Sanderson State Bank,Sanderson,TX,11568,The Pecos County State Bank,12-Dec-08,4-Sep-12
-Haven Trust Bank,Duluth,GA,35379,"Branch Banking & Trust Company, (BB&T)",12-Dec-08,16-Aug-12
-First Georgia Community Bank,Jackson,GA,34301,United Bank,5-Dec-08,16-Aug-12
-PFF Bank & Trust,Pomona,CA,28344,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13
-Downey Savings & Loan,Newport Beach,CA,30968,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13
-Community Bank,Loganville,GA,16490,Bank of Essex,21-Nov-08,4-Sep-12
-Security Pacific Bank,Los Angeles,CA,23595,Pacific Western Bank,7-Nov-08,28-Aug-12
-"Franklin Bank, SSB",Houston,TX,26870,Prosperity Bank,7-Nov-08,16-Aug-12
-Freedom Bank,Bradenton,FL,57930,Fifth Third Bank,31-Oct-08,16-Aug-12
-Alpha Bank & Trust,Alpharetta,GA,58241,"Stearns Bank, N.A.",24-Oct-08,16-Aug-12
-Meridian Bank,Eldred,IL,13789,National Bank,10-Oct-08,31-May-12
-Main Street Bank,Northville,MI,57654,Monroe Bank & Trust,10-Oct-08,16-Aug-12
-Washington Mutual Bank,Henderson,NV,32633,JP Morgan Chase Bank,25-Sep-08,16-Aug-12
-Ameribank,Northfork,WV,6782,The Citizens Savings Bank,19-Sep-08,16-Aug-12
-Silver State Bank,Henderson,NV,34194,Nevada State Bank,5-Sep-08,16-Aug-12
-Integrity Bank,Alpharetta,GA,35469,Regions Bank,29-Aug-08,16-Aug-12
-Columbian Bank & Trust,Topeka,KS,22728,Citizens Bank & Trust,22-Aug-08,16-Aug-12
-First Priority Bank,Bradenton,FL,57523,SunTrust Bank,1-Aug-08,16-Aug-12
-"First Heritage Bank, NA",Newport Beach,CA,57961,Mutual of Omaha Bank,25-Jul-08,28-Aug-12
-First National Bank of Nevada,Reno,NV,27011,Mutual of Omaha Bank,25-Jul-08,28-Aug-12
-IndyMac Bank,Pasadena,CA,29730,"OneWest Bank, FSB",11-Jul-08,28-Aug-12
-"First Integrity Bank, NA",Staples,MN,12736,First International Bank and Trust,30-May-08,28-Aug-12
-"ANB Financial, NA",Bentonville,AR,33901,Pulaski Bank and Trust Company,9-May-08,28-Aug-12
-Hume Bank,Hume,MO,1971,Security Bank,7-Mar-08,28-Aug-12
-Douglass National Bank,Kansas City,MO,24660,Liberty Bank and Trust Company,25-Jan-08,26-Oct-12
-Miami Valley Bank,Lakeview,OH,16848,The Citizens Banking Company,4-Oct-07,28-Aug-12
-NetBank,Alpharetta,GA,32575,ING DIRECT,28-Sep-07,28-Aug-12
-Metropolitan Savings Bank,Pittsburgh,PA,35353,Allegheny Valley Bank of Pittsburgh,2-Feb-07,27-Oct-10
-Bank of Ephraim,Ephraim,UT,1249,Far West Bank,25-Jun-04,9-Apr-08
-Reliance Bank,White Plains,NY,26778,Union State Bank,19-Mar-04,9-Apr-08
-Guaranty National Bank of Tallahassee,Tallahassee,FL,26838,Hancock Bank of Florida,12-Mar-04,5-Jun-12
-Dollar Savings Bank,Newark,NJ,31330,No Acquirer,14-Feb-04,9-Apr-08
-Pulaski Savings Bank,Philadelphia,PA,27203,Earthstar Bank,14-Nov-03,22-Jul-05
-First National Bank of Blanchardville,Blanchardville,WI,11639,The Park Bank,9-May-03,5-Jun-12
-Southern Pacific Bank,Torrance,CA,27094,Beal Bank,7-Feb-03,20-Oct-08
-Farmers Bank of Cheneyville,Cheneyville,LA,16445,Sabine State Bank & Trust,17-Dec-02,20-Oct-04
-Bank of Alamo,Alamo,TN,9961,No Acquirer,8-Nov-02,18-Mar-05
-AmTrade International Bank,Atlanta,GA,33784,No Acquirer,30-Sep-02,11-Sep-06
-Universal Federal Savings Bank,Chicago,IL,29355,Chicago Community Bank,27-Jun-02,9-Apr-08
-Connecticut Bank of Commerce,Stamford,CT,19183,Hudson United Bank,26-Jun-02,14-Feb-12
-New Century Bank,Shelby Township,MI,34979,No Acquirer,28-Mar-02,18-Mar-05
-Net 1st National Bank,Boca Raton,FL,26652,Bank Leumi USA,1-Mar-02,9-Apr-08
-"NextBank, NA",Phoenix,AZ,22314,No Acquirer,7-Feb-02,27-Aug-10
-Oakwood Deposit Bank Co.,Oakwood,OH,8966,The State Bank & Trust Company,1-Feb-02,25-Oct-12
-Bank of Sierra Blanca,Sierra Blanca,TX,22002,The Security State Bank of Pecos,18-Jan-02,6-Nov-03
-"Hamilton Bank, NA",Miami,FL,24382,Israel Discount Bank of New York,11-Jan-02,5-Jun-12
-Sinclair National Bank,Gravette,AR,34248,Delta Trust & Bank,7-Sep-01,10-Feb-04
-"Superior Bank, FSB",Hinsdale,IL,32646,"Superior Federal, FSB",27-Jul-01,5-Jun-12
-Malta National Bank,Malta,OH,6629,North Valley Bank,3-May-01,18-Nov-02
-First Alliance Bank & Trust Co.,Manchester,NH,34264,Southern New Hampshire Bank & Trust,2-Feb-01,18-Feb-03
-National State Bank of Metropolis,Metropolis,IL,3815,Banterra Bank of Marion,14-Dec-00,17-Mar-05
-Bank of Honolulu,Honolulu,HI,21029,Bank of the Orient,13-Oct-00,17-Mar-05
+Bank Name,City,ST,CERT,Acquiring Institution,Closing Date,Updated Date
+Banks of Wisconsin d/b/a Bank of Kenosha,Kenosha,WI,35386,"North Shore Bank, FSB",31-May-13,31-May-13
+Central Arizona Bank,Scottsdale,AZ,34527,Western State Bank,14-May-13,20-May-13
+Sunrise Bank,Valdosta,GA,58185,Synovus Bank,10-May-13,21-May-13
+Pisgah Community Bank,Asheville,NC,58701,"Capital Bank, N.A.",10-May-13,14-May-13
+Douglas County Bank,Douglasville,GA,21649,Hamilton State Bank,26-Apr-13,16-May-13
+Parkway Bank,Lenoir,NC,57158,"CertusBank, National Association",26-Apr-13,17-May-13
+Chipola Community Bank,Marianna,FL,58034,First Federal Bank of Florida,19-Apr-13,16-May-13
+Heritage Bank of North Florida,Orange Park,FL,26680,FirstAtlantic Bank,19-Apr-13,16-May-13
+First Federal Bank,Lexington,KY,29594,Your Community Bank,19-Apr-13,23-Apr-13
+Gold Canyon Bank,Gold Canyon,AZ,58066,"First Scottsdale Bank, National Association",5-Apr-13,9-Apr-13
+Frontier Bank,LaGrange,GA,16431,HeritageBank of the South,8-Mar-13,26-Mar-13
+Covenant Bank,Chicago,IL,22476,Liberty Bank and Trust Company,15-Feb-13,4-Mar-13
+1st Regents Bank,Andover,MN,57157,First Minnesota Bank,18-Jan-13,28-Feb-13
+Westside Community Bank,University Place,WA,33997,Sunwest Bank,11-Jan-13,24-Jan-13
+Community Bank of the Ozarks,Sunrise Beach,MO,27331,Bank of Sullivan,14-Dec-12,24-Jan-13
+Hometown Community Bank,Braselton,GA,57928,"CertusBank, National Association",16-Nov-12,24-Jan-13
+Citizens First National Bank,Princeton,IL,3731,Heartland Bank and Trust Company,2-Nov-12,24-Jan-13
+Heritage Bank of Florida,Lutz,FL,35009,Centennial Bank,2-Nov-12,24-Jan-13
+NOVA Bank,Berwyn,PA,27148,No Acquirer,26-Oct-12,24-Jan-13
+Excel Bank,Sedalia,MO,19189,Simmons First National Bank,19-Oct-12,24-Jan-13
+First East Side Savings Bank,Tamarac,FL,28144,Stearns Bank N.A.,19-Oct-12,24-Jan-13
+GulfSouth Private Bank,Destin,FL,58073,SmartBank,19-Oct-12,24-Jan-13
+First United Bank,Crete,IL,20685,"Old Plank Trail Community Bank, National Association",28-Sep-12,15-Nov-12
+Truman Bank,St. Louis,MO,27316,Simmons First National Bank,14-Sep-12,17-Dec-12
+First Commercial Bank,Bloomington,MN,35246,Republic Bank & Trust Company,7-Sep-12,17-Dec-12
+Waukegan Savings Bank,Waukegan,IL,28243,First Midwest Bank,3-Aug-12,11-Oct-12
+Jasper Banking Company,Jasper,GA,16240,Stearns Bank N.A.,27-Jul-12,17-Dec-12
+Second Federal Savings and Loan Association of Chicago,Chicago,IL,27986,Hinsdale Bank & Trust Company,20-Jul-12,14-Jan-13
+Heartland Bank,Leawood,KS,1361,Metcalf Bank,20-Jul-12,17-Dec-12
+First Cherokee State Bank,Woodstock,GA,32711,Community & Southern Bank,20-Jul-12,31-Oct-12
+Georgia Trust Bank,Buford,GA,57847,Community & Southern Bank,20-Jul-12,17-Dec-12
+The Royal Palm Bank of Florida,Naples,FL,57096,First National Bank of the Gulf Coast,20-Jul-12,7-Jan-13
+Glasgow Savings Bank,Glasgow,MO,1056,Regional Missouri Bank,13-Jul-12,11-Oct-12
+Montgomery Bank & Trust,Ailey,GA,19498,Ameris Bank,6-Jul-12,31-Oct-12
+The Farmers Bank of Lynchburg,Lynchburg,TN,1690,Clayton Bank and Trust,15-Jun-12,31-Oct-12
+Security Exchange Bank,Marietta,GA,35299,Fidelity Bank,15-Jun-12,10-Oct-12
+Putnam State Bank,Palatka,FL,27405,Harbor Community Bank,15-Jun-12,10-Oct-12
+Waccamaw Bank,Whiteville,NC,34515,First Community Bank,8-Jun-12,8-Nov-12
+Farmers' and Traders' State Bank,Shabbona,IL,9257,First State Bank,8-Jun-12,10-Oct-12
+Carolina Federal Savings Bank,Charleston,SC,35372,Bank of North Carolina,8-Jun-12,31-Oct-12
+First Capital Bank,Kingfisher,OK,416,F & M Bank,8-Jun-12,10-Oct-12
+"Alabama Trust Bank, National Association",Sylacauga,AL,35224,Southern States Bank,18-May-12,20-May-13
+"Security Bank, National Association",North Lauderdale,FL,23156,Banesco USA,4-May-12,31-Oct-12
+Palm Desert National Bank,Palm Desert,CA,23632,Pacific Premier Bank,27-Apr-12,17-May-13
+Plantation Federal Bank,Pawleys Island,SC,32503,First Federal Bank,27-Apr-12,17-May-13
+"Inter Savings Bank, fsb D/B/A InterBank, fsb",Maple Grove,MN,31495,Great Southern Bank,27-Apr-12,17-May-13
+HarVest Bank of Maryland,Gaithersburg,MD,57766,Sonabank,27-Apr-12,17-May-13
+Bank of the Eastern Shore,Cambridge,MD,26759,No Acquirer,27-Apr-12,17-Oct-12
+"Fort Lee Federal Savings Bank, FSB",Fort Lee,NJ,35527,Alma Bank,20-Apr-12,17-May-13
+Fidelity Bank,Dearborn,MI,33883,The Huntington National Bank,30-Mar-12,16-May-13
+Premier Bank,Wilmette,IL,35419,International Bank of Chicago,23-Mar-12,17-Oct-12
+Covenant Bank & Trust,Rock Spring,GA,58068,"Stearns Bank, N.A.",23-Mar-12,31-Oct-12
+New City Bank,Chicago,IL,57597,No Acquirer,9-Mar-12,29-Oct-12
+Global Commerce Bank,Doraville,GA,34046,Metro City Bank,2-Mar-12,31-Oct-12
+Home Savings of America,Little Falls,MN,29178,No Acquirer,24-Feb-12,17-Dec-12
+Central Bank of Georgia,Ellaville,GA,5687,Ameris Bank,24-Feb-12,9-Aug-12
+SCB Bank,Shelbyville,IN,29761,"First Merchants Bank, National Association",10-Feb-12,25-Mar-13
+Charter National Bank and Trust,Hoffman Estates,IL,23187,"Barrington Bank & Trust Company, National Association",10-Feb-12,25-Mar-13
+BankEast,Knoxville,TN,19869,U.S.Bank National Association,27-Jan-12,8-Mar-13
+Patriot Bank Minnesota,Forest Lake,MN,34823,First Resource Bank,27-Jan-12,12-Sep-12
+Tennessee Commerce Bank,Franklin,TN,35296,Republic Bank & Trust Company,27-Jan-12,20-Nov-12
+First Guaranty Bank and Trust Company of Jacksonville,Jacksonville,FL,16579,"CenterState Bank of Florida, N.A.",27-Jan-12,12-Sep-12
+American Eagle Savings Bank,Boothwyn,PA,31581,"Capital Bank, N.A.",20-Jan-12,25-Jan-13
+The First State Bank,Stockbridge,GA,19252,Hamilton State Bank,20-Jan-12,25-Jan-13
+Central Florida State Bank,Belleview,FL,57186,"CenterState Bank of Florida, N.A.",20-Jan-12,25-Jan-13
+Western National Bank,Phoenix,AZ,57917,Washington Federal,16-Dec-11,13-Aug-12
+Premier Community Bank of the Emerald Coast,Crestview,FL,58343,Summit Bank,16-Dec-11,12-Sep-12
+Central Progressive Bank,Lacombe,LA,19657,First NBC Bank,18-Nov-11,13-Aug-12
+Polk County Bank,Johnston,IA,14194,Grinnell State Bank,18-Nov-11,15-Aug-12
+Community Bank of Rockmart,Rockmart,GA,57860,Century Bank of Georgia,10-Nov-11,13-Aug-12
+SunFirst Bank,Saint George,UT,57087,Cache Valley Bank,4-Nov-11,16-Nov-12
+"Mid City Bank, Inc.",Omaha,NE,19397,Premier Bank,4-Nov-11,15-Aug-12
+All American Bank,Des Plaines,IL,57759,International Bank of Chicago,28-Oct-11,15-Aug-12
+Community Banks of Colorado,Greenwood Village,CO,21132,"Bank Midwest, N.A.",21-Oct-11,2-Jan-13
+Community Capital Bank,Jonesboro,GA,57036,State Bank and Trust Company,21-Oct-11,8-Nov-12
+Decatur First Bank,Decatur,GA,34392,Fidelity Bank,21-Oct-11,8-Nov-12
+Old Harbor Bank,Clearwater,FL,57537,1st United Bank,21-Oct-11,8-Nov-12
+Country Bank,Aledo,IL,35395,Blackhawk Bank & Trust,14-Oct-11,15-Aug-12
+First State Bank,Cranford,NJ,58046,Northfield Bank,14-Oct-11,8-Nov-12
+"Blue Ridge Savings Bank, Inc.",Asheville,NC,32347,Bank of North Carolina,14-Oct-11,8-Nov-12
+Piedmont Community Bank,Gray,GA,57256,State Bank and Trust Company,14-Oct-11,22-Jan-13
+Sun Security Bank,Ellington,MO,20115,Great Southern Bank,7-Oct-11,7-Nov-12
+The RiverBank,Wyoming,MN,10216,Central Bank,7-Oct-11,7-Nov-12
+First International Bank,Plano,TX,33513,American First National Bank,30-Sep-11,9-Oct-12
+Citizens Bank of Northern California,Nevada City,CA,33983,Tri Counties Bank,23-Sep-11,9-Oct-12
+Bank of the Commonwealth,Norfolk,VA,20408,Southern Bank and Trust Company,23-Sep-11,9-Oct-12
+The First National Bank of Florida,Milton,FL,25155,CharterBank,9-Sep-11,6-Sep-12
+CreekSide Bank,Woodstock,GA,58226,Georgia Commerce Bank,2-Sep-11,6-Sep-12
+Patriot Bank of Georgia,Cumming,GA,58273,Georgia Commerce Bank,2-Sep-11,2-Nov-12
+First Choice Bank,Geneva,IL,57212,Inland Bank & Trust,19-Aug-11,15-Aug-12
+First Southern National Bank,Statesboro,GA,57239,Heritage Bank of the South,19-Aug-11,2-Nov-12
+Lydian Private Bank,Palm Beach,FL,35356,"Sabadell United Bank, N.A.",19-Aug-11,2-Nov-12
+Public Savings Bank,Huntingdon Valley,PA,34130,"Capital Bank, N.A.",18-Aug-11,15-Aug-12
+The First National Bank of Olathe,Olathe,KS,4744,Enterprise Bank & Trust,12-Aug-11,23-Aug-12
+Bank of Whitman,Colfax,WA,22528,Columbia State Bank,5-Aug-11,16-Aug-12
+Bank of Shorewood,Shorewood,IL,22637,Heartland Bank and Trust Company,5-Aug-11,16-Aug-12
+Integra Bank National Association,Evansville,IN,4392,Old National Bank,29-Jul-11,16-Aug-12
+"BankMeridian, N.A.",Columbia,SC,58222,SCBT National Association,29-Jul-11,2-Nov-12
+Virginia Business Bank,Richmond,VA,58283,Xenith Bank,29-Jul-11,9-Oct-12
+Bank of Choice,Greeley,CO,2994,"Bank Midwest, N.A.",22-Jul-11,12-Sep-12
+LandMark Bank of Florida,Sarasota,FL,35244,American Momentum Bank,22-Jul-11,2-Nov-12
+Southshore Community Bank,Apollo Beach,FL,58056,American Momentum Bank,22-Jul-11,2-Nov-12
+Summit Bank,Prescott,AZ,57442,The Foothills Bank,15-Jul-11,16-Aug-12
+First Peoples Bank,Port St. Lucie,FL,34870,"Premier American Bank, N.A.",15-Jul-11,2-Nov-12
+High Trust Bank,Stockbridge,GA,19554,Ameris Bank,15-Jul-11,2-Nov-12
+One Georgia Bank,Atlanta,GA,58238,Ameris Bank,15-Jul-11,2-Nov-12
+Signature Bank,Windsor,CO,57835,Points West Community Bank,8-Jul-11,26-Oct-12
+Colorado Capital Bank,Castle Rock,CO,34522,First-Citizens Bank & Trust Company,8-Jul-11,15-Jan-13
+First Chicago Bank & Trust,Chicago,IL,27935,Northbrook Bank & Trust Company,8-Jul-11,9-Sep-12
+Mountain Heritage Bank,Clayton,GA,57593,First American Bank and Trust Company,24-Jun-11,2-Nov-12
+First Commercial Bank of Tampa Bay,Tampa,FL,27583,Stonegate Bank,17-Jun-11,2-Nov-12
+McIntosh State Bank,Jackson,GA,19237,Hamilton State Bank,17-Jun-11,2-Nov-12
+Atlantic Bank and Trust,Charleston,SC,58420,"First Citizens Bank and Trust Company, Inc.",3-Jun-11,31-Oct-12
+First Heritage Bank,Snohomish,WA,23626,Columbia State Bank,27-May-11,28-Jan-13
+Summit Bank,Burlington,WA,513,Columbia State Bank,20-May-11,22-Jan-13
+First Georgia Banking Company,Franklin,GA,57647,"CertusBank, National Association",20-May-11,13-Nov-12
+Atlantic Southern Bank,Macon,GA,57213,"CertusBank, National Association",20-May-11,31-Oct-12
+Coastal Bank,Cocoa Beach,FL,34898,"Florida Community Bank, a division of Premier American Bank, N.A.",6-May-11,30-Nov-12
+Community Central Bank,Mount Clemens,MI,34234,Talmer Bank & Trust,29-Apr-11,16-Aug-12
+The Park Avenue Bank,Valdosta,GA,19797,Bank of the Ozarks,29-Apr-11,30-Nov-12
+First Choice Community Bank,Dallas,GA,58539,Bank of the Ozarks,29-Apr-11,22-Jan-13
+Cortez Community Bank,Brooksville,FL,57625,"Florida Community Bank, a division of Premier American Bank, N.A.",29-Apr-11,30-Nov-12
+First National Bank of Central Florida,Winter Park,FL,26297,"Florida Community Bank, a division of Premier American Bank, N.A.",29-Apr-11,30-Nov-12
+Heritage Banking Group,Carthage,MS,14273,Trustmark National Bank,15-Apr-11,30-Nov-12
+Rosemount National Bank,Rosemount,MN,24099,Central Bank,15-Apr-11,16-Aug-12
+Superior Bank,Birmingham,AL,17750,"Superior Bank, National Association",15-Apr-11,30-Nov-12
+Nexity Bank,Birmingham,AL,19794,AloStar Bank of Commerce,15-Apr-11,4-Sep-12
+New Horizons Bank,East Ellijay,GA,57705,Citizens South Bank,15-Apr-11,16-Aug-12
+Bartow County Bank,Cartersville,GA,21495,Hamilton State Bank,15-Apr-11,22-Jan-13
+Nevada Commerce Bank,Las Vegas,NV,35418,City National Bank,8-Apr-11,9-Sep-12
+Western Springs National Bank and Trust,Western Springs,IL,10086,Heartland Bank and Trust Company,8-Apr-11,22-Jan-13
+The Bank of Commerce,Wood Dale,IL,34292,Advantage National Bank Group,25-Mar-11,22-Jan-13
+Legacy Bank,Milwaukee,WI,34818,Seaway Bank and Trust Company,11-Mar-11,12-Sep-12
+First National Bank of Davis,Davis,OK,4077,The Pauls Valley National Bank,11-Mar-11,20-Aug-12
+Valley Community Bank,St. Charles,IL,34187,First State Bank,25-Feb-11,12-Sep-12
+"San Luis Trust Bank, FSB",San Luis Obispo,CA,34783,First California Bank,18-Feb-11,20-Aug-12
+Charter Oak Bank,Napa,CA,57855,Bank of Marin,18-Feb-11,12-Sep-12
+Citizens Bank of Effingham,Springfield,GA,34601,Heritage Bank of the South,18-Feb-11,2-Nov-12
+Habersham Bank,Clarkesville,GA,151,SCBT National Association,18-Feb-11,2-Nov-12
+Canyon National Bank,Palm Springs,CA,34692,Pacific Premier Bank,11-Feb-11,12-Sep-12
+Badger State Bank,Cassville,WI,13272,Royal Bank,11-Feb-11,12-Sep-12
+Peoples State Bank,Hamtramck,MI,14939,First Michigan Bank,11-Feb-11,22-Jan-13
+Sunshine State Community Bank,Port Orange,FL,35478,"Premier American Bank, N.A.",11-Feb-11,2-Nov-12
+Community First Bank Chicago,Chicago,IL,57948,Northbrook Bank & Trust Company,4-Feb-11,20-Aug-12
+North Georgia Bank,Watkinsville,GA,35242,BankSouth,4-Feb-11,2-Nov-12
+American Trust Bank,Roswell,GA,57432,Renasant Bank,4-Feb-11,31-Oct-12
+First Community Bank,Taos,NM,12261,"U.S. Bank, N.A.",28-Jan-11,12-Sep-12
+FirsTier Bank,Louisville,CO,57646,No Acquirer,28-Jan-11,12-Sep-12
+Evergreen State Bank,Stoughton,WI,5328,McFarland State Bank,28-Jan-11,12-Sep-12
+The First State Bank,Camargo,OK,2303,Bank 7,28-Jan-11,12-Sep-12
+United Western Bank,Denver,CO,31293,First-Citizens Bank & Trust Company,21-Jan-11,12-Sep-12
+The Bank of Asheville,Asheville,NC,34516,First Bank,21-Jan-11,2-Nov-12
+CommunitySouth Bank & Trust,Easley,SC,57868,"CertusBank, National Association",21-Jan-11,2-Nov-12
+Enterprise Banking Company,McDonough,GA,19758,No Acquirer,21-Jan-11,2-Nov-12
+Oglethorpe Bank,Brunswick,GA,57440,Bank of the Ozarks,14-Jan-11,2-Nov-12
+Legacy Bank,Scottsdale,AZ,57820,Enterprise Bank & Trust,7-Jan-11,12-Sep-12
+First Commercial Bank of Florida,Orlando,FL,34965,First Southern Bank,7-Jan-11,2-Nov-12
+Community National Bank,Lino Lakes,MN,23306,Farmers & Merchants Savings Bank,17-Dec-10,20-Aug-12
+First Southern Bank,Batesville,AR,58052,Southern Bank,17-Dec-10,20-Aug-12
+"United Americas Bank, N.A.",Atlanta,GA,35065,State Bank and Trust Company,17-Dec-10,2-Nov-12
+"Appalachian Community Bank, FSB",McCaysville,GA,58495,Peoples Bank of East Tennessee,17-Dec-10,31-Oct-12
+Chestatee State Bank,Dawsonville,GA,34578,Bank of the Ozarks,17-Dec-10,2-Nov-12
+"The Bank of Miami,N.A.",Coral Gables,FL,19040,1st United Bank,17-Dec-10,2-Nov-12
+Earthstar Bank,Southampton,PA,35561,Polonia Bank,10-Dec-10,20-Aug-12
+Paramount Bank,Farmington Hills,MI,34673,Level One Bank,10-Dec-10,20-Aug-12
+First Banking Center,Burlington,WI,5287,First Michigan Bank,19-Nov-10,20-Aug-12
+Allegiance Bank of North America,Bala Cynwyd,PA,35078,VIST Bank,19-Nov-10,20-Aug-12
+Gulf State Community Bank,Carrabelle,FL,20340,Centennial Bank,19-Nov-10,2-Nov-12
+Copper Star Bank,Scottsdale,AZ,35463,"Stearns Bank, N.A.",12-Nov-10,20-Aug-12
+Darby Bank & Trust Co.,Vidalia,GA,14580,Ameris Bank,12-Nov-10,15-Jan-13
+Tifton Banking Company,Tifton,GA,57831,Ameris Bank,12-Nov-10,2-Nov-12
+First Vietnamese American Bank,Westminster,CA,57885,Grandpoint Bank,5-Nov-10,12-Sep-12
+Pierce Commercial Bank,Tacoma,WA,34411,Heritage Bank,5-Nov-10,20-Aug-12
+Western Commercial Bank,Woodland Hills,CA,58087,First California Bank,5-Nov-10,12-Sep-12
+K Bank,Randallstown,MD,31263,Manufacturers and Traders Trust Company (M&T Bank),5-Nov-10,20-Aug-12
+"First Arizona Savings, A FSB",Scottsdale,AZ,32582,No Acquirer,22-Oct-10,20-Aug-12
+Hillcrest Bank,Overland Park,KS,22173,"Hillcrest Bank, N.A.",22-Oct-10,20-Aug-12
+First Suburban National Bank,Maywood,IL,16089,Seaway Bank and Trust Company,22-Oct-10,20-Aug-12
+The First National Bank of Barnesville,Barnesville,GA,2119,United Bank,22-Oct-10,2-Nov-12
+The Gordon Bank,Gordon,GA,33904,Morris Bank,22-Oct-10,2-Nov-12
+Progress Bank of Florida,Tampa,FL,32251,Bay Cities Bank,22-Oct-10,2-Nov-12
+First Bank of Jacksonville,Jacksonville,FL,27573,Ameris Bank,22-Oct-10,2-Nov-12
+Premier Bank,Jefferson City,MO,34016,Providence Bank,15-Oct-10,20-Aug-12
+WestBridge Bank and Trust Company,Chesterfield,MO,58205,Midland States Bank,15-Oct-10,20-Aug-12
+"Security Savings Bank, F.S.B.",Olathe,KS,30898,Simmons First National Bank,15-Oct-10,20-Aug-12
+Shoreline Bank,Shoreline,WA,35250,GBC International Bank,1-Oct-10,20-Aug-12
+Wakulla Bank,Crawfordville,FL,21777,Centennial Bank,1-Oct-10,2-Nov-12
+North County Bank,Arlington,WA,35053,Whidbey Island Bank,24-Sep-10,20-Aug-12
+Haven Trust Bank Florida,Ponte Vedra Beach,FL,58308,First Southern Bank,24-Sep-10,5-Nov-12
+Maritime Savings Bank,West Allis,WI,28612,"North Shore Bank, FSB",17-Sep-10,20-Aug-12
+Bramble Savings Bank,Milford,OH,27808,Foundation Bank,17-Sep-10,20-Aug-12
+The Peoples Bank,Winder,GA,182,Community & Southern Bank,17-Sep-10,5-Nov-12
+First Commerce Community Bank,Douglasville,GA,57448,Community & Southern Bank,17-Sep-10,15-Jan-13
+Bank of Ellijay,Ellijay,GA,58197,Community & Southern Bank,17-Sep-10,15-Jan-13
+ISN Bank,Cherry Hill,NJ,57107,Customers Bank,17-Sep-10,22-Aug-12
+Horizon Bank,Bradenton,FL,35061,Bank of the Ozarks,10-Sep-10,5-Nov-12
+Sonoma Valley Bank,Sonoma,CA,27259,Westamerica Bank,20-Aug-10,12-Sep-12
+Los Padres Bank,Solvang,CA,32165,Pacific Western Bank,20-Aug-10,12-Sep-12
+Butte Community Bank,Chico,CA,33219,"Rabobank, N.A.",20-Aug-10,12-Sep-12
+Pacific State Bank,Stockton,CA,27090,"Rabobank, N.A.",20-Aug-10,12-Sep-12
+ShoreBank,Chicago,IL,15640,Urban Partnership Bank,20-Aug-10,16-May-13
+Imperial Savings and Loan Association,Martinsville,VA,31623,"River Community Bank, N.A.",20-Aug-10,24-Aug-12
+Independent National Bank,Ocala,FL,27344,"CenterState Bank of Florida, N.A.",20-Aug-10,5-Nov-12
+Community National Bank at Bartow,Bartow,FL,25266,"CenterState Bank of Florida, N.A.",20-Aug-10,5-Nov-12
+Palos Bank and Trust Company,Palos Heights,IL,17599,First Midwest Bank,13-Aug-10,22-Aug-12
+Ravenswood Bank,Chicago,IL,34231,Northbrook Bank & Trust Company,6-Aug-10,22-Aug-12
+LibertyBank,Eugene,OR,31964,Home Federal Bank,30-Jul-10,22-Aug-12
+The Cowlitz Bank,Longview,WA,22643,Heritage Bank,30-Jul-10,22-Aug-12
+Coastal Community Bank,Panama City Beach,FL,9619,Centennial Bank,30-Jul-10,5-Nov-12
+Bayside Savings Bank,Port Saint Joe,FL,57669,Centennial Bank,30-Jul-10,5-Nov-12
+Northwest Bank & Trust,Acworth,GA,57658,State Bank and Trust Company,30-Jul-10,5-Nov-12
+Home Valley Bank,Cave Junction,OR,23181,South Valley Bank & Trust,23-Jul-10,12-Sep-12
+SouthwestUSA Bank,Las Vegas,NV,35434,Plaza Bank,23-Jul-10,22-Aug-12
+Community Security Bank,New Prague,MN,34486,Roundbank,23-Jul-10,12-Sep-12
+Thunder Bank,Sylvan Grove,KS,10506,The Bennington State Bank,23-Jul-10,13-Sep-12
+Williamsburg First National Bank,Kingstree,SC,17837,"First Citizens Bank and Trust Company, Inc.",23-Jul-10,5-Nov-12
+Crescent Bank and Trust Company,Jasper,GA,27559,Renasant Bank,23-Jul-10,5-Nov-12
+Sterling Bank,Lantana,FL,32536,IBERIABANK,23-Jul-10,5-Nov-12
+"Mainstreet Savings Bank, FSB",Hastings,MI,28136,Commercial Bank,16-Jul-10,13-Sep-12
+Olde Cypress Community Bank,Clewiston,FL,28864,"CenterState Bank of Florida, N.A.",16-Jul-10,5-Nov-12
+Turnberry Bank,Aventura,FL,32280,NAFH National Bank,16-Jul-10,5-Nov-12
+Metro Bank of Dade County,Miami,FL,25172,NAFH National Bank,16-Jul-10,5-Nov-12
+First National Bank of the South,Spartanburg,SC,35383,NAFH National Bank,16-Jul-10,5-Nov-12
+Woodlands Bank,Bluffton,SC,32571,Bank of the Ozarks,16-Jul-10,5-Nov-12
+Home National Bank,Blackwell,OK,11636,RCB Bank,9-Jul-10,10-Dec-12
+USA Bank,Port Chester,NY,58072,New Century Bank,9-Jul-10,14-Sep-12
+Ideal Federal Savings Bank,Baltimore,MD,32456,No Acquirer,9-Jul-10,14-Sep-12
+Bay National Bank,Baltimore,MD,35462,"Bay Bank, FSB",9-Jul-10,15-Jan-13
+High Desert State Bank,Albuquerque,NM,35279,First American Bank,25-Jun-10,14-Sep-12
+First National Bank,Savannah,GA,34152,"The Savannah Bank, N.A.",25-Jun-10,5-Nov-12
+Peninsula Bank,Englewood,FL,26563,"Premier American Bank, N.A.",25-Jun-10,5-Nov-12
+Nevada Security Bank,Reno,NV,57110,Umpqua Bank,18-Jun-10,23-Aug-12
+Washington First International Bank,Seattle,WA,32955,East West Bank,11-Jun-10,14-Sep-12
+TierOne Bank,Lincoln,NE,29341,Great Western Bank,4-Jun-10,14-Sep-12
+Arcola Homestead Savings Bank,Arcola,IL,31813,No Acquirer,4-Jun-10,14-Sep-12
+First National Bank,Rosedale,MS,15814,The Jefferson Bank,4-Jun-10,5-Nov-12
+Sun West Bank,Las Vegas,NV,34785,City National Bank,28-May-10,14-Sep-12
+"Granite Community Bank, NA",Granite Bay,CA,57315,Tri Counties Bank,28-May-10,14-Sep-12
+Bank of Florida - Tampa,Tampa,FL,57814,EverBank,28-May-10,5-Nov-12
+Bank of Florida - Southwest,Naples,FL,35106,EverBank,28-May-10,5-Nov-12
+Bank of Florida - Southeast,Fort Lauderdale,FL,57360,EverBank,28-May-10,5-Nov-12
+Pinehurst Bank,Saint Paul,MN,57735,Coulee Bank,21-May-10,26-Oct-12
+Midwest Bank and Trust Company,Elmwood Park,IL,18117,"FirstMerit Bank, N.A.",14-May-10,23-Aug-12
+Southwest Community Bank,Springfield,MO,34255,Simmons First National Bank,14-May-10,23-Aug-12
+New Liberty Bank,Plymouth,MI,35586,Bank of Ann Arbor,14-May-10,23-Aug-12
+Satilla Community Bank,Saint Marys,GA,35114,Ameris Bank,14-May-10,5-Nov-12
+1st Pacific Bank of California,San Diego,CA,35517,City National Bank,7-May-10,13-Dec-12
+Towne Bank of Arizona,Mesa,AZ,57697,Commerce Bank of Arizona,7-May-10,23-Aug-12
+Access Bank,Champlin,MN,16476,PrinsBank,7-May-10,23-Aug-12
+The Bank of Bonifay,Bonifay,FL,14246,First Federal Bank of Florida,7-May-10,5-Nov-12
+Frontier Bank,Everett,WA,22710,"Union Bank, N.A.",30-Apr-10,15-Jan-13
+BC National Banks,Butler,MO,17792,Community First Bank,30-Apr-10,23-Aug-12
+Champion Bank,Creve Coeur,MO,58362,BankLiberty,30-Apr-10,23-Aug-12
+CF Bancorp,Port Huron,MI,30005,First Michigan Bank,30-Apr-10,15-Jan-13
+Westernbank Puerto Rico,Mayaguez,PR,31027,Banco Popular de Puerto Rico,30-Apr-10,5-Nov-12
+R-G Premier Bank of Puerto Rico,Hato Rey,PR,32185,Scotiabank de Puerto Rico,30-Apr-10,5-Nov-12
+Eurobank,San Juan,PR,27150,Oriental Bank and Trust,30-Apr-10,5-Nov-12
+Wheatland Bank,Naperville,IL,58429,Wheaton Bank & Trust,23-Apr-10,23-Aug-12
+Peotone Bank and Trust Company,Peotone,IL,10888,First Midwest Bank,23-Apr-10,23-Aug-12
+Lincoln Park Savings Bank,Chicago,IL,30600,Northbrook Bank & Trust Company,23-Apr-10,23-Aug-12
+New Century Bank,Chicago,IL,34821,"MB Financial Bank, N.A.",23-Apr-10,23-Aug-12
+Citizens Bank and Trust Company of Chicago,Chicago,IL,34658,Republic Bank of Chicago,23-Apr-10,23-Aug-12
+Broadway Bank,Chicago,IL,22853,"MB Financial Bank, N.A.",23-Apr-10,23-Aug-12
+"Amcore Bank, National Association",Rockford,IL,3735,Harris N.A.,23-Apr-10,23-Aug-12
+City Bank,Lynnwood,WA,21521,Whidbey Island Bank,16-Apr-10,14-Sep-12
+Tamalpais Bank,San Rafael,CA,33493,"Union Bank, N.A.",16-Apr-10,23-Aug-12
+Innovative Bank,Oakland,CA,23876,Center Bank,16-Apr-10,23-Aug-12
+Butler Bank,Lowell,MA,26619,People's United Bank,16-Apr-10,23-Aug-12
+Riverside National Bank of Florida,Fort Pierce,FL,24067,"TD Bank, N.A.",16-Apr-10,5-Nov-12
+AmericanFirst Bank,Clermont,FL,57724,"TD Bank, N.A.",16-Apr-10,31-Oct-12
+First Federal Bank of North Florida,Palatka,FL,28886,"TD Bank, N.A.",16-Apr-10,15-Jan-13
+Lakeside Community Bank,Sterling Heights,MI,34878,No Acquirer,16-Apr-10,23-Aug-12
+Beach First National Bank,Myrtle Beach,SC,34242,Bank of North Carolina,9-Apr-10,5-Nov-12
+Desert Hills Bank,Phoenix,AZ,57060,New York Community Bank,26-Mar-10,23-Aug-12
+Unity National Bank,Cartersville,GA,34678,Bank of the Ozarks,26-Mar-10,14-Sep-12
+Key West Bank,Key West,FL,34684,Centennial Bank,26-Mar-10,23-Aug-12
+McIntosh Commercial Bank,Carrollton,GA,57399,CharterBank,26-Mar-10,23-Aug-12
+State Bank of Aurora,Aurora,MN,8221,Northern State Bank,19-Mar-10,23-Aug-12
+First Lowndes Bank,Fort Deposit,AL,24957,First Citizens Bank,19-Mar-10,23-Aug-12
+Bank of Hiawassee,Hiawassee,GA,10054,Citizens South Bank,19-Mar-10,23-Aug-12
+Appalachian Community Bank,Ellijay,GA,33989,Community & Southern Bank,19-Mar-10,31-Oct-12
+Advanta Bank Corp.,Draper,UT,33535,No Acquirer,19-Mar-10,14-Sep-12
+Century Security Bank,Duluth,GA,58104,Bank of Upson,19-Mar-10,23-Aug-12
+American National Bank,Parma,OH,18806,The National Bank and Trust Company,19-Mar-10,23-Aug-12
+Statewide Bank,Covington,LA,29561,Home Bank,12-Mar-10,23-Aug-12
+Old Southern Bank,Orlando,FL,58182,Centennial Bank,12-Mar-10,23-Aug-12
+The Park Avenue Bank,New York,NY,27096,Valley National Bank,12-Mar-10,23-Aug-12
+LibertyPointe Bank,New York,NY,58071,Valley National Bank,11-Mar-10,23-Aug-12
+Centennial Bank,Ogden,UT,34430,No Acquirer,5-Mar-10,14-Sep-12
+Waterfield Bank,Germantown,MD,34976,No Acquirer,5-Mar-10,23-Aug-12
+Bank of Illinois,Normal,IL,9268,Heartland Bank and Trust Company,5-Mar-10,23-Aug-12
+Sun American Bank,Boca Raton,FL,27126,First-Citizens Bank & Trust Company,5-Mar-10,23-Aug-12
+Rainier Pacific Bank,Tacoma,WA,38129,Umpqua Bank,26-Feb-10,23-Aug-12
+Carson River Community Bank,Carson City,NV,58352,Heritage Bank of Nevada,26-Feb-10,15-Jan-13
+"La Jolla Bank, FSB",La Jolla,CA,32423,"OneWest Bank, FSB",19-Feb-10,24-Aug-12
+George Washington Savings Bank,Orland Park,IL,29952,"FirstMerit Bank, N.A.",19-Feb-10,24-Aug-12
+The La Coste National Bank,La Coste,TX,3287,Community National Bank,19-Feb-10,14-Sep-12
+Marco Community Bank,Marco Island,FL,57586,Mutual of Omaha Bank,19-Feb-10,24-Aug-12
+1st American State Bank of Minnesota,Hancock,MN,15448,"Community Development Bank, FSB",5-Feb-10,24-Aug-12
+American Marine Bank,Bainbridge Island,WA,16730,Columbia State Bank,29-Jan-10,24-Aug-12
+First Regional Bank,Los Angeles,CA,23011,First-Citizens Bank & Trust Company,29-Jan-10,24-Aug-12
+Community Bank and Trust,Cornelia,GA,5702,SCBT National Association,29-Jan-10,15-Jan-13
+"Marshall Bank, N.A.",Hallock,MN,16133,United Valley Bank,29-Jan-10,23-Aug-12
+Florida Community Bank,Immokalee,FL,5672,"Premier American Bank, N.A.",29-Jan-10,15-Jan-13
+First National Bank of Georgia,Carrollton,GA,16480,Community & Southern Bank,29-Jan-10,13-Dec-12
+Columbia River Bank,The Dalles,OR,22469,Columbia State Bank,22-Jan-10,14-Sep-12
+Evergreen Bank,Seattle,WA,20501,Umpqua Bank,22-Jan-10,15-Jan-13
+Charter Bank,Santa Fe,NM,32498,Charter Bank,22-Jan-10,23-Aug-12
+Bank of Leeton,Leeton,MO,8265,"Sunflower Bank, N.A.",22-Jan-10,15-Jan-13
+Premier American Bank,Miami,FL,57147,"Premier American Bank, N.A.",22-Jan-10,13-Dec-12
+Barnes Banking Company,Kaysville,UT,1252,No Acquirer,15-Jan-10,23-Aug-12
+St. Stephen State Bank,St. Stephen,MN,17522,First State Bank of St. Joseph,15-Jan-10,23-Aug-12
+Town Community Bank & Trust,Antioch,IL,34705,First American Bank,15-Jan-10,23-Aug-12
+Horizon Bank,Bellingham,WA,22977,Washington Federal Savings and Loan Association,8-Jan-10,23-Aug-12
+"First Federal Bank of California, F.S.B.",Santa Monica,CA,28536,"OneWest Bank, FSB",18-Dec-09,23-Aug-12
+Imperial Capital Bank,La Jolla,CA,26348,City National Bank,18-Dec-09,5-Sep-12
+Independent Bankers' Bank,Springfield,IL,26820,The Independent BankersBank (TIB),18-Dec-09,23-Aug-12
+New South Federal Savings Bank,Irondale,AL,32276,Beal Bank,18-Dec-09,23-Aug-12
+Citizens State Bank,New Baltimore,MI,1006,No Acquirer,18-Dec-09,5-Nov-12
+Peoples First Community Bank,Panama City,FL,32167,Hancock Bank,18-Dec-09,5-Nov-12
+RockBridge Commercial Bank,Atlanta,GA,58315,No Acquirer,18-Dec-09,5-Nov-12
+SolutionsBank,Overland Park,KS,4731,Arvest Bank,11-Dec-09,23-Aug-12
+"Valley Capital Bank, N.A.",Mesa,AZ,58399,Enterprise Bank & Trust,11-Dec-09,23-Aug-12
+"Republic Federal Bank, N.A.",Miami,FL,22846,1st United Bank,11-Dec-09,5-Nov-12
+Greater Atlantic Bank,Reston,VA,32583,Sonabank,4-Dec-09,5-Nov-12
+Benchmark Bank,Aurora,IL,10440,"MB Financial Bank, N.A.",4-Dec-09,23-Aug-12
+AmTrust Bank,Cleveland,OH,29776,New York Community Bank,4-Dec-09,5-Nov-12
+The Tattnall Bank,Reidsville,GA,12080,Heritage Bank of the South,4-Dec-09,5-Nov-12
+First Security National Bank,Norcross,GA,26290,State Bank and Trust Company,4-Dec-09,5-Nov-12
+The Buckhead Community Bank,Atlanta,GA,34663,State Bank and Trust Company,4-Dec-09,5-Nov-12
+Commerce Bank of Southwest Florida,Fort Myers,FL,58016,Central Bank,20-Nov-09,5-Nov-12
+Pacific Coast National Bank,San Clemente,CA,57914,Sunwest Bank,13-Nov-09,22-Aug-12
+Orion Bank,Naples,FL,22427,IBERIABANK,13-Nov-09,5-Nov-12
+"Century Bank, F.S.B.",Sarasota,FL,32267,IBERIABANK,13-Nov-09,22-Aug-12
+United Commercial Bank,San Francisco,CA,32469,East West Bank,6-Nov-09,5-Nov-12
+Gateway Bank of St. Louis,St. Louis,MO,19450,Central Bank of Kansas City,6-Nov-09,22-Aug-12
+Prosperan Bank,Oakdale,MN,35074,"Alerus Financial, N.A.",6-Nov-09,22-Aug-12
+Home Federal Savings Bank,Detroit,MI,30329,Liberty Bank and Trust Company,6-Nov-09,22-Aug-12
+United Security Bank,Sparta,GA,22286,Ameris Bank,6-Nov-09,15-Jan-13
+North Houston Bank,Houston,TX,18776,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+Madisonville State Bank,Madisonville,TX,33782,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+Citizens National Bank,Teague,TX,25222,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+Park National Bank,Chicago,IL,11677,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+Pacific National Bank,San Francisco,CA,30006,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+California National Bank,Los Angeles,CA,34659,U.S. Bank N.A.,30-Oct-09,5-Sep-12
+San Diego National Bank,San Diego,CA,23594,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+Community Bank of Lemont,Lemont,IL,35291,U.S. Bank N.A.,30-Oct-09,15-Jan-13
+"Bank USA, N.A.",Phoenix,AZ,32218,U.S. Bank N.A.,30-Oct-09,22-Aug-12
+First DuPage Bank,Westmont,IL,35038,First Midwest Bank,23-Oct-09,22-Aug-12
+Riverview Community Bank,Otsego,MN,57525,Central Bank,23-Oct-09,22-Aug-12
+Bank of Elmwood,Racine,WI,18321,Tri City National Bank,23-Oct-09,22-Aug-12
+Flagship National Bank,Bradenton,FL,35044,First Federal Bank of Florida,23-Oct-09,22-Aug-12
+Hillcrest Bank Florida,Naples,FL,58336,Stonegate Bank,23-Oct-09,22-Aug-12
+American United Bank,Lawrenceville,GA,57794,Ameris Bank,23-Oct-09,5-Sep-12
+Partners Bank,Naples,FL,57959,Stonegate Bank,23-Oct-09,15-Jan-13
+San Joaquin Bank,Bakersfield,CA,23266,Citizens Business Bank,16-Oct-09,22-Aug-12
+Southern Colorado National Bank,Pueblo,CO,57263,Legacy Bank,2-Oct-09,5-Sep-12
+Jennings State Bank,Spring Grove,MN,11416,Central Bank,2-Oct-09,21-Aug-12
+Warren Bank,Warren,MI,34824,The Huntington National Bank,2-Oct-09,21-Aug-12
+Georgian Bank,Atlanta,GA,57151,"First Citizens Bank and Trust Company, Inc.",25-Sep-09,21-Aug-12
+"Irwin Union Bank, F.S.B.",Louisville,KY,57068,"First Financial Bank, N.A.",18-Sep-09,5-Sep-12
+Irwin Union Bank and Trust Company,Columbus,IN,10100,"First Financial Bank, N.A.",18-Sep-09,21-Aug-12
+Venture Bank,Lacey,WA,22868,First-Citizens Bank & Trust Company,11-Sep-09,21-Aug-12
+Brickwell Community Bank,Woodbury,MN,57736,CorTrust Bank N.A.,11-Sep-09,15-Jan-13
+"Corus Bank, N.A.",Chicago,IL,13693,"MB Financial Bank, N.A.",11-Sep-09,21-Aug-12
+First State Bank,Flagstaff,AZ,34875,Sunwest Bank,4-Sep-09,15-Jan-13
+Platinum Community Bank,Rolling Meadows,IL,35030,No Acquirer,4-Sep-09,21-Aug-12
+Vantus Bank,Sioux City,IN,27732,Great Southern Bank,4-Sep-09,21-Aug-12
+InBank,Oak Forest,IL,20203,"MB Financial Bank, N.A.",4-Sep-09,21-Aug-12
+First Bank of Kansas City,Kansas City,MO,25231,Great American Bank,4-Sep-09,21-Aug-12
+Affinity Bank,Ventura,CA,27197,Pacific Western Bank,28-Aug-09,21-Aug-12
+Mainstreet Bank,Forest Lake,MN,1909,Central Bank,28-Aug-09,21-Aug-12
+Bradford Bank,Baltimore,MD,28312,Manufacturers and Traders Trust Company (M&T Bank),28-Aug-09,15-Jan-13
+Guaranty Bank,Austin,TX,32618,BBVA Compass,21-Aug-09,21-Aug-12
+CapitalSouth Bank,Birmingham,AL,22130,IBERIABANK,21-Aug-09,15-Jan-13
+First Coweta Bank,Newnan,GA,57702,United Bank,21-Aug-09,15-Jan-13
+ebank,Atlanta,GA,34682,"Stearns Bank, N.A.",21-Aug-09,21-Aug-12
+Community Bank of Nevada,Las Vegas,NV,34043,No Acquirer,14-Aug-09,21-Aug-12
+Community Bank of Arizona,Phoenix,AZ,57645,MidFirst Bank,14-Aug-09,21-Aug-12
+"Union Bank, National Association",Gilbert,AZ,34485,MidFirst Bank,14-Aug-09,21-Aug-12
+Colonial Bank,Montgomery,AL,9609,"Branch Banking & Trust Company, (BB&T)",14-Aug-09,5-Sep-12
+Dwelling House Savings and Loan Association,Pittsburgh,PA,31559,"PNC Bank, N.A.",14-Aug-09,15-Jan-13
+Community First Bank,Prineville,OR,23268,Home Federal Bank,7-Aug-09,15-Jan-13
+Community National Bank of Sarasota County,Venice,FL,27183,"Stearns Bank, N.A.",7-Aug-09,20-Aug-12
+First State Bank,Sarasota,FL,27364,"Stearns Bank, N.A.",7-Aug-09,20-Aug-12
+Mutual Bank,Harvey,IL,18659,United Central Bank,31-Jul-09,20-Aug-12
+First BankAmericano,Elizabeth,NJ,34270,Crown Bank,31-Jul-09,20-Aug-12
+Peoples Community Bank,West Chester,OH,32288,"First Financial Bank, N.A.",31-Jul-09,20-Aug-12
+Integrity Bank,Jupiter,FL,57604,Stonegate Bank,31-Jul-09,20-Aug-12
+First State Bank of Altus,Altus,OK,9873,Herring Bank,31-Jul-09,20-Aug-12
+Security Bank of Jones County,Gray,GA,8486,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Security Bank of Houston County,Perry,GA,27048,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Security Bank of Bibb County,Macon,GA,27367,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Security Bank of North Metro,Woodstock,GA,57105,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Security Bank of North Fulton,Alpharetta,GA,57430,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Security Bank of Gwinnett County,Suwanee,GA,57346,State Bank and Trust Company,24-Jul-09,20-Aug-12
+Waterford Village Bank,Williamsville,NY,58065,"Evans Bank, N.A.",24-Jul-09,20-Aug-12
+Temecula Valley Bank,Temecula,CA,34341,First-Citizens Bank & Trust Company,17-Jul-09,20-Aug-12
+Vineyard Bank,Rancho Cucamonga,CA,23556,California Bank & Trust,17-Jul-09,20-Aug-12
+BankFirst,Sioux Falls,SD,34103,"Alerus Financial, N.A.",17-Jul-09,20-Aug-12
+First Piedmont Bank,Winder,GA,34594,First American Bank and Trust Company,17-Jul-09,15-Jan-13
+Bank of Wyoming,Thermopolis,WY,22754,Central Bank & Trust,10-Jul-09,20-Aug-12
+Founders Bank,Worth,IL,18390,The PrivateBank and Trust Company,2-Jul-09,20-Aug-12
+Millennium State Bank of Texas,Dallas,TX,57667,State Bank of Texas,2-Jul-09,26-Oct-12
+First National Bank of Danville,Danville,IL,3644,"First Financial Bank, N.A.",2-Jul-09,20-Aug-12
+Elizabeth State Bank,Elizabeth,IL,9262,Galena State Bank and Trust Company,2-Jul-09,20-Aug-12
+Rock River Bank,Oregon,IL,15302,The Harvard State Bank,2-Jul-09,20-Aug-12
+First State Bank of Winchester,Winchester,IL,11710,The First National Bank of Beardstown,2-Jul-09,20-Aug-12
+John Warner Bank,Clinton,IL,12093,State Bank of Lincoln,2-Jul-09,20-Aug-12
+Mirae Bank,Los Angeles,CA,57332,Wilshire State Bank,26-Jun-09,20-Aug-12
+MetroPacific Bank,Irvine,CA,57893,Sunwest Bank,26-Jun-09,20-Aug-12
+Horizon Bank,Pine City,MN,9744,"Stearns Bank, N.A.",26-Jun-09,20-Aug-12
+Neighborhood Community Bank,Newnan,GA,35285,CharterBank,26-Jun-09,20-Aug-12
+Community Bank of West Georgia,Villa Rica,GA,57436,No Acquirer,26-Jun-09,17-Aug-12
+First National Bank of Anthony,Anthony,KS,4614,Bank of Kansas,19-Jun-09,17-Aug-12
+Cooperative Bank,Wilmington,NC,27837,First Bank,19-Jun-09,17-Aug-12
+Southern Community Bank,Fayetteville,GA,35251,United Community Bank,19-Jun-09,17-Aug-12
+Bank of Lincolnwood,Lincolnwood,IL,17309,Republic Bank of Chicago,5-Jun-09,17-Aug-12
+Citizens National Bank,Macomb,IL,5757,Morton Community Bank,22-May-09,4-Sep-12
+Strategic Capital Bank,Champaign,IL,35175,Midland States Bank,22-May-09,4-Sep-12
+"BankUnited, FSB",Coral Gables,FL,32247,BankUnited,21-May-09,17-Aug-12
+Westsound Bank,Bremerton,WA,34843,Kitsap Bank,8-May-09,4-Sep-12
+America West Bank,Layton,UT,35461,Cache Valley Bank,1-May-09,17-Aug-12
+Citizens Community Bank,Ridgewood,NJ,57563,North Jersey Community Bank,1-May-09,4-Sep-12
+"Silverton Bank, NA",Atlanta,GA,26535,No Acquirer,1-May-09,17-Aug-12
+First Bank of Idaho,Ketchum,ID,34396,"U.S. Bank, N.A.",24-Apr-09,17-Aug-12
+First Bank of Beverly Hills,Calabasas,CA,32069,No Acquirer,24-Apr-09,4-Sep-12
+Michigan Heritage Bank,Farmington Hills,MI,34369,Level One Bank,24-Apr-09,17-Aug-12
+American Southern Bank,Kennesaw,GA,57943,Bank of North Georgia,24-Apr-09,17-Aug-12
+Great Basin Bank of Nevada,Elko,NV,33824,Nevada State Bank,17-Apr-09,4-Sep-12
+American Sterling Bank,Sugar Creek,MO,8266,Metcalf Bank,17-Apr-09,31-Aug-12
+New Frontier Bank,Greeley,CO,34881,No Acquirer,10-Apr-09,4-Sep-12
+Cape Fear Bank,Wilmington,NC,34639,First Federal Savings and Loan Association,10-Apr-09,17-Aug-12
+Omni National Bank,Atlanta,GA,22238,No Acquirer,27-Mar-09,17-Aug-12
+"TeamBank, NA",Paola,KS,4754,Great Southern Bank,20-Mar-09,17-Aug-12
+Colorado National Bank,Colorado Springs,CO,18896,Herring Bank,20-Mar-09,17-Aug-12
+FirstCity Bank,Stockbridge,GA,18243,No Acquirer,20-Mar-09,17-Aug-12
+Freedom Bank of Georgia,Commerce,GA,57558,Northeast Georgia Bank,6-Mar-09,17-Aug-12
+Security Savings Bank,Henderson,NV,34820,Bank of Nevada,27-Feb-09,7-Sep-12
+Heritage Community Bank,Glenwood,IL,20078,"MB Financial Bank, N.A.",27-Feb-09,17-Aug-12
+Silver Falls Bank,Silverton,OR,35399,Citizens Bank,20-Feb-09,17-Aug-12
+Pinnacle Bank of Oregon,Beaverton,OR,57342,Washington Trust Bank of Spokane,13-Feb-09,17-Aug-12
+Corn Belt Bank & Trust Co.,Pittsfield,IL,16500,The Carlinville National Bank,13-Feb-09,17-Aug-12
+Riverside Bank of the Gulf Coast,Cape Coral,FL,34563,TIB Bank,13-Feb-09,17-Aug-12
+Sherman County Bank,Loup City,NE,5431,Heritage Bank,13-Feb-09,17-Aug-12
+County Bank,Merced,CA,22574,Westamerica Bank,6-Feb-09,4-Sep-12
+Alliance Bank,Culver City,CA,23124,California Bank & Trust,6-Feb-09,16-Aug-12
+FirstBank Financial Services,McDonough,GA,57017,Regions Bank,6-Feb-09,16-Aug-12
+Ocala National Bank,Ocala,FL,26538,"CenterState Bank of Florida, N.A.",30-Jan-09,4-Sep-12
+Suburban FSB,Crofton,MD,30763,Bank of Essex,30-Jan-09,16-Aug-12
+MagnetBank,Salt Lake City,UT,58001,No Acquirer,30-Jan-09,16-Aug-12
+1st Centennial Bank,Redlands,CA,33025,First California Bank,23-Jan-09,16-Aug-12
+Bank of Clark County,Vancouver,WA,34959,Umpqua Bank,16-Jan-09,16-Aug-12
+National Bank of Commerce,Berkeley,IL,19733,Republic Bank of Chicago,16-Jan-09,16-Aug-12
+Sanderson State Bank,Sanderson,TX,11568,The Pecos County State Bank,12-Dec-08,4-Sep-12
+Haven Trust Bank,Duluth,GA,35379,"Branch Banking & Trust Company, (BB&T)",12-Dec-08,16-Aug-12
+First Georgia Community Bank,Jackson,GA,34301,United Bank,5-Dec-08,16-Aug-12
+PFF Bank & Trust,Pomona,CA,28344,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13
+Downey Savings & Loan,Newport Beach,CA,30968,"U.S. Bank, N.A.",21-Nov-08,4-Jan-13
+Community Bank,Loganville,GA,16490,Bank of Essex,21-Nov-08,4-Sep-12
+Security Pacific Bank,Los Angeles,CA,23595,Pacific Western Bank,7-Nov-08,28-Aug-12
+"Franklin Bank, SSB",Houston,TX,26870,Prosperity Bank,7-Nov-08,16-Aug-12
+Freedom Bank,Bradenton,FL,57930,Fifth Third Bank,31-Oct-08,16-Aug-12
+Alpha Bank & Trust,Alpharetta,GA,58241,"Stearns Bank, N.A.",24-Oct-08,16-Aug-12
+Meridian Bank,Eldred,IL,13789,National Bank,10-Oct-08,31-May-12
+Main Street Bank,Northville,MI,57654,Monroe Bank & Trust,10-Oct-08,16-Aug-12
+Washington Mutual Bank,Henderson,NV,32633,JP Morgan Chase Bank,25-Sep-08,16-Aug-12
+Ameribank,Northfork,WV,6782,The Citizens Savings Bank,19-Sep-08,16-Aug-12
+Silver State Bank,Henderson,NV,34194,Nevada State Bank,5-Sep-08,16-Aug-12
+Integrity Bank,Alpharetta,GA,35469,Regions Bank,29-Aug-08,16-Aug-12
+Columbian Bank & Trust,Topeka,KS,22728,Citizens Bank & Trust,22-Aug-08,16-Aug-12
+First Priority Bank,Bradenton,FL,57523,SunTrust Bank,1-Aug-08,16-Aug-12
+"First Heritage Bank, NA",Newport Beach,CA,57961,Mutual of Omaha Bank,25-Jul-08,28-Aug-12
+First National Bank of Nevada,Reno,NV,27011,Mutual of Omaha Bank,25-Jul-08,28-Aug-12
+IndyMac Bank,Pasadena,CA,29730,"OneWest Bank, FSB",11-Jul-08,28-Aug-12
+"First Integrity Bank, NA",Staples,MN,12736,First International Bank and Trust,30-May-08,28-Aug-12
+"ANB Financial, NA",Bentonville,AR,33901,Pulaski Bank and Trust Company,9-May-08,28-Aug-12
+Hume Bank,Hume,MO,1971,Security Bank,7-Mar-08,28-Aug-12
+Douglass National Bank,Kansas City,MO,24660,Liberty Bank and Trust Company,25-Jan-08,26-Oct-12
+Miami Valley Bank,Lakeview,OH,16848,The Citizens Banking Company,4-Oct-07,28-Aug-12
+NetBank,Alpharetta,GA,32575,ING DIRECT,28-Sep-07,28-Aug-12
+Metropolitan Savings Bank,Pittsburgh,PA,35353,Allegheny Valley Bank of Pittsburgh,2-Feb-07,27-Oct-10
+Bank of Ephraim,Ephraim,UT,1249,Far West Bank,25-Jun-04,9-Apr-08
+Reliance Bank,White Plains,NY,26778,Union State Bank,19-Mar-04,9-Apr-08
+Guaranty National Bank of Tallahassee,Tallahassee,FL,26838,Hancock Bank of Florida,12-Mar-04,5-Jun-12
+Dollar Savings Bank,Newark,NJ,31330,No Acquirer,14-Feb-04,9-Apr-08
+Pulaski Savings Bank,Philadelphia,PA,27203,Earthstar Bank,14-Nov-03,22-Jul-05
+First National Bank of Blanchardville,Blanchardville,WI,11639,The Park Bank,9-May-03,5-Jun-12
+Southern Pacific Bank,Torrance,CA,27094,Beal Bank,7-Feb-03,20-Oct-08
+Farmers Bank of Cheneyville,Cheneyville,LA,16445,Sabine State Bank & Trust,17-Dec-02,20-Oct-04
+Bank of Alamo,Alamo,TN,9961,No Acquirer,8-Nov-02,18-Mar-05
+AmTrade International Bank,Atlanta,GA,33784,No Acquirer,30-Sep-02,11-Sep-06
+Universal Federal Savings Bank,Chicago,IL,29355,Chicago Community Bank,27-Jun-02,9-Apr-08
+Connecticut Bank of Commerce,Stamford,CT,19183,Hudson United Bank,26-Jun-02,14-Feb-12
+New Century Bank,Shelby Township,MI,34979,No Acquirer,28-Mar-02,18-Mar-05
+Net 1st National Bank,Boca Raton,FL,26652,Bank Leumi USA,1-Mar-02,9-Apr-08
+"NextBank, NA",Phoenix,AZ,22314,No Acquirer,7-Feb-02,27-Aug-10
+Oakwood Deposit Bank Co.,Oakwood,OH,8966,The State Bank & Trust Company,1-Feb-02,25-Oct-12
+Bank of Sierra Blanca,Sierra Blanca,TX,22002,The Security State Bank of Pecos,18-Jan-02,6-Nov-03
+"Hamilton Bank, NA",Miami,FL,24382,Israel Discount Bank of New York,11-Jan-02,5-Jun-12
+Sinclair National Bank,Gravette,AR,34248,Delta Trust & Bank,7-Sep-01,10-Feb-04
+"Superior Bank, FSB",Hinsdale,IL,32646,"Superior Federal, FSB",27-Jul-01,5-Jun-12
+Malta National Bank,Malta,OH,6629,North Valley Bank,3-May-01,18-Nov-02
+First Alliance Bank & Trust Co.,Manchester,NH,34264,Southern New Hampshire Bank & Trust,2-Feb-01,18-Feb-03
+National State Bank of Metropolis,Metropolis,IL,3815,Banterra Bank of Marion,14-Dec-00,17-Mar-05
+Bank of Honolulu,Honolulu,HI,21029,Bank of the Orient,13-Oct-00,17-Mar-05
diff --git a/pandas/io/tests/data/banklist.html b/pandas/io/tests/data/banklist.html
index 801016e7a5478..8ec1561f8c394 100644
--- a/pandas/io/tests/data/banklist.html
+++ b/pandas/io/tests/data/banklist.html
@@ -1,4885 +1,4885 @@
-<!DOCTYPE html><!-- HTML5 -->
-<html lang="en-US">
-<!-- Content language is American English. -->
-<head>
-<title>FDIC: Failed Bank List</title>
-<!-- Meta Tags -->
-<meta charset="UTF-8">
-<!-- Unicode character encoding -->
-<meta http-equiv="X-UA-Compatible" content="IE=edge">
-<!-- Turns off IE Compatiblity Mode -->
-<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1">
-<!-- Makes it so phones don't auto zoom out. -->
-<meta name="author" content="DRR">
-<meta http-equiv="keywords" name="keywords" content="banks, financial institutions, failed, failure, closing, deposits, depositors, banking services, assuming institution, acquiring institution, claims">
-<!-- CSS -->
-<link rel="stylesheet" type="text/css" href="/responsive/css/responsive.css">
-<link rel="stylesheet" type="text/css" href="banklist.css">
-</head>
-<body>
-
-<!-- START of Header -->
-<script type="text/javascript" src="/responsive/header/js/header.js"></script>
-<link rel="stylesheet" type="text/css" href="/responsive/header/css/header.css" />
-<!-- googleac.html includes Autocomplete functionality -->
-<!-- Autocomplete files -->
-<link rel="stylesheet" type="text/css" href="/responsive/header/css/jquery.autocomplete.css" />
-<script type="text/javascript" src="/responsive/js/jquery-1.4.1.min.js"></script>
-<script type="text/javascript" src="/responsive/header/js/jquery.autocomplete-1.4.2.js"></script>
-<script type="text/javascript">
-function findValue(li) {
- if( li == null ) return alert("No match!");
-
- // if coming from an AJAX call, let's use the Id as the value
- if( !!li.extra ) var sValue = li.extra[0];
-
- // otherwise, let's just display the value in the text box
- else var sValue = li.selectValue;
-
- $('#googlesearch').submit();
-
-}
-function findValue2(li) {
- if( li == null ) return alert("No match!");
-
- // if coming from an AJAX call, let's use the Id as the value
- if( !!li.extra ) var sValue = li.extra[0];
-
- // otherwise, let's just display the value in the text box
- else var sValue = li.selectValue;
-
- $('#googlesearch2').submit();
-}
-function selectItem(li) {
- findValue(li);
-}
-function selectItem2(li) {
- findValue2(li);
-}
-
-$().ready(function() {
-
- function log(event, data, formatted) {
- $("<li>").html( !data ? "No match!" : "Selected: " + formatted).appendTo("#result");
- }
-
- function formatItem(row) {
- return row[0] + " (<strong>id: " + row[1] + "</strong>)";
- }
- function formatResult(row) {
- return row[0].replace(/(<.+?>)/gi, '');
- }
-
- $("#newSearch").autocomplete("/searchjs.asp", {
- width: 179,
- autoFill: false,
- //delay:10,
- minChars:2,
- cacheLength: 10,
- onFindValue:findValue,
- onItemSelect: selectItem,
- selectFirst: false
-
- });
-
- $("#search2").autocomplete("/searchjs.asp", {
- width: 160,
- autoFill: false,
- //delay:10,
- minChars:2,
- cacheLength: 10,
- onFindValue:findValue2,
- onItemSelect: selectItem2,
- selectFirst: false
-
- });
-
-});
-
-</script>
-<!-- END CODE NEEDED TO MAKE THE SEARCH BOX WORK -->
-
-<!-- FORESEE Code -->
-<script type="text/javascript" src="/foresee/foresee-trigger.js"></script>
-
-<a href="#after_header" class="responsive_header-skip_header">Skip Header</a>
-<header>
-<div id="responsive_header">
- <div id="responsive_header-right_side">
- <ul id="responsive_header-links">
- <li id="responsive_header-twitter" title="Visit FDIC on Twitter"><a tabindex="1" href="/social.html?site=http://twitter.com/FDICgov">Visit FDIC on Twitter</a></li>
- <li id="responsive_header-facebook" title="Visit FDIC on Facebook"><a tabindex="1" href="/social.html?site=http://www.facebook.com/FDICgov">Visit FDIC on Facebook</a></li>
- <li id="responsive_header-fdicchannel" title="Visit FDIC on YouTube"><a tabindex="1" href="/social.html?site=http://www.youtube.com/user/FDICchannel">Visit FDIC on YouTube</a></li>
- <li id="responsive_header-rss" title="FDIC RSS Feed"><a tabindex="1" href="/rss.html">FDIC RSS Feed</a></li>
- <li id="responsive_header-subscribe" title="Subscribe to FDIC alerts"><a tabindex="1" href="http://service.govdelivery.com/service/multi_subscribe.html?code=USFDIC">Subscribe to FDIC alerts</a></li>
- </ul>
- <div id="responsive_header-search">
- <a href="/search/advanced.html" class="search" title="Advanced Search">Advanced Search</a>
- <form id="googlesearch" action="http://search.fdic.gov/search" method="get" name="Search box for FDIC.gov">
- <fieldset>
- <div class="form">
- <label for="q">Search FDIC.gov</label>
- <input tabindex="1" id="newSearch" name="q" class="field" type="text" style="outline: 0 none;" value="Search FDIC..." onblur="if(this.value == '') {this.value = 'Search FDIC...';}" onfocus="if(this.value == 'Search FDIC...') {this.value = '';}" />
- <input tabindex="1" id="searchsubmit" class="submit" alt="Search Icon" title="Search Icon" type="submit" value="" />
- <input value="date:D:L:d1" name="sort" type="hidden" />
-
- <input value="xml_no_dtd" name="output" type="hidden" />
- <input value="UTF-8" name="ie" type="hidden" />
- <input value="UTF-8" name="oe" type="hidden" />
- <input value="wwwGOV" name="client" type="hidden" />
- <input value="wwwGOV" name="proxystylesheet" type="hidden" />
- <input value="default" name="site" type="hidden" />
- </div>
- </fieldset>
- </form>
- </div>
- </div>
- <!-- close right side -->
- <a id="responsive_header-fdic_logo" href="/" title="FDIC Homepage">FDIC Homepage</a>
- <h1>Federal Deposit<br>Insurance Corporation</h1>
- <h2>Each depositor insured to at least $250,000 per insured bank</h2>
- <div class="clear"></div>
- <nav>
- <div id="responsive_header_nav">
- <div id="responsive_header-topnav">
- <div id="responsive_header-topnav-downarrow" onclick="show_rwdnav(this)"></div>
- <ul id="responsive_header-topnav-list">
- <li id="responsive_header-topnav-home" title="Home" onmouseover="show_responsive_header_subnav(this)"><a href="/">Home</a></li>
- <li id="responsive_header-topnav-deposit" title="Deposit Insurance" onmouseover="show_responsive_header_subnav(this)"><a href="/deposit/">Deposit Insurance</a></li>
- <li id="responsive_header-topnav-consumers" title="Consumer Protection" onmouseover="show_responsive_header_subnav(this)"><a href="/consumers/">Consumer Protection</a></li>
- <li id="responsive_header-topnav-bank" title="Industry Analysis" onmouseover="show_responsive_header_subnav(this)"><a href="/bank/">Industry Analysis</a></li>
- <li id="responsive_header-topnav-regulations" title="Regulations & Examinations" onmouseover="show_responsive_header_subnav(this)"><a href="/regulations/">Regulations & Examinations</a></li>
- <li id="responsive_header-topnav-buying" title="Asset Sales" onmouseover="show_responsive_header_subnav(this)"><a href="/buying/">Asset Sales</a></li>
- <li id="responsive_header-topnav-news" title="News & Events" onmouseover="show_responsive_header_subnav(this)"><a href="/news/">News & Events</a></li>
- <li id="responsive_header-topnav-about" title="About FDIC" onmouseover="show_responsive_header_subnav(this)"><a href="/about/">About FDIC</a></li>
- </ul>
- <div class="clear"></div>
- </div>
- <div id="responsive_header-topnav_subnav">
- <div id="responsive_header-topnav_subnav-downarrow" onclick="show_rwdnav(this)"></div>
- <ul id="responsive_header-topnav-home_subnav"><li><a> </a></li></ul>
- <ul id="responsive_header-topnav-deposit_subnav">
- <li title="BankFind"><a href="http://research.fdic.gov/bankfind/">BankFind</a></li>
- <li title="Are My Deposits Insured?"><a href="/deposit/deposits/">Are My Deposits Insured?</a></li>
- <li title="Uninsured Investments"><a href="/deposit/investments/">Uninsured Investments</a></li>
- <li title="The Deposit Insurance Fund"><a href="/deposit/insurance/index.html">The Deposit Insurance Fund</a></li>
- <li title="International Deposit Insurance"><a href="/deposit/deposits/international/">International Deposit Insurance</a></li>
- </ul>
- <ul id="responsive_header-topnav-consumers_subnav">
- <li title="Consumer News & Information"><a href="/consumers/consumer/">Consumer News & Information</a></li>
- <li title="Loans & Mortgages"><a href="/consumers/loans/">Loans & Mortgages</a></li>
- <li title="Banking & Your Money"><a href="/consumers/banking/">Banking & Your Money</a></li>
- <li title="Financial Education & Literacy"><a href="/consumers/education/">Financial Education & Literacy</a></li>
- <li title="Community Affairs"><a href="/consumers/community/">Community Affairs</a></li>
- <li title="Identity Theft & Fraud"><a href="/consumers/theft/">Identity Theft & Fraud</a></li>
- <li title="Consumer Financial Privacy"><a href="/consumers/privacy/">Consumer Financial Privacy</a></li>
- </ul>
- <ul id="responsive_header-topnav-bank_subnav">
- <li title="Bank Data & Statistics"><a href="/bank/statistical/">Bank Data & Statistics</a></li>
- <li title="Research & Analysis"><a href="/bank/analytical/">Research & Analysis</a></li>
- <li title="Failed Banks"><a href="/bank/individual/failed/">Failed Banks</a></li>
- </ul>
- <ul id="responsive_header-topnav-regulations_subnav">
- <li title="Bank Examinations"><a href="/regulations/examinations/">Bank Examinations</a></li>
- <li title="Laws & Regulations"><a href="/regulations/laws/">Laws & Regulations</a></li>
- <li title="Resources for Bank Officers & Directors"><a href="/regulations/resources/">Resources for Bank Officers & Directors</a></li>
- <li title="FDICconnect"><a href="http://www.fdicconnect.gov/">FDIC<em>connect</em></a></li>
- <li title="Required Financial Reports"><a href="/regulations/required/">Required Financial Reports</a></li>
- <li title="Examiner Training Programs"><a href="/regulations/examiner/">Examiner Training Programs</a></li>
- </ul>
- <ul id="responsive_header-topnav-buying_subnav">
- <li title="Loan Sales"><a href="/buying/loan/">Loan Sales</a></li>
- <li title="Real Estate Sales"><a href="/buying/owned/">Real Estate and Property Marketplace</a></li>
- <li title="Financial Asset Sales"><a href="/buying/financial/">Financial Asset Sales</a></li>
- <li title="Servicing Sales Announcements"><a href="/buying/servicing/">Servicing Sales Announcements</a></li>
- <li title="Other Asset Sales"><a href="/buying/otherasset/">Other Asset Sales</a></li>
- <li title="Historical Sales"><a href="/buying/historical/">Historical Sales</a></li>
- </ul>
- <ul id="responsive_header-topnav-news_subnav">
- <li title="Press Releases"><a href="/news/news/press/2013/">Press Releases</a></li>
- <li title="Online Press Room"><a href="https://fdicsurvey.inquisiteasp.com/fdic/cgi-bin/qwebcorporate.dll?M58TRS">Online Press Room</a></li>
- <li title="Conferences & Events"><a href="/news/conferences/">Conferences & Events</a></li>
- <li title="Financial Institution Letters"><a href="/news/news/financial/2013/">Financial Institution Letters</a></li>
- <li title="Special Alerts"><a href="/news/news/SpecialAlert/2012/">Special Alerts</a></li>
- <li title="Letters to the Editor/Opinion Editorials"><a href="/news/letters/">Letters to the Editor/Opinion Editorials</a></li>
- <li title="Speeches & Testimony"><a href="/news/news/speeches/chairman/">Speeches & Testimony</a></li>
- </ul>
- <ul id="responsive_header-topnav-about_subnav">
- <li title="Mission & Purpose"><a href="/about/index.html#1">Mission & Purpose</a></span></li>
- <li title="Advisory Committees"><a href="/about/index.html#2">Advisory Committees</a></span></li>
- <li title="Careers with the FDIC"><a href="/about/index.html#3">Careers with the FDIC</a></span></li>
- <li title="Management Team"><a href="/about/index.html#4">Management Team</a></span></li>
- <li title="Plans & Reports"><a href="/about/index.html#5">Plans & Reports</a></span></li>
- <li title="What We Can Do for You"><a href="/about/index.html#6">What We Can Do for You</a></span></li>
- <li title="Diversity at the FDIC"><a href="/about/index.html#7">Diversity at the FDIC</a></span></li>
- </ul>
- </div><!-- Close subnav -->
- <div class="clear"></div>
- </div>
- </nav>
-</div>
-</header>
-<a id="after_header" name="after_header"></a>
-<script type="text/javascript">
-prepare_responsive_header_nav();
-</script>
-<!-- END of Header -->
-
-<div id="breadcrumbs"><a href="/">Home</a> > <a href="/bank/">Industry Analysis</a> > <a href="/bank/individual/failed/">Failed Banks</a> > Failed Bank List</div>
-
-<div id="content" class="failed_bank_list">
-
- <h1 class="page_title">Failed Bank List</h1>
-
- <p>The FDIC is often appointed as receiver for failed banks. This page contains useful information for the customers and vendors of these banks. This includes information on the acquiring bank (if applicable), how your accounts and loans are affected, and how vendors can file claims against the receivership. <a href="http://www2.fdic.gov/drrip/cs/index.asp">Failed Financial Institution Contact Search</a> displays point of contact information related to failed banks.</p>
-
- <p>This list includes banks which have failed since October 1, 2000. To search for banks that failed prior to those on this page, visit this link: <a href="http://www2.fdic.gov/hsob/SelectRpt.asp?EntryTyp=30">Failures and Assistance Transactions</a></p>
-
- <p><a href="banklist.csv">Failed Bank List</a> - CSV file (Updated on Mondays. Also opens in Excel - <a href="/excel.html">Excel Help</a>)</p>
-
- <p class="small_screen_warning">Due to the small screen size some information is no longer visible.<br>Full information available when viewed on a larger screen.</p>
-
- <script type="text/javascript">
- <!--
- document.writeln("<p><em>Click arrows next to headers to sort in Ascending or Descending order.</em></p>");
- //-->
- </script>
-
- <div id="table_wrapper">
- <table id="table" class="sortable">
- <thead>
- <tr>
- <th id="institution" scope="col">Bank Name</th>
- <th id="city" class="nosort" scope="col">City</th>
- <th id="state" scope="col">ST</th>
- <th id="cert" class="nosort" scope="col">CERT</th>
- <th id="ai" scope="col">Acquiring Institution</th>
- <th id="closing" scope="col">Closing Date</th>
- <th id="updated" scope="col">Updated Date</th>
- </tr>
- </thead>
- <tbody>
- <tr>
- <td class="institution"><a href="kenosha.html">Banks of Wisconsin d/b/a Bank of Kenosha</a></td>
- <td class="city">Kenosha</td>
- <td class="state">WI</td>
- <td class="cert">35386</td>
- <td class="ai">North Shore Bank, FSB</td>
- <td class="closing">May 31, 2013</td>
- <td class="updated">May 31, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="centralaz.html">Central Arizona Bank</a></td>
- <td class="city">Scottsdale</td>
- <td class="state">AZ</td>
- <td class="cert">34527</td>
- <td class="ai">Western State Bank</td>
- <td class="closing">May 14, 2013</td>
- <td class="updated">May 20, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="sunrisebank.html">Sunrise Bank</a></td>
- <td class="city">Valdosta</td>
- <td class="state">GA</td>
- <td class="cert">58185</td>
- <td class="ai">Synovus Bank</td>
- <td class="closing">May 10, 2013</td>
- <td class="updated">May 21, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="pisgahcommbk.html">Pisgah Community Bank</a></td>
- <td class="city">Asheville</td>
- <td class="state">NC</td>
- <td class="cert">58701</td>
- <td class="ai">Capital Bank, N.A.</td>
- <td class="closing">May 10, 2013</td>
- <td class="updated">May 14, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="douglascb.html">Douglas County Bank</a></td>
- <td class="city">Douglasville</td>
- <td class="state">GA</td>
- <td class="cert">21649</td>
- <td class="ai">Hamilton State Bank</td>
- <td class="closing">April 26, 2013</td>
- <td class="updated">May 16, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="parkway.html">Parkway Bank</a></td>
- <td class="city">Lenoir</td>
- <td class="state">NC</td>
- <td class="cert">57158</td>
- <td class="ai">CertusBank, National Association</td>
- <td class="closing">April 26, 2013</td>
- <td class="updated">May 17, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="chipola.html">Chipola Community Bank</a></td>
- <td class="city">Marianna</td>
- <td class="state">FL</td>
- <td class="cert">58034</td>
- <td class="ai">First Federal Bank of Florida</td>
- <td class="closing">April 19, 2013</td>
- <td class="updated">May 16, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="heritagebank-fl.html">Heritage Bank of North Florida</a></td>
- <td class="city">Orange Park</td>
- <td class="state">FL</td>
- <td class="cert">26680</td>
- <td class="ai">FirstAtlantic Bank</td>
- <td class="closing">April 19, 2013</td>
- <td class="updated">May 16, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstfederal-ky.html">First Federal Bank</a></td>
- <td class="city">Lexington</td>
- <td class="state">KY</td>
- <td class="cert">29594</td>
- <td class="ai">Your Community Bank</td>
- <td class="closing">April 19, 2013</td>
- <td class="updated">April 23, 2013</td>
- </tr>
- <td class="institution"><a href="goldcanyon.html">Gold Canyon Bank</a></td>
- <td class="city">Gold Canyon</td>
- <td class="state">AZ</td>
- <td class="cert">58066</td>
- <td class="ai">First Scottsdale Bank, National Association</td>
- <td class="closing">April 5, 2013</td>
- <td class="updated">April 9, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="frontier-ga.html">Frontier Bank</a></td>
- <td class="city">LaGrange</td>
- <td class="state">GA</td>
- <td class="cert">16431</td>
- <td class="ai">HeritageBank of the South</td>
- <td class="closing">March 8, 2013</td>
- <td class="updated">March 26, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="covenant-il.html">Covenant Bank</a></td>
- <td class="city">Chicago</td>
- <td class="state">IL</td>
- <td class="cert">22476</td>
- <td class="ai">Liberty Bank and Trust Company</td>
- <td class="closing">February 15, 2013</td>
- <td class="updated">March 4, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="1stregents.html">1st Regents Bank</a></td>
- <td class="city">Andover</td>
- <td class="state">MN</td>
- <td class="cert">57157</td>
- <td class="ai">First Minnesota Bank</td>
- <td class="closing">January 18, 2013</td>
- <td class="updated">February 28, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="westside.html">Westside Community Bank</a></td>
- <td class="city">University Place</td>
- <td class="state">WA</td>
- <td class="cert">33997</td>
- <td class="ai">Sunwest Bank</td>
- <td class="closing">January 11, 2013</td>
- <td class="updated">January 24, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="cmbkozarks.html">Community Bank of the Ozarks</a></td>
- <td class="city">Sunrise Beach</td>
- <td class="state">MO</td>
- <td class="cert">27331</td>
- <td class="ai">Bank of Sullivan</td>
- <td class="closing">December 14, 2012</td>
- <td class="updated">January 24, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="hometown.html">Hometown Community Bank</a></td>
- <td class="city">Braselton</td>
- <td class="state">GA</td>
- <td class="cert">57928</td>
- <td class="ai">CertusBank, National Association</td>
- <td class="closing">November 16, 2012</td>
- <td class="updated">January 24, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="cfnb.html">Citizens First National Bank</a></td>
- <td class="city">Princeton</td>
- <td class="state">IL</td>
- <td class="cert">3731</td>
- <td class="ai">Heartland Bank and Trust Company</td>
- <td class="closing">November 2, 2012</td>
- <td class="updated">January 24, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="heritage_fl.html">Heritage Bank of Florida</a></td>
- <td class="city">Lutz</td>
- <td class="state">FL</td>
- <td class="cert">35009</td>
- <td class="ai">Centennial Bank</td>
- <td class="closing">November 2, 2012</td>
- <td class="updated">January 24, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="novabank.html">NOVA Bank</a></td>
- <td class="city">Berwyn</td>
- <td class="state">PA</td>
- <td class="cert">27148</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">October 26, 2012</td>
- <td class="updated">January 24, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="excelbank.html">Excel Bank</a></td>
- <td class="city">Sedalia</td>
- <td class="state">MO</td>
- <td class="cert">19189</td>
- <td class="ai">Simmons First National Bank</td>
- <td class="closing">October 19, 2012</td>
- <td class="updated">January 24, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="firsteastside.html">First East Side Savings Bank</a></td>
- <td class="city">Tamarac</td>
- <td class="state">FL</td>
- <td class="cert">28144</td>
- <td class="ai">Stearns Bank N.A.</td>
- <td class="closing">October 19, 2012</td>
- <td class="updated">January 24, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="gulfsouth.html">GulfSouth Private Bank</a></td>
- <td class="city">Destin</td>
- <td class="state">FL</td>
- <td class="cert">58073</td>
- <td class="ai">SmartBank</td>
- <td class="closing">October 19, 2012</td>
- <td class="updated">January 24, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstunited.html">First United Bank</a></td>
- <td class="city">Crete</td>
- <td class="state">IL</td>
- <td class="cert">20685</td>
- <td class="ai">Old Plank Trail Community Bank, National Association</td>
- <td class="closing">September 28, 2012</td>
- <td class="updated">November 15, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="truman.html">Truman Bank</a></td>
- <td class="city">St. Louis</td>
- <td class="state">MO</td>
- <td class="cert">27316</td>
- <td class="ai">Simmons First National Bank</td>
- <td class="closing">September 14, 2012</td>
- <td class="updated">December 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstcommbk_mn.html">First Commercial Bank</a></td>
- <td class="city">Bloomington</td>
- <td class="state">MN</td>
- <td class="cert">35246</td>
- <td class="ai">Republic Bank & Trust Company</td>
- <td class="closing">September 7, 2012</td>
- <td class="updated">December 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="waukegan.html">Waukegan Savings Bank</a></td>
- <td class="city">Waukegan</td>
- <td class="state">IL</td>
- <td class="cert">28243</td>
- <td class="ai">First Midwest Bank</td>
- <td class="closing">August 3, 2012</td>
- <td class="updated">October 11, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="jasper.html">Jasper Banking Company</a></td>
- <td class="city">Jasper</td>
- <td class="state">GA</td>
- <td class="cert">16240</td>
- <td class="ai">Stearns Bank N.A.</td>
- <td class="closing">July 27, 2012</td>
- <td class="updated">December 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="secondfederal.html">Second Federal Savings and Loan Association of Chicago</a></td>
- <td class="city">Chicago</td>
- <td class="state">IL</td>
- <td class="cert">27986</td>
- <td class="ai">Hinsdale Bank & Trust Company</td>
- <td class="closing">July 20, 2012</td>
- <td class="updated">January 14, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="heartland.html">Heartland Bank</a></td>
- <td class="city">Leawood</td>
- <td class="state">KS</td>
- <td class="cert">1361</td>
- <td class="ai">Metcalf Bank</td>
- <td class="closing">July 20, 2012</td>
- <td class="updated">December 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="cherokee.html">First Cherokee State Bank</a></td>
- <td class="city">Woodstock</td>
- <td class="state">GA</td>
- <td class="cert">32711</td>
- <td class="ai">Community & Southern Bank</td>
- <td class="closing">July 20, 2012</td>
- <td class="updated">October 31, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="georgiatrust.html">Georgia Trust Bank</a></td>
- <td class="city">Buford</td>
- <td class="state">GA</td>
- <td class="cert">57847</td>
- <td class="ai">Community & Southern Bank</td>
- <td class="closing">July 20, 2012</td>
- <td class="updated">December 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="royalpalm.html">The Royal Palm Bank of Florida</a></td>
- <td class="city">Naples</td>
- <td class="state">FL</td>
- <td class="cert">57096</td>
- <td class="ai">First National Bank of the Gulf Coast</td>
- <td class="closing">July 20, 2012</td>
- <td class="updated">January 7, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="glasgow.html">Glasgow Savings Bank</a></td>
- <td class="city">Glasgow</td>
- <td class="state">MO</td>
- <td class="cert">1056</td>
- <td class="ai">Regional Missouri Bank</td>
- <td class="closing">July 13, 2012</td>
- <td class="updated">October 11, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="montgomery.html">Montgomery Bank & Trust</a></td>
- <td class="city">Ailey</td>
- <td class="state">GA</td>
- <td class="cert">19498</td>
- <td class="ai">Ameris Bank</td>
- <td class="closing">July 6, 2012</td>
- <td class="updated">October 31, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="farmersbank.html">The Farmers Bank of Lynchburg</a></td>
- <td class="city">Lynchburg</td>
- <td class="state">TN</td>
- <td class="cert">1690</td>
- <td class="ai">Clayton Bank and Trust</td>
- <td class="closing">June 15, 2012</td>
- <td class="updated">October 31, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="securityexchange.html">Security Exchange Bank</a></td>
- <td class="city">Marietta</td>
- <td class="state">GA</td>
- <td class="cert">35299</td>
- <td class="ai">Fidelity Bank</td>
- <td class="closing">June 15, 2012</td>
- <td class="updated">October 10, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="putnam.html">Putnam State Bank</a></td>
- <td class="city">Palatka</td>
- <td class="state">FL</td>
- <td class="cert">27405</td>
- <td class="ai">Harbor Community Bank</td>
- <td class="closing">June 15, 2012</td>
- <td class="updated">October 10, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="waccamaw.html">Waccamaw Bank</a></td>
- <td class="city">Whiteville</td>
- <td class="state">NC</td>
- <td class="cert">34515</td>
- <td class="ai">First Community Bank</td>
- <td class="closing">June 8, 2012</td>
- <td class="updated">November 8, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="ftsb.html">Farmers' and Traders' State Bank</a></td>
- <td class="city">Shabbona</td>
- <td class="state">IL</td>
- <td class="cert">9257</td>
- <td class="ai">First State Bank</td>
- <td class="closing">June 8, 2012</td>
- <td class="updated">October 10, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="carolina.html">Carolina Federal Savings Bank</a></td>
- <td class="city">Charleston</td>
- <td class="state">SC</td>
- <td class="cert">35372</td>
- <td class="ai">Bank of North Carolina</td>
- <td class="closing">June 8, 2012</td>
- <td class="updated">October 31, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstcapital.html">First Capital Bank</a></td>
- <td class="city">Kingfisher</td>
- <td class="state">OK</td>
- <td class="cert">416</td>
- <td class="ai">F & M Bank</td>
- <td class="closing">June 8, 2012</td>
- <td class="updated">October 10, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="alabamatrust.html">Alabama Trust Bank, National Association</a></td>
- <td class="city">Sylacauga</td>
- <td class="state">AL</td>
- <td class="cert">35224</td>
- <td class="ai">Southern States Bank</td>
- <td class="closing">May 18, 2012</td>
- <td class="updated">May 20, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="securitybank.html">Security Bank, National Association</a></td>
- <td class="city">North Lauderdale</td>
- <td class="state">FL</td>
- <td class="cert">23156</td>
- <td class="ai">Banesco USA</td>
- <td class="closing">May 4, 2012</td>
- <td class="updated">October 31, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="palmdesert.html">Palm Desert National Bank</a></td>
- <td class="city">Palm Desert</td>
- <td class="state">CA</td>
- <td class="cert">23632</td>
- <td class="ai">Pacific Premier Bank</td>
- <td class="closing">April 27, 2012</td>
- <td class="updated">May 17, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="plantation.html">Plantation Federal Bank</a></td>
- <td class="city">Pawleys Island</td>
- <td class="state">SC</td>
- <td class="cert">32503</td>
- <td class="ai">First Federal Bank</td>
- <td class="closing">April 27, 2012</td>
- <td class="updated">May 17, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="interbank.html">Inter Savings Bank, fsb D/B/A InterBank, fsb</a></td>
- <td class="city">Maple Grove</td>
- <td class="state">MN</td>
- <td class="cert">31495</td>
- <td class="ai">Great Southern Bank</td>
- <td class="closing">April 27, 2012</td>
- <td class="updated">May 17, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="harvest.html">HarVest Bank of Maryland</a></td>
- <td class="city">Gaithersburg</td>
- <td class="state">MD</td>
- <td class="cert">57766</td>
- <td class="ai">Sonabank</td>
- <td class="closing">April 27, 2012</td>
- <td class="updated">May 17, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="easternshore.html">Bank of the Eastern Shore</a></td>
- <td class="city">Cambridge</td>
- <td class="state">MD</td>
- <td class="cert">26759</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">April 27, 2012</td>
- <td class="updated">October 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="fortlee.html">Fort Lee Federal Savings Bank, FSB</a></td>
- <td class="city">Fort Lee</td>
- <td class="state">NJ</td>
- <td class="cert">35527</td>
- <td class="ai">Alma Bank</td>
- <td class="closing">April 20, 2012</td>
- <td class="updated">May 17, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="fidelity.html">Fidelity Bank</a></td>
- <td class="city">Dearborn</td>
- <td class="state">MI</td>
- <td class="cert">33883</td>
- <td class="ai">The Huntington National Bank</td>
- <td class="closing">March 30, 2012</td>
- <td class="updated">May 16, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="premier-il.html">Premier Bank</a></td>
- <td class="city">Wilmette</td>
- <td class="state">IL</td>
- <td class="cert">35419</td>
- <td class="ai">International Bank of Chicago</td>
- <td class="closing">March 23, 2012</td>
- <td class="updated">October 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="covenant.html">Covenant Bank & Trust</a></td>
- <td class="city">Rock Spring</td>
- <td class="state">GA</td>
- <td class="cert">58068</td>
- <td class="ai">Stearns Bank, N.A.</td>
- <td class="closing">March 23, 2012</td>
- <td class="updated">October 31, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="newcity.html">New City Bank</a></td>
- <td class="city">Chicago</td>
- <td class="state">IL</td>
- <td class="cert">57597</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">March 9, 2012</td>
- <td class="updated">October 29, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="global.html">Global Commerce Bank</a></td>
- <td class="city">Doraville</td>
- <td class="state">GA</td>
- <td class="cert">34046</td>
- <td class="ai">Metro City Bank</td>
- <td class="closing">March 2, 2012</td>
- <td class="updated">October 31, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="homesvgs.html">Home Savings of America</a></td>
- <td class="city">Little Falls</td>
- <td class="state">MN</td>
- <td class="cert">29178</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">February 24, 2012</td>
- <td class="updated">December 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="cbg.html">Central Bank of Georgia</a></td>
- <td class="city">Ellaville</td>
- <td class="state">GA</td>
- <td class="cert">5687</td>
- <td class="ai">Ameris Bank</td>
- <td class="closing">February 24, 2012</td>
- <td class="updated">August 9, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="scbbank.html">SCB Bank</a></td>
- <td class="city">Shelbyville</td>
- <td class="state">IN</td>
- <td class="cert">29761</td>
- <td class="ai">First Merchants Bank, National Association</td>
- <td class="closing">February 10, 2012</td>
- <td class="updated">March 25, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="cnbt.html">Charter National Bank and Trust</a></td>
- <td class="city">Hoffman Estates</td>
- <td class="state">IL</td>
- <td class="cert">23187</td>
- <td class="ai">Barrington Bank & Trust Company, National Association</td>
- <td class="closing">February 10, 2012</td>
- <td class="updated">March 25, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="bankeast.html">BankEast</a></td>
- <td class="city">Knoxville</td>
- <td class="state">TN</td>
- <td class="cert">19869</td>
- <td class="ai">U.S.Bank National Association</td>
- <td class="closing">January 27, 2012</td>
- <td class="updated">March 8, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="patriot-mn.html">Patriot Bank Minnesota</a></td>
- <td class="city">Forest Lake</td>
- <td class="state">MN</td>
- <td class="cert">34823</td>
- <td class="ai">First Resource Bank</td>
- <td class="closing">January 27, 2012</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="tcb.html">Tennessee Commerce Bank</a></td>
- <td class="city">Franklin</td>
- <td class="state">TN</td>
- <td class="cert">35296</td>
- <td class="ai">Republic Bank & Trust Company</td>
- <td class="closing">January 27, 2012</td>
- <td class="updated">November 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="fgbtcj.html">First Guaranty Bank and Trust Company of Jacksonville</a></td>
- <td class="city">Jacksonville</td>
- <td class="state">FL</td>
- <td class="cert">16579</td>
- <td class="ai">CenterState Bank of Florida, N.A.</td>
- <td class="closing">January 27, 2012</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="americaneagle.html">American Eagle Savings Bank</a></td>
- <td class="city">Boothwyn</td>
- <td class="state">PA</td>
- <td class="cert">31581</td>
- <td class="ai">Capital Bank, N.A.</td>
- <td class="closing">January 20, 2012</td>
- <td class="updated">January 25, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="firststatebank-ga.html">The First State Bank</a></td>
- <td class="city">Stockbridge</td>
- <td class="state">GA</td>
- <td class="cert">19252</td>
- <td class="ai">Hamilton State Bank</td>
- <td class="closing">January 20, 2012</td>
- <td class="updated">January 25, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="cfsb.html">Central Florida State Bank</a></td>
- <td class="city">Belleview</td>
- <td class="state">FL</td>
- <td class="cert">57186</td>
- <td class="ai">CenterState Bank of Florida, N.A.</td>
- <td class="closing">January 20, 2012</td>
- <td class="updated">January 25, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="westernnatl.html">Western National Bank</a></td>
- <td class="city">Phoenix</td>
- <td class="state">AZ</td>
- <td class="cert">57917</td>
- <td class="ai">Washington Federal</td>
- <td class="closing">December 16, 2011</td>
- <td class="updated">August 13, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="premier-fl.html">Premier Community Bank of the Emerald Coast</a></td>
- <td class="city">Crestview</td>
- <td class="state">FL</td>
- <td class="cert">58343</td>
- <td class="ai">Summit Bank</td>
- <td class="closing">December 16, 2011</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="centralprog.html">Central Progressive Bank</a></td>
- <td class="city">Lacombe</td>
- <td class="state">LA</td>
- <td class="cert">19657</td>
- <td class="ai">First NBC Bank</td>
- <td class="closing">November 18, 2011</td>
- <td class="updated">August 13, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="polkcounty.html">Polk County Bank</a></td>
- <td class="city">Johnston</td>
- <td class="state">IA</td>
- <td class="cert">14194</td>
- <td class="ai">Grinnell State Bank</td>
- <td class="closing">November 18, 2011</td>
- <td class="updated">August 15, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="rockmart.html">Community Bank of Rockmart</a></td>
- <td class="city">Rockmart</td>
- <td class="state">GA</td>
- <td class="cert">57860</td>
- <td class="ai">Century Bank of Georgia</td>
- <td class="closing">November 10, 2011</td>
- <td class="updated">August 13, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="sunfirst.html">SunFirst Bank</a></td>
- <td class="city">Saint George</td>
- <td class="state">UT</td>
- <td class="cert">57087</td>
- <td class="ai">Cache Valley Bank</td>
- <td class="closing">November 4, 2011</td>
- <td class="updated">November 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="midcity.html">Mid City Bank, Inc.</a></td>
- <td class="city">Omaha</td>
- <td class="state">NE</td>
- <td class="cert">19397</td>
- <td class="ai">Premier Bank</td>
- <td class="closing">November 4, 2011</td>
- <td class="updated">August 15, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="allamerican.html ">All American Bank</a></td>
- <td class="city">Des Plaines</td>
- <td class="state">IL</td>
- <td class="cert">57759</td>
- <td class="ai">International Bank of Chicago</td>
- <td class="closing">October 28, 2011</td>
- <td class="updated">August 15, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="commbanksco.html">Community Banks of Colorado</a></td>
- <td class="city">Greenwood Village</td>
- <td class="state">CO</td>
- <td class="cert">21132</td>
- <td class="ai">Bank Midwest, N.A.</td>
- <td class="closing">October 21, 2011</td>
- <td class="updated">January 2, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="commcapbk.html">Community Capital Bank</a></td>
- <td class="city">Jonesboro</td>
- <td class="state">GA</td>
- <td class="cert">57036</td>
- <td class="ai">State Bank and Trust Company</td>
- <td class="closing">October 21, 2011</td>
- <td class="updated">November 8, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="decatur.html">Decatur First Bank</a></td>
- <td class="city">Decatur</td>
- <td class="state">GA</td>
- <td class="cert">34392</td>
- <td class="ai">Fidelity Bank</td>
- <td class="closing">October 21, 2011</td>
- <td class="updated">November 8, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="oldharbor.html">Old Harbor Bank</a></td>
- <td class="city">Clearwater</td>
- <td class="state">FL</td>
- <td class="cert">57537</td>
- <td class="ai">1st United Bank</td>
- <td class="closing">October 21, 2011</td>
- <td class="updated">November 8, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="countrybank.html">Country Bank</a></td>
- <td class="city">Aledo</td>
- <td class="state">IL</td>
- <td class="cert">35395</td>
- <td class="ai">Blackhawk Bank & Trust</td>
- <td class="closing">October 14, 2011</td>
- <td class="updated">August 15, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firststatebank-nj.html">First State Bank</a></td>
- <td class="city">Cranford</td>
- <td class="state">NJ</td>
- <td class="cert">58046</td>
- <td class="ai">Northfield Bank</td>
- <td class="closing">October 14, 2011</td>
- <td class="updated">November 8, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="blueridge.html">Blue Ridge Savings Bank, Inc.</a></td>
- <td class="city">Asheville</td>
- <td class="state">NC</td>
- <td class="cert">32347</td>
- <td class="ai">Bank of North Carolina</td>
- <td class="closing">October 14, 2011</td>
- <td class="updated">November 8, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="piedmont-ga.html">Piedmont Community Bank</a></td>
- <td class="city">Gray</td>
- <td class="state">GA</td>
- <td class="cert">57256</td>
- <td class="ai">State Bank and Trust Company</td>
- <td class="closing">October 14, 2011</td>
- <td class="updated">January 22, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="sunsecurity.html">Sun Security Bank</a></td>
- <td class="city">Ellington</td>
- <td class="state">MO</td>
- <td class="cert">20115</td>
- <td class="ai">Great Southern Bank</td>
- <td class="closing">October 7, 2011</td>
- <td class="updated">November 7, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="riverbank.html">The RiverBank</a></td>
- <td class="city">Wyoming</td>
- <td class="state">MN</td>
- <td class="cert">10216</td>
- <td class="ai">Central Bank</td>
- <td class="closing">October 7, 2011</td>
- <td class="updated">November 7, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstintlbank.html">First International Bank</a></td>
- <td class="city">Plano</td>
- <td class="state">TX</td>
- <td class="cert">33513</td>
- <td class="ai">American First National Bank</td>
- <td class="closing">September 30, 2011</td>
- <td class="updated">October 9, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="cbnc.html">Citizens Bank of Northern California</a></td>
- <td class="city">Nevada City</td>
- <td class="state">CA</td>
- <td class="cert">33983</td>
- <td class="ai">Tri Counties Bank</td>
- <td class="closing">September 23, 2011</td>
- <td class="updated">October 9, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="boc-va.html">Bank of the Commonwealth</a></td>
- <td class="city">Norfolk</td>
- <td class="state">VA</td>
- <td class="cert">20408</td>
- <td class="ai">Southern Bank and Trust Company</td>
- <td class="closing">September 23, 2011</td>
- <td class="updated">October 9, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="fnbf.html">The First National Bank of Florida</a></td>
- <td class="city">Milton</td>
- <td class="state">FL</td>
- <td class="cert">25155</td>
- <td class="ai">CharterBank</td>
- <td class="closing">September 9, 2011</td>
- <td class="updated">September 6, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="creekside.html">CreekSide Bank</a></td>
- <td class="city">Woodstock</td>
- <td class="state">GA</td>
- <td class="cert">58226</td>
- <td class="ai">Georgia Commerce Bank</td>
- <td class="closing">September 2, 2011</td>
- <td class="updated">September 6, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="patriot.html">Patriot Bank of Georgia</a></td>
- <td class="city">Cumming</td>
- <td class="state">GA</td>
- <td class="cert">58273</td>
- <td class="ai">Georgia Commerce Bank</td>
- <td class="closing">September 2, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstchoice-il.html">First Choice Bank</a></td>
- <td class="city">Geneva</td>
- <td class="state">IL</td>
- <td class="cert">57212</td>
- <td class="ai">Inland Bank & Trust</td>
- <td class="closing">August 19, 2011</td>
- <td class="updated">August 15, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstsouthern-ga.html">First Southern National Bank</a></td>
- <td class="city">Statesboro</td>
- <td class="state">GA</td>
- <td class="cert">57239</td>
- <td class="ai">Heritage Bank of the South</td>
- <td class="closing">August 19, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="lydian.html">Lydian Private Bank</a></td>
- <td class="city">Palm Beach</td>
- <td class="state">FL</td>
- <td class="cert">35356</td>
- <td class="ai">Sabadell United Bank, N.A.</td>
- <td class="closing">August 19, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="publicsvgs.html">Public Savings Bank</a></td>
- <td class="city">Huntingdon Valley</td>
- <td class="state">PA</td>
- <td class="cert">34130</td>
- <td class="ai">Capital Bank, N.A.</td>
- <td class="closing">August 18, 2011</td>
- <td class="updated">August 15, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="fnbo.html">The First National Bank of Olathe</a></td>
- <td class="city">Olathe</td>
- <td class="state">KS</td>
- <td class="cert">4744</td>
- <td class="ai">Enterprise Bank & Trust</td>
- <td class="closing">August 12, 2011</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="whitman.html">Bank of Whitman</a></td>
- <td class="city">Colfax</td>
- <td class="state">WA</td>
- <td class="cert">22528</td>
- <td class="ai">Columbia State Bank</td>
- <td class="closing">August 5, 2011</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="shorewood.html">Bank of Shorewood</a></td>
- <td class="city">Shorewood</td>
- <td class="state">IL</td>
- <td class="cert">22637</td>
- <td class="ai">Heartland Bank and Trust Company</td>
- <td class="closing">August 5, 2011</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="integra.html">Integra Bank National Association</a></td>
- <td class="city">Evansville</td>
- <td class="state">IN</td>
- <td class="cert">4392</td>
- <td class="ai">Old National Bank</td>
- <td class="closing">July 29, 2011</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="bankmeridian.html">BankMeridian, N.A.</a></td>
- <td class="city">Columbia</td>
- <td class="state">SC</td>
- <td class="cert">58222</td>
- <td class="ai">SCBT National Association</td>
- <td class="closing">July 29, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="vbb.html">Virginia Business Bank</a></td>
- <td class="city">Richmond</td>
- <td class="state">VA</td>
- <td class="cert">58283</td>
- <td class="ai">Xenith Bank</td>
- <td class="closing">July 29, 2011</td>
- <td class="updated">October 9, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="bankofchoice.html">Bank of Choice</a></td>
- <td class="city">Greeley</td>
- <td class="state">CO</td>
- <td class="cert">2994</td>
- <td class="ai">Bank Midwest, N.A.</td>
- <td class="closing">July 22, 2011</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="landmark.html">LandMark Bank of Florida</a></td>
- <td class="city">Sarasota</td>
- <td class="state">FL</td>
- <td class="cert">35244</td>
- <td class="ai">American Momentum Bank</td>
- <td class="closing">July 22, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="southshore.html">Southshore Community Bank</a></td>
- <td class="city">Apollo Beach</td>
- <td class="state">FL</td>
- <td class="cert">58056</td>
- <td class="ai">American Momentum Bank</td>
- <td class="closing">July 22, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="summitbank.html">Summit Bank</a></td>
- <td class="city">Prescott</td>
- <td class="state">AZ</td>
- <td class="cert">57442</td>
- <td class="ai">The Foothills Bank</td>
- <td class="closing">July 15, 2011</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstpeoples.html">First Peoples Bank</a></td>
- <td class="city">Port St. Lucie</td>
- <td class="state">FL</td>
- <td class="cert">34870</td>
- <td class="ai">Premier American Bank, N.A.</td>
- <td class="closing">July 15, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="hightrust.html">High Trust Bank</a></td>
- <td class="city">Stockbridge</td>
- <td class="state">GA</td>
- <td class="cert">19554</td>
- <td class="ai">Ameris Bank</td>
- <td class="closing">July 15, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="onegeorgia.html">One Georgia Bank</a></td>
- <td class="city">Atlanta</td>
- <td class="state">GA</td>
- <td class="cert">58238</td>
- <td class="ai">Ameris Bank</td>
- <td class="closing">July 15, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="signaturebank.html">Signature Bank</a></td>
- <td class="city">Windsor</td>
- <td class="state">CO</td>
- <td class="cert">57835</td>
- <td class="ai">Points West Community Bank</td>
- <td class="closing">July 8, 2011</td>
- <td class="updated">October 26, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="coloradocapital.html">Colorado Capital Bank</a></td>
- <td class="city">Castle Rock</td>
- <td class="state">CO</td>
- <td class="cert">34522</td>
- <td class="ai">First-Citizens Bank & Trust Company</td>
- <td class="closing">July 8, 2011</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstchicago.html">First Chicago Bank & Trust</a></td>
- <td class="city">Chicago</td>
- <td class="state">IL</td>
- <td class="cert">27935</td>
- <td class="ai">Northbrook Bank & Trust Company</td>
- <td class="closing">July 8, 2011</td>
- <td class="updated">September 9, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="mountain.html">Mountain Heritage Bank</a></td>
- <td class="city">Clayton</td>
- <td class="state">GA</td>
- <td class="cert">57593</td>
- <td class="ai">First American Bank and Trust Company</td>
- <td class="closing">June 24, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="fcbtb.html">First Commercial Bank of Tampa Bay</a></td>
- <td class="city">Tampa</td>
- <td class="state">FL</td>
- <td class="cert">27583</td>
- <td class="ai">Stonegate Bank</td>
- <td class="closing">June 17, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="mcintoshstate.html">McIntosh State Bank</a></td>
- <td class="city">Jackson</td>
- <td class="state">GA</td>
- <td class="cert">19237</td>
- <td class="ai">Hamilton State Bank</td>
- <td class="closing">June 17, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="atlanticbanktrust.html">Atlantic Bank and Trust</a></td>
- <td class="city">Charleston</td>
- <td class="state">SC</td>
- <td class="cert">58420</td>
- <td class="ai">First Citizens Bank and Trust Company, Inc.</td>
- <td class="closing">June 3, 2011</td>
- <td class="updated">October 31, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstheritage.html">First Heritage Bank</a></td>
- <td class="city">Snohomish</td>
- <td class="state">WA</td>
- <td class="cert">23626</td>
- <td class="ai">Columbia State Bank</td>
- <td class="closing">May 27, 2011</td>
- <td class="updated">January 28, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="summit.html">Summit Bank</a></td>
- <td class="city">Burlington</td>
- <td class="state">WA</td>
- <td class="cert">513</td>
- <td class="ai">Columbia State Bank</td>
- <td class="closing">May 20, 2011</td>
- <td class="updated">January 22, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="fgbc.html">First Georgia Banking Company</a></td>
- <td class="city">Franklin</td>
- <td class="state">GA</td>
- <td class="cert">57647</td>
- <td class="ai">CertusBank, National Association</td>
- <td class="closing">May 20, 2011</td>
- <td class="updated">November 13, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="atlanticsthrn.html">Atlantic Southern Bank</a></td>
- <td class="city">Macon</td>
- <td class="state">GA</td>
- <td class="cert">57213</td>
- <td class="ai">CertusBank, National Association</td>
- <td class="closing">May 20, 2011</td>
- <td class="updated">October 31, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="coastal_fl.html">Coastal Bank</a></td>
- <td class="city">Cocoa Beach</td>
- <td class="state">FL</td>
- <td class="cert">34898</td>
- <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td>
- <td class="closing">May 6, 2011</td>
- <td class="updated">November 30, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="communitycentral.html">Community Central Bank</a></td>
- <td class="city">Mount Clemens</td>
- <td class="state">MI</td>
- <td class="cert">34234</td>
- <td class="ai">Talmer Bank & Trust</td>
- <td class="closing">April 29, 2011</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="parkavenue_ga.html">The Park Avenue Bank</a></td>
- <td class="city">Valdosta</td>
- <td class="state">GA</td>
- <td class="cert">19797</td>
- <td class="ai">Bank of the Ozarks</td>
- <td class="closing">April 29, 2011</td>
- <td class="updated">November 30, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstchoice.html">First Choice Community Bank</a></td>
- <td class="city">Dallas</td>
- <td class="state">GA</td>
- <td class="cert">58539</td>
- <td class="ai">Bank of the Ozarks</td>
- <td class="closing">April 29, 2011</td>
- <td class="updated">January 22, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="cortez.html">Cortez Community Bank</a></td>
- <td class="city">Brooksville</td>
- <td class="state">FL</td>
- <td class="cert">57625</td>
- <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td>
- <td class="closing">April 29, 2011</td>
- <td class="updated">November 30, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="fnbcf.html">First National Bank of Central Florida</a></td>
- <td class="city">Winter Park</td>
- <td class="state">FL</td>
- <td class="cert">26297</td>
- <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td>
- <td class="closing">April 29, 2011</td>
- <td class="updated">November 30, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="heritage_ms.html">Heritage Banking Group</a></td>
- <td class="city">Carthage</td>
- <td class="state">MS</td>
- <td class="cert">14273</td>
- <td class="ai">Trustmark National Bank</td>
- <td class="closing">April 15, 2011</td>
- <td class="updated">November 30, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="rosemount.html">Rosemount National Bank</a></td>
- <td class="city">Rosemount</td>
- <td class="state">MN</td>
- <td class="cert">24099</td>
- <td class="ai">Central Bank</td>
- <td class="closing">April 15, 2011</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="superior_al.html">Superior Bank</a></td>
- <td class="city">Birmingham</td>
- <td class="state">AL</td>
- <td class="cert">17750</td>
- <td class="ai">Superior Bank, National Association</td>
- <td class="closing">April 15, 2011</td>
- <td class="updated">November 30, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="nexity.html">Nexity Bank</a></td>
- <td class="city">Birmingham</td>
- <td class="state">AL</td>
- <td class="cert">19794</td>
- <td class="ai">AloStar Bank of Commerce</td>
- <td class="closing">April 15, 2011</td>
- <td class="updated">September 4, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="newhorizons.html">New Horizons Bank</a></td>
- <td class="city">East Ellijay</td>
- <td class="state">GA</td>
- <td class="cert">57705</td>
- <td class="ai">Citizens South Bank</td>
- <td class="closing">April 15, 2011</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="bartow.html">Bartow County Bank</a></td>
- <td class="city">Cartersville</td>
- <td class="state">GA</td>
- <td class="cert">21495</td>
- <td class="ai">Hamilton State Bank</td>
- <td class="closing">April 15, 2011</td>
- <td class="updated">January 22, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="nevadacommerce.html">Nevada Commerce Bank</a></td>
- <td class="city">Las Vegas</td>
- <td class="state">NV</td>
- <td class="cert">35418</td>
- <td class="ai">City National Bank</td>
- <td class="closing">April 8, 2011</td>
- <td class="updated">September 9, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="westernsprings.html">Western Springs National Bank and Trust</a></td>
- <td class="city">Western Springs</td>
- <td class="state">IL</td>
- <td class="cert">10086</td>
- <td class="ai">Heartland Bank and Trust Company</td>
- <td class="closing">April 8, 2011</td>
- <td class="updated">January 22, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="bankofcommerce.html">The Bank of Commerce</a></td>
- <td class="city">Wood Dale</td>
- <td class="state">IL</td>
- <td class="cert">34292</td>
- <td class="ai">Advantage National Bank Group</td>
- <td class="closing">March 25, 2011</td>
- <td class="updated">January 22, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="legacy-wi.html">Legacy Bank</a></td>
- <td class="city">Milwaukee</td>
- <td class="state">WI</td>
- <td class="cert">34818</td>
- <td class="ai">Seaway Bank and Trust Company</td>
- <td class="closing">March 11, 2011</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstnatldavis.html">First National Bank of Davis</a></td>
- <td class="city">Davis</td>
- <td class="state">OK</td>
- <td class="cert">4077</td>
- <td class="ai">The Pauls Valley National Bank</td>
- <td class="closing">March 11, 2011</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="valleycomm.html">Valley Community Bank</a></td>
- <td class="city">St. Charles</td>
- <td class="state">IL</td>
- <td class="cert">34187</td>
- <td class="ai">First State Bank</td>
- <td class="closing">February 25, 2011</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="sanluistrust.html">San Luis Trust Bank, FSB</a></td>
- <td class="city">San Luis Obispo</td>
- <td class="state">CA</td>
- <td class="cert">34783</td>
- <td class="ai">First California Bank</td>
- <td class="closing">February 18, 2011</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="charteroak.html">Charter Oak Bank</a></td>
- <td class="city">Napa</td>
- <td class="state">CA</td>
- <td class="cert">57855</td>
- <td class="ai">Bank of Marin</td>
- <td class="closing">February 18, 2011</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="citizensbk_ga.html">Citizens Bank of Effingham</a></td>
- <td class="city">Springfield</td>
- <td class="state">GA</td>
- <td class="cert">34601</td>
- <td class="ai">Heritage Bank of the South</td>
- <td class="closing">February 18, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="habersham.html">Habersham Bank</a></td>
- <td class="city">Clarkesville</td>
- <td class="state">GA</td>
- <td class="cert">151</td>
- <td class="ai">SCBT National Association</td>
- <td class="closing">February 18, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="canyonstate.html">Canyon National Bank</a></td>
- <td class="city">Palm Springs</td>
- <td class="state">CA</td>
- <td class="cert">34692</td>
- <td class="ai">Pacific Premier Bank</td>
- <td class="closing">February 11, 2011</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="badgerstate.html">Badger State Bank</a></td>
- <td class="city">Cassville</td>
- <td class="state">WI</td>
- <td class="cert">13272</td>
- <td class="ai">Royal Bank</td>
- <td class="closing">February 11, 2011</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="peoplesstatebank.html">Peoples State Bank</a></td>
- <td class="city">Hamtramck</td>
- <td class="state">MI</td>
- <td class="cert">14939</td>
- <td class="ai">First Michigan Bank</td>
- <td class="closing">February 11, 2011</td>
- <td class="updated">January 22, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="sunshinestate.html">Sunshine State Community Bank</a></td>
- <td class="city">Port Orange</td>
- <td class="state">FL</td>
- <td class="cert">35478</td>
- <td class="ai">Premier American Bank, N.A.</td>
- <td class="closing">February 11, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="commfirst_il.html">Community First Bank Chicago</a></td>
- <td class="city">Chicago</td>
- <td class="state">IL</td>
- <td class="cert">57948</td>
- <td class="ai">Northbrook Bank & Trust Company</td>
- <td class="closing">February 4, 2011</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="northgabank.html">North Georgia Bank</a></td>
- <td class="city">Watkinsville</td>
- <td class="state">GA</td>
- <td class="cert">35242</td>
- <td class="ai">BankSouth</td>
- <td class="closing">February 4, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="americantrust.html">American Trust Bank</a></td>
- <td class="city">Roswell</td>
- <td class="state">GA</td>
- <td class="cert">57432</td>
- <td class="ai">Renasant Bank</td>
- <td class="closing">February 4, 2011</td>
- <td class="updated">October 31, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstcomm_nm.html">First Community Bank</a></td>
- <td class="city">Taos</td>
- <td class="state">NM</td>
- <td class="cert">12261</td>
- <td class="ai">U.S. Bank, N.A.</td>
- <td class="closing">January 28, 2011</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstier.html">FirsTier Bank</a></td>
- <td class="city">Louisville</td>
- <td class="state">CO</td>
- <td class="cert">57646</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">January 28, 2011</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="evergreenstatewi.html">Evergreen State Bank</a></td>
- <td class="city">Stoughton</td>
- <td class="state">WI</td>
- <td class="cert">5328</td>
- <td class="ai">McFarland State Bank</td>
- <td class="closing">January 28, 2011</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firststatebank_ok.html">The First State Bank</a></td>
- <td class="city">Camargo</td>
- <td class="state">OK</td>
- <td class="cert">2303</td>
- <td class="ai">Bank 7</td>
- <td class="closing">January 28, 2011</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="unitedwestern.html">United Western Bank</a></td>
- <td class="city">Denver</td>
- <td class="state">CO</td>
- <td class="cert">31293</td>
- <td class="ai">First-Citizens Bank & Trust Company</td>
- <td class="closing">January 21, 2011</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="bankofasheville.html">The Bank of Asheville</a></td>
- <td class="city">Asheville</td>
- <td class="state">NC</td>
- <td class="cert">34516</td>
- <td class="ai">First Bank</td>
- <td class="closing">January 21, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="commsouth.html">CommunitySouth Bank & Trust</a></td>
- <td class="city">Easley</td>
- <td class="state">SC</td>
- <td class="cert">57868</td>
- <td class="ai">CertusBank, National Association</td>
- <td class="closing">January 21, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="enterprise.html">Enterprise Banking Company</a></td>
- <td class="city">McDonough</td>
- <td class="state">GA</td>
- <td class="cert">19758</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">January 21, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="oglethorpe.html">Oglethorpe Bank</a></td>
- <td class="city">Brunswick</td>
- <td class="state">GA</td>
- <td class="cert">57440</td>
- <td class="ai">Bank of the Ozarks</td>
- <td class="closing">January 14, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="legacybank.html">Legacy Bank</a></td>
- <td class="city">Scottsdale</td>
- <td class="state">AZ</td>
- <td class="cert">57820</td>
- <td class="ai">Enterprise Bank & Trust</td>
- <td class="closing">January 7, 2011</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstcommercial.html">First Commercial Bank of Florida</a></td>
- <td class="city">Orlando</td>
- <td class="state">FL</td>
- <td class="cert">34965</td>
- <td class="ai">First Southern Bank</td>
- <td class="closing">January 7, 2011</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="communitynatl.html">Community National Bank</a></td>
- <td class="city">Lino Lakes</td>
- <td class="state">MN</td>
- <td class="cert">23306</td>
- <td class="ai">Farmers & Merchants Savings Bank</td>
- <td class="closing">December 17, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstsouthern.html">First Southern Bank</a></td>
- <td class="city">Batesville</td>
- <td class="state">AR</td>
- <td class="cert">58052</td>
- <td class="ai">Southern Bank</td>
- <td class="closing">December 17, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="unitedamericas.html">United Americas Bank, N.A.</a></td>
- <td class="city">Atlanta</td>
- <td class="state">GA</td>
- <td class="cert">35065</td>
- <td class="ai">State Bank and Trust Company</td>
- <td class="closing">December 17, 2010</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="appalachianga.html">Appalachian Community Bank, FSB</a></td>
- <td class="city">McCaysville</td>
- <td class="state">GA</td>
- <td class="cert">58495</td>
- <td class="ai">Peoples Bank of East Tennessee</td>
- <td class="closing">December 17, 2010</td>
- <td class="updated">October 31, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="chestatee.html">Chestatee State Bank</a></td>
- <td class="city">Dawsonville</td>
- <td class="state">GA</td>
- <td class="cert">34578</td>
- <td class="ai">Bank of the Ozarks</td>
- <td class="closing">December 17, 2010</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="bankofmiami.html">The Bank of Miami,N.A.</a></td>
- <td class="city">Coral Gables</td>
- <td class="state">FL</td>
- <td class="cert">19040</td>
- <td class="ai">1st United Bank</td>
- <td class="closing">December 17, 2010</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="earthstar.html">Earthstar Bank</a></td>
- <td class="city">Southampton</td>
- <td class="state">PA</td>
- <td class="cert">35561</td>
- <td class="ai">Polonia Bank</td>
- <td class="closing">December 10, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="paramount.html">Paramount Bank</a></td>
- <td class="city">Farmington Hills</td>
- <td class="state">MI</td>
- <td class="cert">34673</td>
- <td class="ai">Level One Bank</td>
- <td class="closing">December 10, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstbanking.html">First Banking Center</a></td>
- <td class="city">Burlington</td>
- <td class="state">WI</td>
- <td class="cert">5287</td>
- <td class="ai">First Michigan Bank</td>
- <td class="closing">November 19, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="allegbank.html">Allegiance Bank of North America</a></td>
- <td class="city">Bala Cynwyd</td>
- <td class="state">PA</td>
- <td class="cert">35078</td>
- <td class="ai">VIST Bank</td>
- <td class="closing">November 19, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="gulfstate.html">Gulf State Community Bank</a></td>
- <td class="city">Carrabelle</td>
- <td class="state">FL</td>
- <td class="cert">20340</td>
- <td class="ai">Centennial Bank</td>
- <td class="closing">November 19, 2010</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="copperstar.html">Copper Star Bank</a></td>
- <td class="city">Scottsdale</td>
- <td class="state">AZ</td>
- <td class="cert">35463</td>
- <td class="ai">Stearns Bank, N.A.</td>
- <td class="closing">November 12, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="darbybank.html">Darby Bank & Trust Co.</a></td>
- <td class="city">Vidalia</td>
- <td class="state">GA</td>
- <td class="cert">14580</td>
- <td class="ai">Ameris Bank</td>
- <td class="closing">November 12, 2010</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="tifton.html">Tifton Banking Company</a></td>
- <td class="city">Tifton</td>
- <td class="state">GA</td>
- <td class="cert">57831</td>
- <td class="ai">Ameris Bank</td>
- <td class="closing">November 12, 2010</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstvietnamese.html">First Vietnamese American Bank</a><br><a href="firstvietnamese_viet.pdf">In Vietnamese</a></td>
- <td class="city">Westminster</td>
- <td class="state">CA</td>
- <td class="cert">57885</td>
- <td class="ai">Grandpoint Bank</td>
- <td class="closing">November 5, 2010</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="piercecommercial.html">Pierce Commercial Bank</a></td>
- <td class="city">Tacoma</td>
- <td class="state">WA</td>
- <td class="cert">34411</td>
- <td class="ai">Heritage Bank</td>
- <td class="closing">November 5, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="westerncommercial_ca.html">Western Commercial Bank</a></td>
- <td class="city">Woodland Hills</td>
- <td class="state">CA</td>
- <td class="cert">58087</td>
- <td class="ai">First California Bank</td>
- <td class="closing">November 5, 2010</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="kbank.html">K Bank</a></td>
- <td class="city">Randallstown</td>
- <td class="state">MD</td>
- <td class="cert">31263</td>
- <td class="ai">Manufacturers and Traders Trust Company (M&T Bank)</td>
- <td class="closing">November 5, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstazfsb.html">First Arizona Savings, A FSB</a></td>
- <td class="city">Scottsdale</td>
- <td class="state">AZ</td>
- <td class="cert">32582</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">October 22, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="hillcrest_ks.html">Hillcrest Bank</a></td>
- <td class="city">Overland Park</td>
- <td class="state">KS</td>
- <td class="cert">22173</td>
- <td class="ai">Hillcrest Bank, N.A.</td>
- <td class="closing">October 22, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstsuburban.html">First Suburban National Bank</a></td>
- <td class="city">Maywood</td>
- <td class="state">IL</td>
- <td class="cert">16089</td>
- <td class="ai">Seaway Bank and Trust Company</td>
- <td class="closing">October 22, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="fnbbarnesville.html">The First National Bank of Barnesville</a></td>
- <td class="city">Barnesville</td>
- <td class="state">GA</td>
- <td class="cert">2119</td>
- <td class="ai">United Bank</td>
- <td class="closing">October 22, 2010</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="gordon.html">The Gordon Bank</a></td>
- <td class="city">Gordon</td>
- <td class="state">GA</td>
- <td class="cert">33904</td>
- <td class="ai">Morris Bank</td>
- <td class="closing">October 22, 2010</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="progress_fl.html">Progress Bank of Florida</a></td>
- <td class="city">Tampa</td>
- <td class="state">FL</td>
- <td class="cert">32251</td>
- <td class="ai">Bay Cities Bank</td>
- <td class="closing">October 22, 2010</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstbankjacksonville.html">First Bank of Jacksonville</a></td>
- <td class="city">Jacksonville</td>
- <td class="state">FL</td>
- <td class="cert">27573</td>
- <td class="ai">Ameris Bank</td>
- <td class="closing">October 22, 2010</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="premier_mo.html">Premier Bank</a></td>
- <td class="city">Jefferson City</td>
- <td class="state">MO</td>
- <td class="cert">34016</td>
- <td class="ai">Providence Bank</td>
- <td class="closing">October 15, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="westbridge.html">WestBridge Bank and Trust Company</a></td>
- <td class="city">Chesterfield</td>
- <td class="state">MO</td>
- <td class="cert">58205</td>
- <td class="ai">Midland States Bank</td>
- <td class="closing">October 15, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="securitysavingsfsb.html">Security Savings Bank, F.S.B.</a></td>
- <td class="city">Olathe</td>
- <td class="state">KS</td>
- <td class="cert">30898</td>
- <td class="ai">Simmons First National Bank</td>
- <td class="closing">October 15, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="shoreline.html">Shoreline Bank</a></td>
- <td class="city">Shoreline</td>
- <td class="state">WA</td>
- <td class="cert">35250</td>
- <td class="ai">GBC International Bank</td>
- <td class="closing">October 1, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="wakulla.html">Wakulla Bank</a></td>
- <td class="city">Crawfordville</td>
- <td class="state">FL</td>
- <td class="cert">21777</td>
- <td class="ai">Centennial Bank</td>
- <td class="closing">October 1, 2010</td>
- <td class="updated">November 2, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="northcounty.html">North County Bank</a></td>
- <td class="city">Arlington</td>
- <td class="state">WA</td>
- <td class="cert">35053</td>
- <td class="ai">Whidbey Island Bank</td>
- <td class="closing">September 24, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="haventrust_fl.html">Haven Trust Bank Florida</a></td>
- <td class="city">Ponte Vedra Beach</td>
- <td class="state">FL</td>
- <td class="cert">58308</td>
- <td class="ai">First Southern Bank</td>
- <td class="closing">September 24, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="maritimesavings.html">Maritime Savings Bank</a></td>
- <td class="city">West Allis</td>
- <td class="state">WI</td>
- <td class="cert">28612</td>
- <td class="ai">North Shore Bank, FSB</td>
- <td class="closing">September 17, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="bramblesavings.html">Bramble Savings Bank</a></td>
- <td class="city">Milford</td>
- <td class="state">OH</td>
- <td class="cert">27808</td>
- <td class="ai">Foundation Bank</td>
- <td class="closing">September 17, 2010</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="peoplesbank_ga.html">The Peoples Bank</a></td>
- <td class="city">Winder</td>
- <td class="state">GA</td>
- <td class="cert">182</td>
- <td class="ai">Community & Southern Bank</td>
- <td class="closing">September 17, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstcommerce_ga.html">First Commerce Community Bank</a></td>
- <td class="city">Douglasville</td>
- <td class="state">GA</td>
- <td class="cert">57448</td>
- <td class="ai">Community & Southern Bank</td>
- <td class="closing">September 17, 2010</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="ellijay.html">Bank of Ellijay</a></td>
- <td class="city">Ellijay</td>
- <td class="state">GA</td>
- <td class="cert">58197</td>
- <td class="ai">Community & Southern Bank</td>
- <td class="closing">September 17, 2010</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="isnbank.html">ISN Bank</a></td>
- <td class="city">Cherry Hill</td>
- <td class="state">NJ</td>
- <td class="cert">57107</td>
- <td class="ai">Customers Bank</td>
- <td class="closing">September 17, 2010</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="horizonfl.html">Horizon Bank</a></td>
- <td class="city">Bradenton</td>
- <td class="state">FL</td>
- <td class="cert">35061</td>
- <td class="ai">Bank of the Ozarks</td>
- <td class="closing">September 10, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="sonoma.html">Sonoma Valley Bank</a></td>
- <td class="city">Sonoma</td>
- <td class="state">CA</td>
- <td class="cert">27259</td>
- <td class="ai">Westamerica Bank</td>
- <td class="closing">August 20, 2010</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="lospadres.html">Los Padres Bank</a></td>
- <td class="city">Solvang</td>
- <td class="state">CA</td>
- <td class="cert">32165</td>
- <td class="ai">Pacific Western Bank</td>
- <td class="closing">August 20, 2010</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="butte.html">Butte Community Bank</a></td>
- <td class="city">Chico</td>
- <td class="state">CA</td>
- <td class="cert">33219</td>
- <td class="ai">Rabobank, N.A.</td>
- <td class="closing">August 20, 2010</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="pacificbk.html">Pacific State Bank</a></td>
- <td class="city">Stockton</td>
- <td class="state">CA</td>
- <td class="cert">27090</td>
- <td class="ai">Rabobank, N.A.</td>
- <td class="closing">August 20, 2010</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="shorebank.html">ShoreBank</a></td>
- <td class="city">Chicago</td>
- <td class="state">IL</td>
- <td class="cert">15640</td>
- <td class="ai">Urban Partnership Bank</td>
- <td class="closing">August 20, 2010</td>
- <td class="updated">May 16, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="imperialsvgs.html">Imperial Savings and Loan Association</a></td>
- <td class="city">Martinsville</td>
- <td class="state">VA</td>
- <td class="cert">31623</td>
- <td class="ai">River Community Bank, N.A.</td>
- <td class="closing">August 20, 2010</td>
- <td class="updated">August 24, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="inatbank.html">Independent National Bank</a></td>
- <td class="city">Ocala</td>
- <td class="state">FL</td>
- <td class="cert">27344</td>
- <td class="ai">CenterState Bank of Florida, N.A.</td>
- <td class="closing">August 20, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="cnbbartow.html">Community National Bank at Bartow</a></td>
- <td class="city">Bartow</td>
- <td class="state">FL</td>
- <td class="cert">25266</td>
- <td class="ai">CenterState Bank of Florida, N.A.</td>
- <td class="closing">August 20, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="palosbank.html">Palos Bank and Trust Company</a></td>
- <td class="city">Palos Heights</td>
- <td class="state">IL</td>
- <td class="cert">17599</td>
- <td class="ai">First Midwest Bank</td>
- <td class="closing">August 13, 2010</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="ravenswood.html">Ravenswood Bank</a></td>
- <td class="city">Chicago</td>
- <td class="state">IL</td>
- <td class="cert">34231</td>
- <td class="ai">Northbrook Bank & Trust Company</td>
- <td class="closing">August 6, 2010</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="libertyor.html">LibertyBank</a></td>
- <td class="city">Eugene</td>
- <td class="state">OR</td>
- <td class="cert">31964</td>
- <td class="ai">Home Federal Bank</td>
- <td class="closing">July 30, 2010</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="cowlitz.html">The Cowlitz Bank</a></td>
- <td class="city">Longview</td>
- <td class="state">WA</td>
- <td class="cert">22643</td>
- <td class="ai">Heritage Bank</td>
- <td class="closing">July 30, 2010</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="coastal.html">Coastal Community Bank</a></td>
- <td class="city">Panama City Beach</td>
- <td class="state">FL</td>
- <td class="cert">9619</td>
- <td class="ai">Centennial Bank</td>
- <td class="closing">July 30, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="bayside.html">Bayside Savings Bank</a></td>
- <td class="city">Port Saint Joe</td>
- <td class="state">FL</td>
- <td class="cert">57669</td>
- <td class="ai">Centennial Bank</td>
- <td class="closing">July 30, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="northwestga.html">Northwest Bank & Trust</a></td>
- <td class="city">Acworth</td>
- <td class="state">GA</td>
- <td class="cert">57658</td>
- <td class="ai">State Bank and Trust Company</td>
- <td class="closing">July 30, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="homevalleyor.html">Home Valley Bank</a></td>
- <td class="city">Cave Junction</td>
- <td class="state">OR</td>
- <td class="cert">23181</td>
- <td class="ai">South Valley Bank & Trust</td>
- <td class="closing">July 23, 2010</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="southwestusanv.html">SouthwestUSA Bank</a></td>
- <td class="city">Las Vegas</td>
- <td class="state">NV</td>
- <td class="cert">35434</td>
- <td class="ai">Plaza Bank</td>
- <td class="closing">July 23, 2010</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="communitysecmn.html">Community Security Bank</a></td>
- <td class="city">New Prague</td>
- <td class="state">MN</td>
- <td class="cert">34486</td>
- <td class="ai">Roundbank</td>
- <td class="closing">July 23, 2010</td>
- <td class="updated">September 12, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="thunderbankks.html">Thunder Bank</a></td>
- <td class="city">Sylvan Grove</td>
- <td class="state">KS</td>
- <td class="cert">10506</td>
- <td class="ai">The Bennington State Bank</td>
- <td class="closing">July 23, 2010</td>
- <td class="updated">September 13, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="williamsburgsc.html">Williamsburg First National Bank</a></td>
- <td class="city">Kingstree</td>
- <td class="state">SC</td>
- <td class="cert">17837</td>
- <td class="ai">First Citizens Bank and Trust Company, Inc.</td>
- <td class="closing">July 23, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="crescentga.html">Crescent Bank and Trust Company</a></td>
- <td class="city">Jasper</td>
- <td class="state">GA</td>
- <td class="cert">27559</td>
- <td class="ai">Renasant Bank</td>
- <td class="closing">July 23, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="sterlingfl.html">Sterling Bank</a></td>
- <td class="city">Lantana</td>
- <td class="state">FL</td>
- <td class="cert">32536</td>
- <td class="ai">IBERIABANK</td>
- <td class="closing">July 23, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="mainstsvgs.html">Mainstreet Savings Bank, FSB</a></td>
- <td class="city">Hastings</td>
- <td class="state">MI</td>
- <td class="cert">28136</td>
- <td class="ai">Commercial Bank</td>
- <td class="closing">July 16, 2010</td>
- <td class="updated">September 13, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="oldecypress.html">Olde Cypress Community Bank</a></td>
- <td class="city">Clewiston</td>
- <td class="state">FL</td>
- <td class="cert">28864</td>
- <td class="ai">CenterState Bank of Florida, N.A.</td>
- <td class="closing">July 16, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="turnberry.html">Turnberry Bank</a></td>
- <td class="city">Aventura</td>
- <td class="state">FL</td>
- <td class="cert">32280</td>
- <td class="ai">NAFH National Bank</td>
- <td class="closing">July 16, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="metrobankfl.html">Metro Bank of Dade County</a></td>
- <td class="city">Miami</td>
- <td class="state">FL</td>
- <td class="cert">25172</td>
- <td class="ai">NAFH National Bank</td>
- <td class="closing">July 16, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstnatlsc.html">First National Bank of the South</a></td>
- <td class="city">Spartanburg</td>
- <td class="state">SC</td>
- <td class="cert">35383</td>
- <td class="ai">NAFH National Bank</td>
- <td class="closing">July 16, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="woodlands.html">Woodlands Bank</a></td>
- <td class="city">Bluffton</td>
- <td class="state">SC</td>
- <td class="cert">32571</td>
- <td class="ai">Bank of the Ozarks</td>
- <td class="closing">July 16, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="homenatlok.html">Home National Bank</a></td>
- <td class="city">Blackwell</td>
- <td class="state">OK</td>
- <td class="cert">11636</td>
- <td class="ai">RCB Bank</td>
- <td class="closing">July 9, 2010</td>
- <td class="updated">December 10, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="usabankny.html">USA Bank</a></td>
- <td class="city">Port Chester</td>
- <td class="state">NY</td>
- <td class="cert">58072</td>
- <td class="ai">New Century Bank</td>
- <td class="closing">July 9, 2010</td>
- <td class="updated">September 14, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="idealfedsvngsmd.html">Ideal Federal Savings Bank</a></td>
- <td class="city">Baltimore</td>
- <td class="state">MD</td>
- <td class="cert">32456</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">July 9, 2010</td>
- <td class="updated">September 14, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="baynatlmd.html">Bay National Bank</a></td>
- <td class="city">Baltimore</td>
- <td class="state">MD</td>
- <td class="cert">35462</td>
- <td class="ai">Bay Bank, FSB</td>
- <td class="closing">July 9, 2010</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="highdesertnm.html">High Desert State Bank</a></td>
- <td class="city">Albuquerque</td>
- <td class="state">NM</td>
- <td class="cert">35279</td>
- <td class="ai">First American Bank</td>
- <td class="closing">June 25, 2010</td>
- <td class="updated">September 14, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstnatga.html">First National Bank</a></td>
- <td class="city">Savannah</td>
- <td class="state">GA</td>
- <td class="cert">34152</td>
- <td class="ai">The Savannah Bank, N.A.</td>
- <td class="closing">June 25, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="peninsulafl.html">Peninsula Bank</a></td>
- <td class="city">Englewood</td>
- <td class="state">FL</td>
- <td class="cert">26563</td>
- <td class="ai">Premier American Bank, N.A.</td>
- <td class="closing">June 25, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="nevsecbank.html">Nevada Security Bank</a></td>
- <td class="city">Reno</td>
- <td class="state">NV</td>
- <td class="cert">57110</td>
- <td class="ai">Umpqua Bank</td>
- <td class="closing">June 18, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="washfirstintl.html">Washington First International Bank</a></td>
- <td class="city">Seattle</td>
- <td class="state">WA</td>
- <td class="cert">32955</td>
- <td class="ai">East West Bank</td>
- <td class="closing">June 11, 2010</td>
- <td class="updated">September 14, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="tieronebankne.html">TierOne Bank</a></td>
- <td class="city">Lincoln</td>
- <td class="state">NE</td>
- <td class="cert">29341</td>
- <td class="ai">Great Western Bank</td>
- <td class="closing">June 4, 2010</td>
- <td class="updated">September 14, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="arcolail.html">Arcola Homestead Savings Bank</a></td>
- <td class="city">Arcola</td>
- <td class="state">IL</td>
- <td class="cert">31813</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">June 4, 2010</td>
- <td class="updated">September 14, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstnatms.html">First National Bank</a></td>
- <td class="city">Rosedale</td>
- <td class="state">MS</td>
- <td class="cert">15814</td>
- <td class="ai">The Jefferson Bank</td>
- <td class="closing">June 4, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="swbnevada.html">Sun West Bank</a></td>
- <td class="city">Las Vegas</td>
- <td class="state">NV</td>
- <td class="cert">34785</td>
- <td class="ai">City National Bank</td>
- <td class="closing">May 28, 2010</td>
- <td class="updated">September 14, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="graniteca.html">Granite Community Bank, NA</a></td>
- <td class="city">Granite Bay</td>
- <td class="state">CA</td>
- <td class="cert">57315</td>
- <td class="ai">Tri Counties Bank</td>
- <td class="closing">May 28, 2010</td>
- <td class="updated">September 14, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="bankoffloridatb.html">Bank of Florida - Tampa</a></td>
- <td class="city">Tampa</td>
- <td class="state">FL</td>
- <td class="cert">57814</td>
- <td class="ai">EverBank</td>
- <td class="closing">May 28, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="bankoffloridasw.html">Bank of Florida - Southwest</a></td>
- <td class="city">Naples</td>
- <td class="state">FL</td>
- <td class="cert">35106</td>
- <td class="ai">EverBank</td>
- <td class="closing">May 28, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="bankoffloridase.html">Bank of Florida - Southeast</a></td>
- <td class="city">Fort Lauderdale</td>
- <td class="state">FL</td>
- <td class="cert">57360</td>
- <td class="ai">EverBank</td>
- <td class="closing">May 28, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="pinehurstmn.html">Pinehurst Bank</a></td>
- <td class="city">Saint Paul</td>
- <td class="state">MN</td>
- <td class="cert">57735</td>
- <td class="ai">Coulee Bank</td>
- <td class="closing">May 21, 2010</td>
- <td class="updated">October 26, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="midwestil.html">Midwest Bank and Trust Company</a></td>
- <td class="city">Elmwood Park</td>
- <td class="state">IL</td>
- <td class="cert">18117</td>
- <td class="ai">FirstMerit Bank, N.A.</td>
- <td class="closing">May 14, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="swcmntymo.html">Southwest Community Bank</a></td>
- <td class="city">Springfield</td>
- <td class="state">MO</td>
- <td class="cert">34255</td>
- <td class="ai">Simmons First National Bank</td>
- <td class="closing">May 14, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="newlibertymi.html">New Liberty Bank</a></td>
- <td class="city">Plymouth</td>
- <td class="state">MI</td>
- <td class="cert">35586</td>
- <td class="ai">Bank of Ann Arbor</td>
- <td class="closing">May 14, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="satillacmntyga.html">Satilla Community Bank</a></td>
- <td class="city">Saint Marys</td>
- <td class="state">GA</td>
- <td class="cert">35114</td>
- <td class="ai">Ameris Bank</td>
- <td class="closing">May 14, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="1stpacific.html">1st Pacific Bank of California</a></td>
- <td class="city">San Diego</td>
- <td class="state">CA</td>
- <td class="cert">35517</td>
- <td class="ai">City National Bank</td>
- <td class="closing">May 7, 2010</td>
- <td class="updated">December 13, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="townebank.html">Towne Bank of Arizona</a></td>
- <td class="city">Mesa</td>
- <td class="state">AZ</td>
- <td class="cert">57697</td>
- <td class="ai">Commerce Bank of Arizona</td>
- <td class="closing">May 7, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="accessbank.html">Access Bank</a></td>
- <td class="city">Champlin</td>
- <td class="state">MN</td>
- <td class="cert">16476</td>
- <td class="ai">PrinsBank</td>
- <td class="closing">May 7, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="bonifay.html">The Bank of Bonifay</a></td>
- <td class="city">Bonifay</td>
- <td class="state">FL</td>
- <td class="cert">14246</td>
- <td class="ai">First Federal Bank of Florida</td>
- <td class="closing">May 7, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="frontier.html">Frontier Bank</a></td>
- <td class="city">Everett</td>
- <td class="state">WA</td>
- <td class="cert">22710</td>
- <td class="ai">Union Bank, N.A.</td>
- <td class="closing">April 30, 2010</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="bc-natl.html">BC National Banks</a></td>
- <td class="city">Butler</td>
- <td class="state">MO</td>
- <td class="cert">17792</td>
- <td class="ai">Community First Bank</td>
- <td class="closing">April 30, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="champion.html">Champion Bank</a></td>
- <td class="city">Creve Coeur</td>
- <td class="state">MO</td>
- <td class="cert">58362</td>
- <td class="ai">BankLiberty</td>
- <td class="closing">April 30, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="cfbancorp.html">CF Bancorp</a></td>
- <td class="city">Port Huron</td>
- <td class="state">MI</td>
- <td class="cert">30005</td>
- <td class="ai">First Michigan Bank</td>
- <td class="closing">April 30, 2010</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="westernbank-puertorico.html">Westernbank Puerto Rico</a><br><a href="westernbank-puertorico_spanish.html">En Espanol</a></td>
- <td class="city">Mayaguez</td>
- <td class="state">PR</td>
- <td class="cert">31027</td>
- <td class="ai">Banco Popular de Puerto Rico</td>
- <td class="closing">April 30, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="r-gpremier-puertorico.html">R-G Premier Bank of Puerto Rico</a><br><a href="r-gpremier-puertorico_spanish.html">En Espanol</a></td>
- <td class="city">Hato Rey</td>
- <td class="state">PR</td>
- <td class="cert">32185</td>
- <td class="ai">Scotiabank de Puerto Rico</td>
- <td class="closing">April 30, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="eurobank-puertorico.html">Eurobank</a><br><a href="eurobank-puertorico_spanish.html">En Espanol</a></td>
- <td class="city">San Juan</td>
- <td class="state">PR</td>
- <td class="cert">27150</td>
- <td class="ai">Oriental Bank and Trust</td>
- <td class="closing">April 30, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="wheatland.html">Wheatland Bank</a></td>
- <td class="city">Naperville</td>
- <td class="state">IL</td>
- <td class="cert">58429</td>
- <td class="ai">Wheaton Bank & Trust</td>
- <td class="closing">April 23, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="peotone.html">Peotone Bank and Trust Company</a></td>
- <td class="city">Peotone</td>
- <td class="state">IL</td>
- <td class="cert">10888</td>
- <td class="ai">First Midwest Bank</td>
- <td class="closing">April 23, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="lincoln-park.html">Lincoln Park Savings Bank</a></td>
- <td class="city">Chicago</td>
- <td class="state">IL</td>
- <td class="cert">30600</td>
- <td class="ai">Northbrook Bank & Trust Company</td>
- <td class="closing">April 23, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="new-century-il.html">New Century Bank</a></td>
- <td class="city">Chicago</td>
- <td class="state">IL</td>
- <td class="cert">34821</td>
- <td class="ai">MB Financial Bank, N.A.</td>
- <td class="closing">April 23, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="citizens-bank.html">Citizens Bank and Trust Company of Chicago</a></td>
- <td class="city">Chicago</td>
- <td class="state">IL</td>
- <td class="cert">34658</td>
- <td class="ai">Republic Bank of Chicago</td>
- <td class="closing">April 23, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="broadway.html">Broadway Bank</a></td>
- <td class="city">Chicago</td>
- <td class="state">IL</td>
- <td class="cert">22853</td>
- <td class="ai">MB Financial Bank, N.A.</td>
- <td class="closing">April 23, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="amcore.html">Amcore Bank, National Association</a></td>
- <td class="city">Rockford</td>
- <td class="state">IL</td>
- <td class="cert">3735</td>
- <td class="ai">Harris N.A.</td>
- <td class="closing">April 23, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="citybank.html">City Bank</a></td>
- <td class="city">Lynnwood</td>
- <td class="state">WA</td>
- <td class="cert">21521</td>
- <td class="ai">Whidbey Island Bank</td>
- <td class="closing">April 16, 2010</td>
- <td class="updated">September 14, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="tamalpais.html">Tamalpais Bank</a></td>
- <td class="city">San Rafael</td>
- <td class="state">CA</td>
- <td class="cert">33493</td>
- <td class="ai">Union Bank, N.A.</td>
- <td class="closing">April 16, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="innovative.html">Innovative Bank</a></td>
- <td class="city">Oakland</td>
- <td class="state">CA</td>
- <td class="cert">23876</td>
- <td class="ai">Center Bank</td>
- <td class="closing">April 16, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="butlerbank.html">Butler Bank</a></td>
- <td class="city">Lowell</td>
- <td class="state">MA</td>
- <td class="cert">26619</td>
- <td class="ai">People's United Bank</td>
- <td class="closing">April 16, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="riverside-natl.html">Riverside National Bank of Florida</a></td>
- <td class="city">Fort Pierce</td>
- <td class="state">FL</td>
- <td class="cert">24067</td>
- <td class="ai">TD Bank, N.A.</td>
- <td class="closing">April 16, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="americanfirst.html">AmericanFirst Bank</a></td>
- <td class="city">Clermont</td>
- <td class="state">FL</td>
- <td class="cert">57724</td>
- <td class="ai">TD Bank, N.A.</td>
- <td class="closing">April 16, 2010</td>
- <td class="updated">October 31, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="ffbnf.html">First Federal Bank of North Florida</a></td>
- <td class="city">Palatka</td>
- <td class="state">FL</td>
- <td class="cert">28886</td>
- <td class="ai">TD Bank, N.A.</td>
- <td class="closing">April 16, 2010</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="lakeside-comm.html">Lakeside Community Bank</a></td>
- <td class="city">Sterling Heights</td>
- <td class="state">MI</td>
- <td class="cert">34878</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">April 16, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="beachfirst.html">Beach First National Bank</a></td>
- <td class="city">Myrtle Beach</td>
- <td class="state">SC</td>
- <td class="cert">34242</td>
- <td class="ai">Bank of North Carolina</td>
- <td class="closing">April 9, 2010</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="deserthills.html">Desert Hills Bank</a></td>
- <td class="city">Phoenix</td>
- <td class="state">AZ</td>
- <td class="cert">57060</td>
- <td class="ai">New York Community Bank</td>
- <td class="closing">March 26, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="unity-natl.html">Unity National Bank</a></td>
- <td class="city">Cartersville</td>
- <td class="state">GA</td>
- <td class="cert">34678</td>
- <td class="ai">Bank of the Ozarks</td>
- <td class="closing">March 26, 2010</td>
- <td class="updated">September 14, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="key-west.html">Key West Bank</a></td>
- <td class="city">Key West</td>
- <td class="state">FL</td>
- <td class="cert">34684</td>
- <td class="ai">Centennial Bank</td>
- <td class="closing">March 26, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="mcintosh.html">McIntosh Commercial Bank</a></td>
- <td class="city">Carrollton</td>
- <td class="state">GA</td>
- <td class="cert">57399</td>
- <td class="ai">CharterBank</td>
- <td class="closing">March 26, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="state-aurora.html">State Bank of Aurora</a></td>
- <td class="city">Aurora</td>
- <td class="state">MN</td>
- <td class="cert">8221</td>
- <td class="ai">Northern State Bank</td>
- <td class="closing">March 19, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstlowndes.html">First Lowndes Bank</a></td>
- <td class="city">Fort Deposit</td>
- <td class="state">AL</td>
- <td class="cert">24957</td>
- <td class="ai">First Citizens Bank</td>
- <td class="closing">March 19, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="bankofhiawassee.html">Bank of Hiawassee</a></td>
- <td class="city">Hiawassee</td>
- <td class="state">GA</td>
- <td class="cert">10054</td>
- <td class="ai">Citizens South Bank</td>
- <td class="closing">March 19, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="appalachian.html">Appalachian Community Bank</a></td>
- <td class="city">Ellijay</td>
- <td class="state">GA</td>
- <td class="cert">33989</td>
- <td class="ai">Community & Southern Bank</td>
- <td class="closing">March 19, 2010</td>
- <td class="updated">October 31, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="advanta-ut.html">Advanta Bank Corp.</a></td>
- <td class="city">Draper</td>
- <td class="state">UT</td>
- <td class="cert">33535</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">March 19, 2010</td>
- <td class="updated">September 14, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="cent-security.html">Century Security Bank</a></td>
- <td class="city">Duluth</td>
- <td class="state">GA</td>
- <td class="cert">58104</td>
- <td class="ai">Bank of Upson</td>
- <td class="closing">March 19, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="amer-natl-oh.html">American National Bank</a></td>
- <td class="city">Parma</td>
- <td class="state">OH</td>
- <td class="cert">18806</td>
- <td class="ai">The National Bank and Trust Company</td>
- <td class="closing">March 19, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="statewide.html">Statewide Bank</a></td>
- <td class="city">Covington</td>
- <td class="state">LA</td>
- <td class="cert">29561</td>
- <td class="ai">Home Bank</td>
- <td class="closing">March 12, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="oldsouthern.html">Old Southern Bank</a></td>
- <td class="city">Orlando</td>
- <td class="state">FL</td>
- <td class="cert">58182</td>
- <td class="ai">Centennial Bank</td>
- <td class="closing">March 12, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="parkavenue-ny.html">The Park Avenue Bank</a></td>
- <td class="city">New York</td>
- <td class="state">NY</td>
- <td class="cert">27096</td>
- <td class="ai">Valley National Bank</td>
- <td class="closing">March 12, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="libertypointe.html">LibertyPointe Bank</a></td>
- <td class="city">New York</td>
- <td class="state">NY</td>
- <td class="cert">58071</td>
- <td class="ai">Valley National Bank</td>
- <td class="closing">March 11, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="centennial-ut.html">Centennial Bank</a></td>
- <td class="city">Ogden</td>
- <td class="state">UT</td>
- <td class="cert">34430</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">March 5, 2010</td>
- <td class="updated">September 14, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="waterfield.html">Waterfield Bank</a></td>
- <td class="city">Germantown</td>
- <td class="state">MD</td>
- <td class="cert">34976</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">March 5, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="bankofillinois.html">Bank of Illinois</a></td>
- <td class="city">Normal</td>
- <td class="state">IL</td>
- <td class="cert">9268</td>
- <td class="ai">Heartland Bank and Trust Company</td>
- <td class="closing">March 5, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="sunamerican.html">Sun American Bank</a></td>
- <td class="city">Boca Raton</td>
- <td class="state">FL</td>
- <td class="cert">27126</td>
- <td class="ai">First-Citizens Bank & Trust Company</td>
- <td class="closing">March 5, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="rainier.html">Rainier Pacific Bank</a></td>
- <td class="city">Tacoma</td>
- <td class="state">WA</td>
- <td class="cert">38129</td>
- <td class="ai">Umpqua Bank</td>
- <td class="closing">February 26, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="carsonriver.html">Carson River Community Bank</a></td>
- <td class="city">Carson City</td>
- <td class="state">NV</td>
- <td class="cert">58352</td>
- <td class="ai">Heritage Bank of Nevada</td>
- <td class="closing">February 26, 2010</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="lajolla.html">La Jolla Bank, FSB</a></td>
- <td class="city">La Jolla</td>
- <td class="state">CA</td>
- <td class="cert">32423</td>
- <td class="ai">OneWest Bank, FSB</td>
- <td class="closing">February 19, 2010</td>
- <td class="updated">August 24, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="georgewashington.html">George Washington Savings Bank</a></td>
- <td class="city">Orland Park</td>
- <td class="state">IL</td>
- <td class="cert">29952</td>
- <td class="ai">FirstMerit Bank, N.A.</td>
- <td class="closing">February 19, 2010</td>
- <td class="updated">August 24, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="lacoste.html">The La Coste National Bank</a></td>
- <td class="city">La Coste</td>
- <td class="state">TX</td>
- <td class="cert">3287</td>
- <td class="ai">Community National Bank</td>
- <td class="closing">February 19, 2010</td>
- <td class="updated">September 14, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="marco.html">Marco Community Bank</a></td>
- <td class="city">Marco Island</td>
- <td class="state">FL</td>
- <td class="cert">57586</td>
- <td class="ai">Mutual of Omaha Bank</td>
- <td class="closing">February 19, 2010</td>
- <td class="updated">August 24, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="1stamerican.html">1st American State Bank of Minnesota</a></td>
- <td class="city">Hancock</td>
- <td class="state">MN</td>
- <td class="cert">15448</td>
- <td class="ai">Community Development Bank, FSB</td>
- <td class="closing">February 5, 2010</td>
- <td class="updated">August 24, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="americanmarine.html">American Marine Bank</a></td>
- <td class="city">Bainbridge Island</td>
- <td class="state">WA</td>
- <td class="cert">16730</td>
- <td class="ai">Columbia State Bank</td>
- <td class="closing">January 29, 2010</td>
- <td class="updated">August 24, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstregional.html">First Regional Bank</a></td>
- <td class="city">Los Angeles</td>
- <td class="state">CA</td>
- <td class="cert">23011</td>
- <td class="ai">First-Citizens Bank & Trust Company</td>
- <td class="closing">January 29, 2010</td>
- <td class="updated">August 24, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="cbt-cornelia.html">Community Bank and Trust</a></td>
- <td class="city">Cornelia</td>
- <td class="state">GA</td>
- <td class="cert">5702</td>
- <td class="ai">SCBT National Association</td>
- <td class="closing">January 29, 2010</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="marshall-mn.html">Marshall Bank, N.A.</a></td>
- <td class="city">Hallock</td>
- <td class="state">MN</td>
- <td class="cert">16133</td>
- <td class="ai">United Valley Bank</td>
- <td class="closing">January 29, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="floridacommunity.html">Florida Community Bank</a></td>
- <td class="city">Immokalee</td>
- <td class="state">FL</td>
- <td class="cert">5672</td>
- <td class="ai">Premier American Bank, N.A.</td>
- <td class="closing">January 29, 2010</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstnational-carrollton.html">First National Bank of Georgia</a></td>
- <td class="city">Carrollton</td>
- <td class="state">GA</td>
- <td class="cert">16480</td>
- <td class="ai">Community & Southern Bank</td>
- <td class="closing">January 29, 2010</td>
- <td class="updated">December 13, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="columbiariver.html">Columbia River Bank</a></td>
- <td class="city">The Dalles</td>
- <td class="state">OR</td>
- <td class="cert">22469</td>
- <td class="ai">Columbia State Bank</td>
- <td class="closing">January 22, 2010</td>
- <td class="updated">September 14, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="evergreen-wa.html">Evergreen Bank</a></td>
- <td class="city">Seattle</td>
- <td class="state">WA</td>
- <td class="cert">20501</td>
- <td class="ai">Umpqua Bank</td>
- <td class="closing">January 22, 2010</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="charter-nm.html">Charter Bank</a></td>
- <td class="city">Santa Fe</td>
- <td class="state">NM</td>
- <td class="cert">32498</td>
- <td class="ai">Charter Bank</td>
- <td class="closing">January 22, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="leeton.html">Bank of Leeton</a></td>
- <td class="city">Leeton</td>
- <td class="state">MO</td>
- <td class="cert">8265</td>
- <td class="ai">Sunflower Bank, N.A.</td>
- <td class="closing">January 22, 2010</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="premieramerican.html">Premier American Bank</a></td>
- <td class="city">Miami</td>
- <td class="state">FL</td>
- <td class="cert">57147</td>
- <td class="ai">Premier American Bank, N.A.</td>
- <td class="closing">January 22, 2010</td>
- <td class="updated">December 13, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="barnes.html">Barnes Banking Company</a></td>
- <td class="city">Kaysville</td>
- <td class="state">UT</td>
- <td class="cert">1252</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">January 15, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="ststephen.html">St. Stephen State Bank</a></td>
- <td class="city">St. Stephen</td>
- <td class="state">MN</td>
- <td class="cert">17522</td>
- <td class="ai">First State Bank of St. Joseph</td>
- <td class="closing">January 15, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="towncommunity.html">Town Community Bank & Trust</a></td>
- <td class="city">Antioch</td>
- <td class="state">IL</td>
- <td class="cert">34705</td>
- <td class="ai">First American Bank</td>
- <td class="closing">January 15, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="horizon-wa.html">Horizon Bank</a></td>
- <td class="city">Bellingham</td>
- <td class="state">WA</td>
- <td class="cert">22977</td>
- <td class="ai">Washington Federal Savings and Loan Association</td>
- <td class="closing">January 8, 2010</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstfederal-ca.html">First Federal Bank of California, F.S.B.</a></td>
- <td class="city">Santa Monica</td>
- <td class="state">CA</td>
- <td class="cert">28536</td>
- <td class="ai">OneWest Bank, FSB</td>
- <td class="closing">December 18, 2009</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="imperialcapital.html">Imperial Capital Bank</a></td>
- <td class="city">La Jolla</td>
- <td class="state">CA</td>
- <td class="cert">26348</td>
- <td class="ai">City National Bank</td>
- <td class="closing">December 18, 2009</td>
- <td class="updated">September 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="ibb.html">Independent Bankers' Bank</a></td>
- <td class="city">Springfield</td>
- <td class="state">IL</td>
- <td class="cert">26820</td>
- <td class="ai">The Independent BankersBank (TIB)</td>
- <td class="closing">December 18, 2009</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="newsouth.html">New South Federal Savings Bank</a></td>
- <td class="city">Irondale</td>
- <td class="state">AL</td>
- <td class="cert">32276</td>
- <td class="ai">Beal Bank</td>
- <td class="closing">December 18, 2009</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="citizensstate-mi.html">Citizens State Bank</a></td>
- <td class="city">New Baltimore</td>
- <td class="state">MI</td>
- <td class="cert">1006</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">December 18, 2009</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="peoplesfirst-fl.html">Peoples First Community Bank</a></td>
- <td class="city">Panama City</td>
- <td class="state">FL</td>
- <td class="cert">32167</td>
- <td class="ai">Hancock Bank</td>
- <td class="closing">December 18, 2009</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="rockbridge.html">RockBridge Commercial Bank</a></td>
- <td class="city">Atlanta</td>
- <td class="state">GA</td>
- <td class="cert">58315</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">December 18, 2009</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="solutions.html">SolutionsBank</a></td>
- <td class="city">Overland Park</td>
- <td class="state">KS</td>
- <td class="cert">4731</td>
- <td class="ai">Arvest Bank</td>
- <td class="closing">December 11, 2009</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="valleycapital.html">Valley Capital Bank, N.A.</a></td>
- <td class="city">Mesa</td>
- <td class="state">AZ</td>
- <td class="cert">58399</td>
- <td class="ai">Enterprise Bank & Trust</td>
- <td class="closing">December 11, 2009</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="republicfederal.html">Republic Federal Bank, N.A.</a></td>
- <td class="city">Miami</td>
- <td class="state">FL</td>
- <td class="cert">22846</td>
- <td class="ai">1st United Bank</td>
- <td class="closing">December 11, 2009</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="atlantic-va.html">Greater Atlantic Bank</a></td>
- <td class="city">Reston</td>
- <td class="state">VA</td>
- <td class="cert">32583</td>
- <td class="ai">Sonabank</td>
- <td class="closing">December 4, 2009</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="benchmark-il.html">Benchmark Bank</a></td>
- <td class="city">Aurora</td>
- <td class="state">IL</td>
- <td class="cert">10440</td>
- <td class="ai">MB Financial Bank, N.A.</td>
- <td class="closing">December 4, 2009</td>
- <td class="updated">August 23, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="amtrust.html">AmTrust Bank</a></td>
- <td class="city">Cleveland</td>
- <td class="state">OH</td>
- <td class="cert">29776</td>
- <td class="ai">New York Community Bank</td>
- <td class="closing">December 4, 2009</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="tattnall.html">The Tattnall Bank</a></td>
- <td class="city">Reidsville</td>
- <td class="state">GA</td>
- <td class="cert">12080</td>
- <td class="ai">Heritage Bank of the South</td>
- <td class="closing">December 4, 2009</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstsecurity.html">First Security National Bank</a></td>
- <td class="city">Norcross</td>
- <td class="state">GA</td>
- <td class="cert">26290</td>
- <td class="ai">State Bank and Trust Company</td>
- <td class="closing">December 4, 2009</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="buckheadcommunity.html">The Buckhead Community Bank</a></td>
- <td class="city">Atlanta</td>
- <td class="state">GA</td>
- <td class="cert">34663</td>
- <td class="ai">State Bank and Trust Company</td>
- <td class="closing">December 4, 2009</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="commercesw-fl.html">Commerce Bank of Southwest Florida</a></td>
- <td class="city">Fort Myers</td>
- <td class="state">FL</td>
- <td class="cert">58016</td>
- <td class="ai">Central Bank</td>
- <td class="closing">November 20, 2009</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="pacificcoastnatl.html">Pacific Coast National Bank</a></td>
- <td class="city">San Clemente</td>
- <td class="state">CA</td>
- <td class="cert">57914</td>
- <td class="ai">Sunwest Bank</td>
- <td class="closing">November 13, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="orion-fl.html">Orion Bank</a></td>
- <td class="city">Naples</td>
- <td class="state">FL</td>
- <td class="cert">22427</td>
- <td class="ai">IBERIABANK</td>
- <td class="closing">November 13, 2009</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="centuryfsb.html">Century Bank, F.S.B.</a></td>
- <td class="city">Sarasota</td>
- <td class="state">FL</td>
- <td class="cert">32267</td>
- <td class="ai">IBERIABANK</td>
- <td class="closing">November 13, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="ucb.html">United Commercial Bank</a></td>
- <td class="city">San Francisco</td>
- <td class="state">CA</td>
- <td class="cert">32469</td>
- <td class="ai">East West Bank</td>
- <td class="closing">November 6, 2009</td>
- <td class="updated">November 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="gateway-mo.html">Gateway Bank of St. Louis</a></td>
- <td class="city">St. Louis</td>
- <td class="state">MO</td>
- <td class="cert">19450</td>
- <td class="ai">Central Bank of Kansas City</td>
- <td class="closing">November 6, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="prosperan.html">Prosperan Bank</a></td>
- <td class="city">Oakdale</td>
- <td class="state">MN</td>
- <td class="cert">35074</td>
- <td class="ai">Alerus Financial, N.A.</td>
- <td class="closing">November 6, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="homefsb-mi.html">Home Federal Savings Bank</a></td>
- <td class="city">Detroit</td>
- <td class="state">MI</td>
- <td class="cert">30329</td>
- <td class="ai">Liberty Bank and Trust Company</td>
- <td class="closing">November 6, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="unitedsecurity-ga.html">United Security Bank</a></td>
- <td class="city">Sparta</td>
- <td class="state">GA</td>
- <td class="cert">22286</td>
- <td class="ai">Ameris Bank</td>
- <td class="closing">November 6, 2009</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="northhouston-tx.html">North Houston Bank</a></td>
- <td class="city">Houston</td>
- <td class="state">TX</td>
- <td class="cert">18776</td>
- <td class="ai">U.S. Bank N.A.</td>
- <td class="closing">October 30, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="madisonville-tx.html">Madisonville State Bank</a></td>
- <td class="city">Madisonville</td>
- <td class="state">TX</td>
- <td class="cert">33782</td>
- <td class="ai">U.S. Bank N.A.</td>
- <td class="closing">October 30, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="citizens-teague.html">Citizens National Bank</a></td>
- <td class="city">Teague</td>
- <td class="state">TX</td>
- <td class="cert">25222</td>
- <td class="ai">U.S. Bank N.A.</td>
- <td class="closing">October 30, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="park-il.html">Park National Bank</a></td>
- <td class="city">Chicago</td>
- <td class="state">IL</td>
- <td class="cert">11677</td>
- <td class="ai">U.S. Bank N.A.</td>
- <td class="closing">October 30, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="pacificnational-ca.html">Pacific National Bank</a></td>
- <td class="city">San Francisco</td>
- <td class="state">CA</td>
- <td class="cert">30006</td>
- <td class="ai">U.S. Bank N.A.</td>
- <td class="closing">October 30, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="calnational.html">California National Bank</a></td>
- <td class="city">Los Angeles</td>
- <td class="state">CA</td>
- <td class="cert">34659</td>
- <td class="ai">U.S. Bank N.A.</td>
- <td class="closing">October 30, 2009</td>
- <td class="updated">September 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="sandiegonational.html">San Diego National Bank</a></td>
- <td class="city">San Diego</td>
- <td class="state">CA</td>
- <td class="cert">23594</td>
- <td class="ai">U.S. Bank N.A.</td>
- <td class="closing">October 30, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="community-lemont.html">Community Bank of Lemont</a></td>
- <td class="city">Lemont</td>
- <td class="state">IL</td>
- <td class="cert">35291</td>
- <td class="ai">U.S. Bank N.A.</td>
- <td class="closing">October 30, 2009</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="bankusa-az.html">Bank USA, N.A.</a></td>
- <td class="city">Phoenix</td>
- <td class="state">AZ</td>
- <td class="cert">32218</td>
- <td class="ai">U.S. Bank N.A.</td>
- <td class="closing">October 30, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstdupage.html">First DuPage Bank</a></td>
- <td class="city">Westmont</td>
- <td class="state">IL</td>
- <td class="cert">35038</td>
- <td class="ai">First Midwest Bank</td>
- <td class="closing">October 23, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="riverview-mn.html">Riverview Community Bank</a></td>
- <td class="city">Otsego</td>
- <td class="state">MN</td>
- <td class="cert">57525</td>
- <td class="ai">Central Bank</td>
- <td class="closing">October 23, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="elmwood.html">Bank of Elmwood</a></td>
- <td class="city">Racine</td>
- <td class="state">WI</td>
- <td class="cert">18321</td>
- <td class="ai">Tri City National Bank</td>
- <td class="closing">October 23, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="flagship.html">Flagship National Bank</a></td>
- <td class="city">Bradenton</td>
- <td class="state">FL</td>
- <td class="cert">35044</td>
- <td class="ai">First Federal Bank of Florida</td>
- <td class="closing">October 23, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="hillcrest-fl.html">Hillcrest Bank Florida</a></td>
- <td class="city">Naples</td>
- <td class="state">FL</td>
- <td class="cert">58336</td>
- <td class="ai">Stonegate Bank</td>
- <td class="closing">October 23, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="americanunited.html">American United Bank</a></td>
- <td class="city">Lawrenceville</td>
- <td class="state">GA</td>
- <td class="cert">57794</td>
- <td class="ai">Ameris Bank</td>
- <td class="closing">October 23, 2009</td>
- <td class="updated">September 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="partners-fl.html">Partners Bank</a></td>
- <td class="city">Naples</td>
- <td class="state">FL</td>
- <td class="cert">57959</td>
- <td class="ai">Stonegate Bank</td>
- <td class="closing">October 23, 2009</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="sanjoaquin.html">San Joaquin Bank</a></td>
- <td class="city">Bakersfield</td>
- <td class="state">CA</td>
- <td class="cert">23266</td>
- <td class="ai">Citizens Business Bank</td>
- <td class="closing">October 16, 2009</td>
- <td class="updated">August 22, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="scnb-co.html">Southern Colorado National Bank</a></td>
- <td class="city">Pueblo</td>
- <td class="state">CO</td>
- <td class="cert">57263</td>
- <td class="ai">Legacy Bank</td>
- <td class="closing">October 2, 2009</td>
- <td class="updated">September 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="jennings-mn.html">Jennings State Bank</a></td>
- <td class="city">Spring Grove</td>
- <td class="state">MN</td>
- <td class="cert">11416</td>
- <td class="ai">Central Bank</td>
- <td class="closing">October 2, 2009</td>
- <td class="updated">August 21, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="warren-mi.html">Warren Bank</a></td>
- <td class="city">Warren</td>
- <td class="state">MI</td>
- <td class="cert">34824</td>
- <td class="ai">The Huntington National Bank</td>
- <td class="closing">October 2, 2009</td>
- <td class="updated">August 21, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="georgian.html">Georgian Bank</a></td>
- <td class="city">Atlanta</td>
- <td class="state">GA</td>
- <td class="cert">57151</td>
- <td class="ai">First Citizens Bank and Trust Company, Inc.</td>
- <td class="closing">September 25, 2009</td>
- <td class="updated">August 21, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="irwin-ky.html">Irwin Union Bank, F.S.B.</a></td>
- <td class="city">Louisville</td>
- <td class="state">KY</td>
- <td class="cert">57068</td>
- <td class="ai">First Financial Bank, N.A.</td>
- <td class="closing">September 18, 2009</td>
- <td class="updated">September 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="irwin-in.html">Irwin Union Bank and Trust Company</a></td>
- <td class="city">Columbus</td>
- <td class="state">IN</td>
- <td class="cert">10100</td>
- <td class="ai">First Financial Bank, N.A.</td>
- <td class="closing">September 18, 2009</td>
- <td class="updated">August 21, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="venture-wa.html">Venture Bank</a></td>
- <td class="city">Lacey</td>
- <td class="state">WA</td>
- <td class="cert">22868</td>
- <td class="ai">First-Citizens Bank & Trust Company</td>
- <td class="closing">September 11, 2009</td>
- <td class="updated">August 21, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="brickwell-mn.html">Brickwell Community Bank</a></td>
- <td class="city">Woodbury</td>
- <td class="state">MN</td>
- <td class="cert">57736</td>
- <td class="ai">CorTrust Bank N.A.</td>
- <td class="closing">September 11, 2009</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="corus.html">Corus Bank, N.A.</a></td>
- <td class="city">Chicago</td>
- <td class="state">IL</td>
- <td class="cert">13693</td>
- <td class="ai">MB Financial Bank, N.A.</td>
- <td class="closing">September 11, 2009</td>
- <td class="updated">August 21, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firststate-az.html">First State Bank</a></td>
- <td class="city">Flagstaff</td>
- <td class="state">AZ</td>
- <td class="cert">34875</td>
- <td class="ai">Sunwest Bank</td>
- <td class="closing">September 4, 2009</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="platinum-il.html">Platinum Community Bank</a></td>
- <td class="city">Rolling Meadows</td>
- <td class="state">IL</td>
- <td class="cert">35030</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">September 4, 2009</td>
- <td class="updated">August 21, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="vantus.html">Vantus Bank</a></td>
- <td class="city">Sioux City</td>
- <td class="state">IN</td>
- <td class="cert">27732</td>
- <td class="ai">Great Southern Bank</td>
- <td class="closing">September 4, 2009</td>
- <td class="updated">August 21, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="inbank.html">InBank</a></td>
- <td class="city">Oak Forest</td>
- <td class="state">IL</td>
- <td class="cert">20203</td>
- <td class="ai">MB Financial Bank, N.A.</td>
- <td class="closing">September 4, 2009</td>
- <td class="updated">August 21, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstbankkc-mo.html">First Bank of Kansas City</a></td>
- <td class="city">Kansas City</td>
- <td class="state">MO</td>
- <td class="cert">25231</td>
- <td class="ai">Great American Bank</td>
- <td class="closing">September 4, 2009</td>
- <td class="updated">August 21, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="affinity-ca.html">Affinity Bank</a></td>
- <td class="city">Ventura</td>
- <td class="state">CA</td>
- <td class="cert">27197</td>
- <td class="ai">Pacific Western Bank</td>
- <td class="closing">August 28, 2009</td>
- <td class="updated">August 21, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="mainstreet-mn.html">Mainstreet Bank</a></td>
- <td class="city">Forest Lake</td>
- <td class="state">MN</td>
- <td class="cert">1909</td>
- <td class="ai">Central Bank</td>
- <td class="closing">August 28, 2009</td>
- <td class="updated">August 21, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="bradford-md.html">Bradford Bank</a></td>
- <td class="city">Baltimore</td>
- <td class="state">MD</td>
- <td class="cert">28312</td>
- <td class="ai">Manufacturers and Traders Trust Company (M&T Bank)</td>
- <td class="closing">August 28, 2009</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="guaranty-tx.html">Guaranty Bank</a></td>
- <td class="city">Austin</td>
- <td class="state">TX</td>
- <td class="cert">32618</td>
- <td class="ai">BBVA Compass</td>
- <td class="closing">August 21, 2009</td>
- <td class="updated">August 21, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="capitalsouth.html">CapitalSouth Bank</a></td>
- <td class="city">Birmingham</td>
- <td class="state">AL</td>
- <td class="cert">22130</td>
- <td class="ai">IBERIABANK</td>
- <td class="closing">August 21, 2009</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="coweta.html">First Coweta Bank</a></td>
- <td class="city">Newnan</td>
- <td class="state">GA</td>
- <td class="cert">57702</td>
- <td class="ai">United Bank</td>
- <td class="closing">August 21, 2009</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="ebank.html">ebank</a></td>
- <td class="city">Atlanta</td>
- <td class="state">GA</td>
- <td class="cert">34682</td>
- <td class="ai">Stearns Bank, N.A.</td>
- <td class="closing">August 21, 2009</td>
- <td class="updated">August 21, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="community-nv.html">Community Bank of Nevada</a></td>
- <td class="city">Las Vegas</td>
- <td class="state">NV</td>
- <td class="cert">34043</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">August 14, 2009</td>
- <td class="updated">August 21, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="community-az.html">Community Bank of Arizona</a></td>
- <td class="city">Phoenix</td>
- <td class="state">AZ</td>
- <td class="cert">57645</td>
- <td class="ai">MidFirst Bank</td>
- <td class="closing">August 14, 2009</td>
- <td class="updated">August 21, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="union-az.html">Union Bank, National Association</a></td>
- <td class="city">Gilbert</td>
- <td class="state">AZ</td>
- <td class="cert">34485</td>
- <td class="ai">MidFirst Bank</td>
- <td class="closing">August 14, 2009</td>
- <td class="updated">August 21, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="colonial-al.html">Colonial Bank</a></td>
- <td class="city">Montgomery</td>
- <td class="state">AL</td>
- <td class="cert">9609</td>
- <td class="ai">Branch Banking & Trust Company, (BB&T)</td>
- <td class="closing">August 14, 2009</td>
- <td class="updated">September 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="dwelling.html">Dwelling House Savings and Loan Association</a></td>
- <td class="city">Pittsburgh</td>
- <td class="state">PA</td>
- <td class="cert">31559</td>
- <td class="ai">PNC Bank, N.A.</td>
- <td class="closing">August 14, 2009</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="community-prineville.html">Community First Bank</a></td>
- <td class="city">Prineville</td>
- <td class="state">OR</td>
- <td class="cert">23268</td>
- <td class="ai">Home Federal Bank</td>
- <td class="closing">August 7, 2009</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="community-venice.html">Community National Bank of Sarasota County</a></td>
- <td class="city">Venice</td>
- <td class="state">FL</td>
- <td class="cert">27183</td>
- <td class="ai">Stearns Bank, N.A.</td>
- <td class="closing">August 7, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="fsb-sarasota.html">First State Bank</a></td>
- <td class="city">Sarasota</td>
- <td class="state">FL</td>
- <td class="cert">27364</td>
- <td class="ai">Stearns Bank, N.A.</td>
- <td class="closing">August 7, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="mutual-harvey.html">Mutual Bank</a></td>
- <td class="city">Harvey</td>
- <td class="state">IL</td>
- <td class="cert">18659</td>
- <td class="ai">United Central Bank</td>
- <td class="closing">July 31, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="americano.html">First BankAmericano</a></td>
- <td class="city">Elizabeth</td>
- <td class="state">NJ</td>
- <td class="cert">34270</td>
- <td class="ai">Crown Bank</td>
- <td class="closing">July 31, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="peoplescommunity-oh.html">Peoples Community Bank</a></td>
- <td class="city">West Chester</td>
- <td class="state">OH</td>
- <td class="cert">32288</td>
- <td class="ai">First Financial Bank, N.A.</td>
- <td class="closing">July 31, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="integrity-fl.html">Integrity Bank</a></td>
- <td class="city">Jupiter</td>
- <td class="state">FL</td>
- <td class="cert">57604</td>
- <td class="ai">Stonegate Bank</td>
- <td class="closing">July 31, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="fsb-altus.html">First State Bank of Altus</a></td>
- <td class="city">Altus</td>
- <td class="state">OK</td>
- <td class="cert">9873</td>
- <td class="ai">Herring Bank</td>
- <td class="closing">July 31, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="sb-jones.html">Security Bank of Jones County</a></td>
- <td class="city">Gray</td>
- <td class="state">GA</td>
- <td class="cert">8486</td>
- <td class="ai">State Bank and Trust Company</td>
- <td class="closing">July 24, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="sb-houston.html">Security Bank of Houston County</a></td>
- <td class="city">Perry</td>
- <td class="state">GA</td>
- <td class="cert">27048</td>
- <td class="ai">State Bank and Trust Company</td>
- <td class="closing">July 24, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="sb-bibb.html">Security Bank of Bibb County</a></td>
- <td class="city">Macon</td>
- <td class="state">GA</td>
- <td class="cert">27367</td>
- <td class="ai">State Bank and Trust Company</td>
- <td class="closing">July 24, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="sb-metro.html">Security Bank of North Metro</a></td>
- <td class="city">Woodstock</td>
- <td class="state">GA</td>
- <td class="cert">57105</td>
- <td class="ai">State Bank and Trust Company</td>
- <td class="closing">July 24, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="sb-fulton.html">Security Bank of North Fulton</a></td>
- <td class="city">Alpharetta</td>
- <td class="state">GA</td>
- <td class="cert">57430</td>
- <td class="ai">State Bank and Trust Company</td>
- <td class="closing">July 24, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="sb-gwinnett.html">Security Bank of Gwinnett County</a></td>
- <td class="city">Suwanee</td>
- <td class="state">GA</td>
- <td class="cert">57346</td>
- <td class="ai">State Bank and Trust Company</td>
- <td class="closing">July 24, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="waterford.html">Waterford Village Bank</a></td>
- <td class="city">Williamsville</td>
- <td class="state">NY</td>
- <td class="cert">58065</td>
- <td class="ai">Evans Bank, N.A.</td>
- <td class="closing">July 24, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="temecula.html">Temecula Valley Bank</a></td>
- <td class="city">Temecula</td>
- <td class="state">CA</td>
- <td class="cert">34341</td>
- <td class="ai">First-Citizens Bank & Trust Company</td>
- <td class="closing">July 17, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="vineyard.html">Vineyard Bank</a></td>
- <td class="city">Rancho Cucamonga</td>
- <td class="state">CA</td>
- <td class="cert">23556</td>
- <td class="ai">California Bank & Trust</td>
- <td class="closing">July 17, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="bankfirst.html">BankFirst</a></td>
- <td class="city">Sioux Falls</td>
- <td class="state">SD</td>
- <td class="cert">34103</td>
- <td class="ai">Alerus Financial, N.A.</td>
- <td class="closing">July 17, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="piedmont.html">First Piedmont Bank</a></td>
- <td class="city">Winder</td>
- <td class="state">GA</td>
- <td class="cert">34594</td>
- <td class="ai">First American Bank and Trust Company</td>
- <td class="closing">July 17, 2009</td>
- <td class="updated">January 15, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="wyoming.html">Bank of Wyoming</a></td>
- <td class="city">Thermopolis</td>
- <td class="state">WY</td>
- <td class="cert">22754</td>
- <td class="ai">Central Bank & Trust</td>
- <td class="closing">July 10, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="founders.html">Founders Bank</a></td>
- <td class="city">Worth</td>
- <td class="state">IL</td>
- <td class="cert">18390</td>
- <td class="ai">The PrivateBank and Trust Company</td>
- <td class="closing">July 2, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="millennium.html">Millennium State Bank of Texas</a></td>
- <td class="city">Dallas</td>
- <td class="state">TX</td>
- <td class="cert">57667</td>
- <td class="ai">State Bank of Texas</td>
- <td class="closing">July 2, 2009</td>
- <td class="updated">October 26, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="danville.html">First National Bank of Danville</a></td>
- <td class="city">Danville</td>
- <td class="state">IL</td>
- <td class="cert">3644</td>
- <td class="ai">First Financial Bank, N.A.</td>
- <td class="closing">July 2, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="elizabeth.html">Elizabeth State Bank</a></td>
- <td class="city">Elizabeth</td>
- <td class="state">IL</td>
- <td class="cert">9262</td>
- <td class="ai">Galena State Bank and Trust Company</td>
- <td class="closing">July 2, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="rockriver.html">Rock River Bank</a></td>
- <td class="city">Oregon</td>
- <td class="state">IL</td>
- <td class="cert">15302</td>
- <td class="ai">The Harvard State Bank</td>
- <td class="closing">July 2, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="winchester.html">First State Bank of Winchester</a></td>
- <td class="city">Winchester</td>
- <td class="state">IL</td>
- <td class="cert">11710</td>
- <td class="ai">The First National Bank of Beardstown</td>
- <td class="closing">July 2, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="warner.html">John Warner Bank</a></td>
- <td class="city">Clinton</td>
- <td class="state">IL</td>
- <td class="cert">12093</td>
- <td class="ai">State Bank of Lincoln</td>
- <td class="closing">July 2, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="mirae.html">Mirae Bank</a></td>
- <td class="city">Los Angeles</td>
- <td class="state">CA</td>
- <td class="cert">57332</td>
- <td class="ai">Wilshire State Bank</td>
- <td class="closing">June 26, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="metropacific.html">MetroPacific Bank</a></td>
- <td class="city">Irvine</td>
- <td class="state">CA</td>
- <td class="cert">57893</td>
- <td class="ai">Sunwest Bank</td>
- <td class="closing">June 26, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="horizon.html">Horizon Bank</a></td>
- <td class="city">Pine City</td>
- <td class="state">MN</td>
- <td class="cert">9744</td>
- <td class="ai">Stearns Bank, N.A.</td>
- <td class="closing">June 26, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="neighbor.html">Neighborhood Community Bank</a></td>
- <td class="city">Newnan</td>
- <td class="state">GA</td>
- <td class="cert">35285</td>
- <td class="ai">CharterBank</td>
- <td class="closing">June 26, 2009</td>
- <td class="updated">August 20, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="communityga.html">Community Bank of West Georgia</a></td>
- <td class="city">Villa Rica</td>
- <td class="state">GA</td>
- <td class="cert">57436</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">June 26, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="anthony.html">First National Bank of Anthony</a></td>
- <td class="city">Anthony</td>
- <td class="state">KS</td>
- <td class="cert">4614</td>
- <td class="ai">Bank of Kansas</td>
- <td class="closing">June 19, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="cooperative.html">Cooperative Bank</a></td>
- <td class="city">Wilmington</td>
- <td class="state">NC</td>
- <td class="cert">27837</td>
- <td class="ai">First Bank</td>
- <td class="closing">June 19, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="scb.html">Southern Community Bank</a></td>
- <td class="city">Fayetteville</td>
- <td class="state">GA</td>
- <td class="cert">35251</td>
- <td class="ai">United Community Bank</td>
- <td class="closing">June 19, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="lincolnwood.html">Bank of Lincolnwood</a></td>
- <td class="city">Lincolnwood</td>
- <td class="state">IL</td>
- <td class="cert">17309</td>
- <td class="ai">Republic Bank of Chicago</td>
- <td class="closing">June 5, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="citizensnational.html">Citizens National Bank</a></td>
- <td class="city">Macomb</td>
- <td class="state">IL</td>
- <td class="cert">5757</td>
- <td class="ai">Morton Community Bank</td>
- <td class="closing">May 22, 2009</td>
- <td class="updated">September 4, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="strategiccapital.html">Strategic Capital Bank</a></td>
- <td class="city">Champaign</td>
- <td class="state">IL</td>
- <td class="cert">35175</td>
- <td class="ai">Midland States Bank</td>
- <td class="closing">May 22, 2009</td>
- <td class="updated">September 4, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="bankunited.html">BankUnited, FSB</a></td>
- <td class="city">Coral Gables</td>
- <td class="state">FL</td>
- <td class="cert">32247</td>
- <td class="ai">BankUnited</td>
- <td class="closing">May 21, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="westsound.html">Westsound Bank</a></td>
- <td class="city">Bremerton</td>
- <td class="state">WA</td>
- <td class="cert">34843</td>
- <td class="ai">Kitsap Bank</td>
- <td class="closing">May 8, 2009</td>
- <td class="updated">September 4, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="americawest.html">America West Bank</a></td>
- <td class="city">Layton</td>
- <td class="state">UT</td>
- <td class="cert">35461</td>
- <td class="ai">Cache Valley Bank</td>
- <td class="closing">May 1, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="citizens.html">Citizens Community Bank</a></td>
- <td class="city">Ridgewood</td>
- <td class="state">NJ</td>
- <td class="cert">57563</td>
- <td class="ai">North Jersey Community Bank</td>
- <td class="closing">May 1, 2009</td>
- <td class="updated">September 4, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="silverton.html">Silverton Bank, NA</a></td>
- <td class="city">Atlanta</td>
- <td class="state">GA</td>
- <td class="cert">26535</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">May 1, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstbankidaho.html">First Bank of Idaho</a></td>
- <td class="city">Ketchum</td>
- <td class="state">ID</td>
- <td class="cert">34396</td>
- <td class="ai">U.S. Bank, N.A.</td>
- <td class="closing">April 24, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="beverlyhills.html">First Bank of Beverly Hills</a></td>
- <td class="city">Calabasas</td>
- <td class="state">CA</td>
- <td class="cert">32069</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">April 24, 2009</td>
- <td class="updated">September 4, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="michiganheritage.html">Michigan Heritage Bank</a></td>
- <td class="city">Farmington Hills</td>
- <td class="state">MI</td>
- <td class="cert">34369</td>
- <td class="ai">Level One Bank</td>
- <td class="closing">April 24, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="amsouthern.html">American Southern Bank</a></td>
- <td class="city">Kennesaw</td>
- <td class="state">GA</td>
- <td class="cert">57943</td>
- <td class="ai">Bank of North Georgia</td>
- <td class="closing">April 24, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="greatbasin.html">Great Basin Bank of Nevada</a></td>
- <td class="city">Elko</td>
- <td class="state">NV</td>
- <td class="cert">33824</td>
- <td class="ai">Nevada State Bank</td>
- <td class="closing">April 17, 2009</td>
- <td class="updated">September 4, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="amsterling.html">American Sterling Bank</a></td>
- <td class="city">Sugar Creek</td>
- <td class="state">MO</td>
- <td class="cert">8266</td>
- <td class="ai">Metcalf Bank</td>
- <td class="closing">April 17, 2009</td>
- <td class="updated">August 31, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="newfrontier.html">New Frontier Bank</a></td>
- <td class="city">Greeley</td>
- <td class="state">CO</td>
- <td class="cert">34881</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">April 10, 2009</td>
- <td class="updated">September 4, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="capefear.html">Cape Fear Bank</a></td>
- <td class="city">Wilmington</td>
- <td class="state">NC</td>
- <td class="cert">34639</td>
- <td class="ai">First Federal Savings and Loan Association</td>
- <td class="closing">April 10, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="omni.html">Omni National Bank</a></td>
- <td class="city">Atlanta</td>
- <td class="state">GA</td>
- <td class="cert">22238</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">March 27, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="teambank.html">TeamBank, NA</a></td>
- <td class="city">Paola</td>
- <td class="state">KS</td>
- <td class="cert">4754</td>
- <td class="ai">Great Southern Bank</td>
- <td class="closing">March 20, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="coloradonational.html">Colorado National Bank</a></td>
- <td class="city">Colorado Springs</td>
- <td class="state">CO</td>
- <td class="cert">18896</td>
- <td class="ai">Herring Bank</td>
- <td class="closing">March 20, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstcity.html">FirstCity Bank</a></td>
- <td class="city">Stockbridge</td>
- <td class="state">GA</td>
- <td class="cert">18243</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">March 20, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="freedomga.html">Freedom Bank of Georgia</a></td>
- <td class="city">Commerce</td>
- <td class="state">GA</td>
- <td class="cert">57558</td>
- <td class="ai">Northeast Georgia Bank</td>
- <td class="closing">March 6, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="securitysavings.html">Security Savings Bank</a></td>
- <td class="city">Henderson</td>
- <td class="state">NV</td>
- <td class="cert">34820</td>
- <td class="ai">Bank of Nevada</td>
- <td class="closing">February 27, 2009</td>
- <td class="updated">September 7, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="heritagebank.html">Heritage Community Bank</a></td>
- <td class="city">Glenwood</td>
- <td class="state">IL</td>
- <td class="cert">20078</td>
- <td class="ai">MB Financial Bank, N.A.</td>
- <td class="closing">February 27, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="silverfalls.html">Silver Falls Bank</a></td>
- <td class="city">Silverton</td>
- <td class="state">OR</td>
- <td class="cert">35399</td>
- <td class="ai">Citizens Bank</td>
- <td class="closing">February 20, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="pinnacle.html">Pinnacle Bank of Oregon</a></td>
- <td class="city">Beaverton</td>
- <td class="state">OR</td>
- <td class="cert">57342</td>
- <td class="ai">Washington Trust Bank of Spokane</td>
- <td class="closing">February 13, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="cornbelt.html">Corn Belt Bank & Trust Co.</a></td>
- <td class="city">Pittsfield</td>
- <td class="state">IL</td>
- <td class="cert">16500</td>
- <td class="ai">The Carlinville National Bank</td>
- <td class="closing">February 13, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="riverside.html">Riverside Bank of the Gulf Coast</a></td>
- <td class="city">Cape Coral</td>
- <td class="state">FL</td>
- <td class="cert">34563</td>
- <td class="ai">TIB Bank</td>
- <td class="closing">February 13, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="sherman.html">Sherman County Bank</a></td>
- <td class="city">Loup City</td>
- <td class="state">NE</td>
- <td class="cert">5431</td>
- <td class="ai">Heritage Bank</td>
- <td class="closing">February 13, 2009</td>
- <td class="updated">August 17, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="county.html">County Bank</a></td>
- <td class="city">Merced</td>
- <td class="state">CA</td>
- <td class="cert">22574</td>
- <td class="ai">Westamerica Bank</td>
- <td class="closing">February 6, 2009</td>
- <td class="updated">September 4, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="alliance.html">Alliance Bank</a></td>
- <td class="city">Culver City</td>
- <td class="state">CA</td>
- <td class="cert">23124</td>
- <td class="ai">California Bank & Trust</td>
- <td class="closing">February 6, 2009</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstbank.html">FirstBank Financial Services</a></td>
- <td class="city">McDonough</td>
- <td class="state">GA</td>
- <td class="cert">57017</td>
- <td class="ai">Regions Bank</td>
- <td class="closing">February 6, 2009</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="ocala.html">Ocala National Bank</a></td>
- <td class="city">Ocala</td>
- <td class="state">FL</td>
- <td class="cert">26538</td>
- <td class="ai">CenterState Bank of Florida, N.A.</td>
- <td class="closing">January 30, 2009</td>
- <td class="updated">September 4, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="suburban.html">Suburban FSB</a></td>
- <td class="city">Crofton</td>
- <td class="state">MD</td>
- <td class="cert">30763</td>
- <td class="ai">Bank of Essex</td>
- <td class="closing">January 30, 2009</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="magnet.html">MagnetBank</a></td>
- <td class="city">Salt Lake City</td>
- <td class="state">UT</td>
- <td class="cert">58001</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">January 30, 2009</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="centennial.html">1st Centennial Bank</a></td>
- <td class="city">Redlands</td>
- <td class="state">CA</td>
- <td class="cert">33025</td>
- <td class="ai">First California Bank</td>
- <td class="closing">January 23, 2009</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="clark.html">Bank of Clark County</a></td>
- <td class="city">Vancouver</td>
- <td class="state">WA</td>
- <td class="cert">34959</td>
- <td class="ai">Umpqua Bank</td>
- <td class="closing">January 16, 2009</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="commerce.html">National Bank of Commerce</a></td>
- <td class="city">Berkeley</td>
- <td class="state">IL</td>
- <td class="cert">19733</td>
- <td class="ai">Republic Bank of Chicago</td>
- <td class="closing">January 16, 2009</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="sanderson.html">Sanderson State Bank</a><br><a href="sanderson_spanish.html">En Espanol</a></td>
- <td class="city">Sanderson</td>
- <td class="state">TX</td>
- <td class="cert">11568</td>
- <td class="ai">The Pecos County State Bank</td>
- <td class="closing">December 12, 2008</td>
- <td class="updated">September 4, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="haventrust.html">Haven Trust Bank</a></td>
- <td class="city">Duluth</td>
- <td class="state">GA</td>
- <td class="cert">35379</td>
- <td class="ai">Branch Banking & Trust Company, (BB&T)</td>
- <td class="closing">December 12, 2008</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstga.html">First Georgia Community Bank</a></td>
- <td class="city">Jackson</td>
- <td class="state">GA</td>
- <td class="cert">34301</td>
- <td class="ai">United Bank</td>
- <td class="closing">December 5, 2008</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="pff.html">PFF Bank & Trust</a></td>
- <td class="city">Pomona</td>
- <td class="state">CA</td>
- <td class="cert">28344</td>
- <td class="ai">U.S. Bank, N.A.</td>
- <td class="closing">November 21, 2008</td>
- <td class="updated">January 4, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="downey.html">Downey Savings & Loan</a></td>
- <td class="city">Newport Beach</td>
- <td class="state">CA</td>
- <td class="cert">30968</td>
- <td class="ai">U.S. Bank, N.A.</td>
- <td class="closing">November 21, 2008</td>
- <td class="updated">January 4, 2013</td>
- </tr>
- <tr>
- <td class="institution"><a href="community.html">Community Bank</a></td>
- <td class="city">Loganville</td>
- <td class="state">GA</td>
- <td class="cert">16490</td>
- <td class="ai">Bank of Essex</td>
- <td class="closing">November 21, 2008</td>
- <td class="updated">September 4, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="securitypacific.html">Security Pacific Bank</a></td>
- <td class="city">Los Angeles</td>
- <td class="state">CA</td>
- <td class="cert">23595</td>
- <td class="ai">Pacific Western Bank</td>
- <td class="closing">November 7, 2008</td>
- <td class="updated">August 28, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="franklinbank.html">Franklin Bank, SSB</a></td>
- <td class="city">Houston</td>
- <td class="state">TX</td>
- <td class="cert">26870</td>
- <td class="ai">Prosperity Bank</td>
- <td class="closing">November 7, 2008</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="freedom.html">Freedom Bank</a></td>
- <td class="city">Bradenton</td>
- <td class="state">FL</td>
- <td class="cert">57930</td>
- <td class="ai">Fifth Third Bank</td>
- <td class="closing">October 31, 2008</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="alpha.html">Alpha Bank & Trust</a></td>
- <td class="city">Alpharetta</td>
- <td class="state">GA</td>
- <td class="cert">58241</td>
- <td class="ai">Stearns Bank, N.A.</td>
- <td class="closing">October 24, 2008</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="meridian.html">Meridian Bank</a></td>
- <td class="city">Eldred</td>
- <td class="state">IL</td>
- <td class="cert">13789</td>
- <td class="ai">National Bank</td>
- <td class="closing">October 10, 2008</td>
- <td class="updated">May 31, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="mainstreet.html">Main Street Bank</a></td>
- <td class="city">Northville</td>
- <td class="state">MI</td>
- <td class="cert">57654</td>
- <td class="ai">Monroe Bank & Trust</td>
- <td class="closing">October 10, 2008</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="wamu.html">Washington Mutual Bank<br>(Including its subsidiary Washington Mutual Bank FSB)</a></td>
- <td class="city">Henderson</td>
- <td class="state">NV</td>
- <td class="cert">32633</td>
- <td class="ai">JP Morgan Chase Bank</td>
- <td class="closing">September 25, 2008</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="ameribank.html">Ameribank</a></td>
- <td class="city">Northfork</td>
- <td class="state">WV</td>
- <td class="cert">6782</td>
- <td class="ai">The Citizens Savings Bank<br><br>Pioneer Community Bank, Inc.</td>
- <td class="closing">September 19, 2008</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="silverstate.html">Silver State Bank</a><br><a href="silverstatesp.html">En Espanol</a></td>
- <td class="city">Henderson</td>
- <td class="state">NV</td>
- <td class="cert">34194</td>
- <td class="ai">Nevada State Bank</td>
- <td class="closing">September 5, 2008</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="integrity.html">Integrity Bank</a></td>
- <td class="city">Alpharetta</td>
- <td class="state">GA</td>
- <td class="cert">35469</td>
- <td class="ai">Regions Bank</td>
- <td class="closing">August 29, 2008</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="columbian.html">Columbian Bank & Trust</a></td>
- <td class="city">Topeka</td>
- <td class="state">KS</td>
- <td class="cert">22728</td>
- <td class="ai">Citizens Bank & Trust</td>
- <td class="closing">August 22, 2008</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstprioritybank.html">First Priority Bank</a></td>
- <td class="city">Bradenton</td>
- <td class="state">FL</td>
- <td class="cert">57523</td>
- <td class="ai">SunTrust Bank</td>
- <td class="closing">August 1, 2008</td>
- <td class="updated">August 16, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="heritage.html">First Heritage Bank, NA</a></td>
- <td class="city">Newport Beach</td>
- <td class="state">CA</td>
- <td class="cert">57961</td>
- <td class="ai">Mutual of Omaha Bank</td>
- <td class="closing">July 25, 2008</td>
- <td class="updated">August 28, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="fnbnv.html">First National Bank of Nevada</a></td>
- <td class="city">Reno</td>
- <td class="state">NV</td>
- <td class="cert">27011</td>
- <td class="ai">Mutual of Omaha Bank</td>
- <td class="closing">July 25, 2008</td>
- <td class="updated">August 28, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="IndyMac.html">IndyMac Bank</a></td>
- <td class="city">Pasadena</td>
- <td class="state">CA</td>
- <td class="cert">29730</td>
- <td class="ai">OneWest Bank, FSB</td>
- <td class="closing">July 11, 2008</td>
- <td class="updated">August 28, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="first_integrity_bank.html">First Integrity Bank, NA</a></td>
- <td class="city">Staples</td>
- <td class="state">MN</td>
- <td class="cert">12736</td>
- <td class="ai">First International Bank and Trust</td>
- <td class="closing">May 30, 2008</td>
- <td class="updated">August 28, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="anb.html">ANB Financial, NA</a></td>
- <td class="city">Bentonville</td>
- <td class="state">AR</td>
- <td class="cert">33901</td>
- <td class="ai">Pulaski Bank and Trust Company</td>
- <td class="closing">May 9, 2008</td>
- <td class="updated">August 28, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="Hume.html">Hume Bank</a></td>
- <td class="city">Hume</td>
- <td class="state">MO</td>
- <td class="cert">1971</td>
- <td class="ai">Security Bank</td>
- <td class="closing">March 7, 2008</td>
- <td class="updated">August 28, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="Douglass.html">Douglass National Bank</a></td>
- <td class="city">Kansas City</td>
- <td class="state">MO</td>
- <td class="cert">24660</td>
- <td class="ai">Liberty Bank and Trust Company</td>
- <td class="closing">January 25, 2008</td>
- <td class="updated">October 26, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="MiamiValley.html">Miami Valley Bank</a></td>
- <td class="city">Lakeview</td>
- <td class="state">OH</td>
- <td class="cert">16848</td>
- <td class="ai">The Citizens Banking Company</td>
- <td class="closing">October 4, 2007</td>
- <td class="updated">August 28, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="NetBank.html">NetBank</a></td>
- <td class="city">Alpharetta</td>
- <td class="state">GA</td>
- <td class="cert">32575</td>
- <td class="ai">ING DIRECT</td>
- <td class="closing">September 28, 2007</td>
- <td class="updated">August 28, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="MetropolitanSB.html">Metropolitan Savings Bank</a></td>
- <td class="city">Pittsburgh</td>
- <td class="state">PA</td>
- <td class="cert">35353</td>
- <td class="ai">Allegheny Valley Bank of Pittsburgh</td>
- <td class="closing">February 2, 2007</td>
- <td class="updated">October 27, 2010</td>
- </tr>
- <tr>
- <td class="institution"><a href="ephraim.html">Bank of Ephraim</a></td>
- <td class="city">Ephraim</td>
- <td class="state">UT</td>
- <td class="cert">1249</td>
- <td class="ai">Far West Bank</td>
- <td class="closing">June 25, 2004</td>
- <td class="updated">April 9, 2008</td>
- </tr>
- <tr>
- <td class="institution"><a href="reliance.html">Reliance Bank</a></td>
- <td class="city">White Plains</td>
- <td class="state">NY</td>
- <td class="cert">26778</td>
- <td class="ai">Union State Bank</td>
- <td class="closing">March 19, 2004</td>
- <td class="updated">April 9, 2008</td>
- </tr>
- <tr>
- <td class="institution"><a href="gnb.html">Guaranty National Bank of Tallahassee</a></td>
- <td class="city">Tallahassee</td>
- <td class="state">FL</td>
- <td class="cert">26838</td>
- <td class="ai">Hancock Bank of Florida</td>
- <td class="closing">March 12, 2004</td>
- <td class="updated">June 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="dollar.html">Dollar Savings Bank</a></td>
- <td class="city">Newark</td>
- <td class="state">NJ</td>
- <td class="cert">31330</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">February 14, 2004</td>
- <td class="updated">April 9, 2008</td>
- </tr>
- <tr>
- <td class="institution"><a href="pulaski.html">Pulaski Savings Bank</a></td>
- <td class="city">Philadelphia</td>
- <td class="state">PA</td>
- <td class="cert">27203</td>
- <td class="ai">Earthstar Bank</td>
- <td class="closing">November 14, 2003</td>
- <td class="updated">July 22, 2005</td>
- </tr>
- <tr>
- <td class="institution"><a href="blanchardville.html">First National Bank of Blanchardville</a></td>
- <td class="city">Blanchardville</td>
- <td class="state">WI</td>
- <td class="cert">11639</td>
- <td class="ai">The Park Bank</td>
- <td class="closing">May 9, 2003</td>
- <td class="updated">June 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="spbank.html">Southern Pacific Bank</a></td>
- <td class="city">Torrance</td>
- <td class="state">CA</td>
- <td class="cert">27094</td>
- <td class="ai">Beal Bank</td>
- <td class="closing">February 7, 2003</td>
- <td class="updated">October 20, 2008</td>
- </tr>
- <tr>
- <td class="institution"><a href="farmers.html">Farmers Bank of Cheneyville</a></td>
- <td class="city">Cheneyville</td>
- <td class="state">LA</td>
- <td class="cert">16445</td>
- <td class="ai">Sabine State Bank & Trust</td>
- <td class="closing">December 17, 2002</td>
- <td class="updated">October 20, 2004</td>
- </tr>
- <tr>
- <td class="institution"><a href="bankofalamo.html">Bank of Alamo</a></td>
- <td class="city">Alamo</td>
- <td class="state">TN</td>
- <td class="cert">9961</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">November 8, 2002</td>
- <td class="updated">March 18, 2005</td>
- </tr>
- <tr>
- <td class="institution"><a href="amtrade.html">AmTrade International Bank</a><br><a href="amtrade-spanish.html">En Espanol</a></td>
- <td class="city">Atlanta</td>
- <td class="state">GA</td>
- <td class="cert">33784</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">September 30, 2002</td>
- <td class="updated">September 11, 2006</td>
- </tr>
- <tr>
- <td class="institution"><a href="universal.html">Universal Federal Savings Bank</a></td>
- <td class="city">Chicago</td>
- <td class="state">IL</td>
- <td class="cert">29355</td>
- <td class="ai">Chicago Community Bank</td>
- <td class="closing">June 27, 2002</td>
- <td class="updated">April 9, 2008</td>
- </tr>
- <tr>
- <td class="institution"><a href="cbc.html">Connecticut Bank of Commerce</a></td>
- <td class="city">Stamford</td>
- <td class="state">CT</td>
- <td class="cert">19183</td>
- <td class="ai">Hudson United Bank</td>
- <td class="closing">June 26, 2002</td>
- <td class="updated">February 14, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="newcentury.html">New Century Bank</a></td>
- <td class="city">Shelby Township</td>
- <td class="state">MI</td>
- <td class="cert">34979</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">March 28, 2002</td>
- <td class="updated">March 18, 2005</td>
- </tr>
- <tr>
- <td class="institution"><a href="netfirst.html">Net 1st National Bank</a></td>
- <td class="city">Boca Raton</td>
- <td class="state">FL</td>
- <td class="cert">26652</td>
- <td class="ai">Bank Leumi USA</td>
- <td class="closing">March 1, 2002</td>
- <td class="updated">April 9, 2008</td>
- </tr>
- <tr>
- <td class="institution"><a href="nextbank.html">NextBank, NA</a></td>
- <td class="city">Phoenix</td>
- <td class="state">AZ</td>
- <td class="cert">22314</td>
- <td class="ai">No Acquirer</td>
- <td class="closing">February 7, 2002</td>
- <td class="updated">August 27, 2010</td>
- </tr>
- <tr>
- <td class="institution"><a href="Oakwood.html">Oakwood Deposit Bank Co.</a></td>
- <td class="city">Oakwood</td>
- <td class="state">OH</td>
- <td class="cert">8966</td>
- <td class="ai">The State Bank & Trust Company</td>
- <td class="closing">February 1, 2002</td>
- <td class="updated">October 25, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="sierrablanca.html">Bank of Sierra Blanca</a></td>
- <td class="city">Sierra Blanca</td>
- <td class="state">TX</td>
- <td class="cert">22002</td>
- <td class="ai">The Security State Bank of Pecos</td>
- <td class="closing">January 18, 2002</td>
- <td class="updated">November 6, 2003</td>
- </tr>
- <tr>
- <td class="institution"><a href="hamilton.html">Hamilton Bank, NA</a><br><a href="hamilton-spanish.html">En Espanol</a></td>
- <td class="city">Miami</td>
- <td class="state">FL</td>
- <td class="cert">24382</td>
- <td class="ai">Israel Discount Bank of New York</td>
- <td class="closing">January 11, 2002</td>
- <td class="updated">June 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="sinclair.html">Sinclair National Bank</a></td>
- <td class="city">Gravette</td>
- <td class="state">AR</td>
- <td class="cert">34248</td>
- <td class="ai">Delta Trust & Bank</td>
- <td class="closing">September 7, 2001</td>
- <td class="updated">February 10, 2004</td>
- </tr>
- <tr>
- <td class="institution"><a href="superior.html">Superior Bank, FSB</a></td>
- <td class="city">Hinsdale</td>
- <td class="state">IL</td>
- <td class="cert">32646</td>
- <td class="ai">Superior Federal, FSB</td>
- <td class="closing">July 27, 2001</td>
- <td class="updated">June 5, 2012</td>
- </tr>
- <tr>
- <td class="institution"><a href="Malta.html">Malta National Bank</a></td>
- <td class="city">Malta</td>
- <td class="state">OH</td>
- <td class="cert">6629</td>
- <td class="ai">North Valley Bank</td>
- <td class="closing">May 3, 2001</td>
- <td class="updated">November 18, 2002</td>
- </tr>
- <tr>
- <td class="institution"><a href="firstalliance.html">First Alliance Bank & Trust Co.</a></td>
- <td class="city">Manchester</td>
- <td class="state">NH</td>
- <td class="cert">34264</td>
- <td class="ai">Southern New Hampshire Bank & Trust</td>
- <td class="closing">February 2, 2001</td>
- <td class="updated">February 18, 2003</td>
- </tr>
- <tr>
- <td class="institution"><a href="nsb.html">National State Bank of Metropolis</a></td>
- <td class="city">Metropolis</td>
- <td class="state">IL</td>
- <td class="cert">3815</td>
- <td class="ai">Banterra Bank of Marion</td>
- <td class="closing">December 14, 2000</td>
- <td class="updated">March 17, 2005</td>
- </tr>
- <tr>
- <td class="institution"><a href="boh.html">Bank of Honolulu</a></td>
- <td class="city">Honolulu</td>
- <td class="state">HI</td>
- <td class="cert">21029</td>
- <td class="ai">Bank of the Orient</td>
- <td class="closing">October 13, 2000</td>
- <td class="updated">March 17, 2005</td>
- </tr>
- </tbody>
- </table>
- </div>
-
-</div>
-<div id="page_foot">
- <div class="date">Last Updated 05/31/2013</div>
- <div class="email"><a href="mailto:cservicefdicdal@fdic.gov">cservicefdicdal@fdic.gov</a></div>
- <div class="clear"></div>
-</div>
-
-<!-- START of Footer -->
-<footer>
-<link rel="stylesheet" type="text/css" href="/responsive/footer/css/footer.css" />
-<div id="responsive_footer">
- <div id="responsive_footer-full">
- <ul>
- <li><a href="/" title="Home">Home</a></li>
- <li><a href="/about/contact/ask/" title="Contact Us">Contact Us</a></li>
- <li><a href="/search/" title="Search">Search</a></li>
- <li><a href="/help/" title="Help">Help</a></li>
- <li><a href="/sitemap/" title="SiteMap">SiteMap</a></li>
- <li><a href="/regulations/laws/forms/" title="Forms">Forms</a></li>
- <li><a href="/quicklinks/spanish.html" title="En Español">En Español</a></li>
- </ul>
- <hr>
- <ul>
- <li><a href="/about/policies/" title="Website Policies">Website Policies</a></li>
- <li><a href="/about/privacy/policy/" title="Privacy Policy">Privacy Policy</a></li>
- <li><a href="/plainlanguage/" title="Privacy Policy">Plain Writing Act of 2010 </a></li>
- <li><a href="http://www.usa.gov/" title="USA.gov">USA.gov</a></li>
- <li><a href="http://www.fdicoig.gov/" title="FDIC Office of Inspector General">FDIC Office of Inspector General</a></li>
- </ul>
- <hr>
- <ul>
- <li><a href="/about/freedom/" title="Freedom of Information Act (FOIA) Service Center">Freedom of Information Act (FOIA) Service Center</a></li>
- <li><a href="/open/" title="FDIC Open Government Webpage">FDIC Open Government Webpage</a></li>
- <li><a href="/about/diversity/nofear/" title="No FEAR Act Data">No FEAR Act Data</a></li>
- </ul>
- </div>
- <div id="responsive_footer-small">
- <ul>
- <li><a href="/" title="Home">Home</a></li>
- <li><a href="/about/contact/ask/" title="Contact Us">Contact Us</a></li>
- <li><a href="/about/policies/" title="Website Policies">Website Policies</a></li>
- <li><a href="/search/" title="Search">Search</a></li>
- </ul>
- </div>
-</div>
-</footer>
-<!-- START Omniture SiteCatalyst Code -->
-<script language="JavaScript" type="text/javascript" src="/js/s_code_v1.js"></script>
-<script type="text/javascript">
-/************* DO NOT ALTER ANYTHING BELOW THIS LINE ! **************/
-var s_code=s.t();if(s_code)document.write(s_code)</script>
-<script type="text/javascript">
-if(navigator.appVersion.indexOf('MSIE')>=0)document.write(unescape('%3C')+'\!-'+'-')
-</script>
-<noscript>
-<a href="http://www.omniture.com" title="Web Analytics">
-<img src="http://fdic.122.2o7.net/b/ss/fdicgovprod/1/H.21--NS/0?[AQB]%26cl=Session%26AQE" height="1" width="1" border="0" alt="" /></a></li>
-</noscript>
-<!--/DO NOT REMOVE/-->
-<!-- END Omniture SiteCatalyst Code -->
-<!-- END of Footer -->
-
-<script type="text/javascript" src="/responsive/js/jquery.tablesorter.js"></script>
-<script type="text/javascript" src="banklist.js"></script>
-
-</body>
-</html>
+<!DOCTYPE html><!-- HTML5 -->
+<html lang="en-US">
+<!-- Content language is American English. -->
+<head>
+<title>FDIC: Failed Bank List</title>
+<!-- Meta Tags -->
+<meta charset="UTF-8">
+<!-- Unicode character encoding -->
+<meta http-equiv="X-UA-Compatible" content="IE=edge">
+<!-- Turns off IE Compatiblity Mode -->
+<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1">
+<!-- Makes it so phones don't auto zoom out. -->
+<meta name="author" content="DRR">
+<meta http-equiv="keywords" name="keywords" content="banks, financial institutions, failed, failure, closing, deposits, depositors, banking services, assuming institution, acquiring institution, claims">
+<!-- CSS -->
+<link rel="stylesheet" type="text/css" href="/responsive/css/responsive.css">
+<link rel="stylesheet" type="text/css" href="banklist.css">
+</head>
+<body>
+
+<!-- START of Header -->
+<script type="text/javascript" src="/responsive/header/js/header.js"></script>
+<link rel="stylesheet" type="text/css" href="/responsive/header/css/header.css" />
+<!-- googleac.html includes Autocomplete functionality -->
+<!-- Autocomplete files -->
+<link rel="stylesheet" type="text/css" href="/responsive/header/css/jquery.autocomplete.css" />
+<script type="text/javascript" src="/responsive/js/jquery-1.4.1.min.js"></script>
+<script type="text/javascript" src="/responsive/header/js/jquery.autocomplete-1.4.2.js"></script>
+<script type="text/javascript">
+function findValue(li) {
+ if( li == null ) return alert("No match!");
+
+ // if coming from an AJAX call, let's use the Id as the value
+ if( !!li.extra ) var sValue = li.extra[0];
+
+ // otherwise, let's just display the value in the text box
+ else var sValue = li.selectValue;
+
+ $('#googlesearch').submit();
+
+}
+function findValue2(li) {
+ if( li == null ) return alert("No match!");
+
+ // if coming from an AJAX call, let's use the Id as the value
+ if( !!li.extra ) var sValue = li.extra[0];
+
+ // otherwise, let's just display the value in the text box
+ else var sValue = li.selectValue;
+
+ $('#googlesearch2').submit();
+}
+function selectItem(li) {
+ findValue(li);
+}
+function selectItem2(li) {
+ findValue2(li);
+}
+
+$().ready(function() {
+
+ function log(event, data, formatted) {
+ $("<li>").html( !data ? "No match!" : "Selected: " + formatted).appendTo("#result");
+ }
+
+ function formatItem(row) {
+ return row[0] + " (<strong>id: " + row[1] + "</strong>)";
+ }
+ function formatResult(row) {
+ return row[0].replace(/(<.+?>)/gi, '');
+ }
+
+ $("#newSearch").autocomplete("/searchjs.asp", {
+ width: 179,
+ autoFill: false,
+ //delay:10,
+ minChars:2,
+ cacheLength: 10,
+ onFindValue:findValue,
+ onItemSelect: selectItem,
+ selectFirst: false
+
+ });
+
+ $("#search2").autocomplete("/searchjs.asp", {
+ width: 160,
+ autoFill: false,
+ //delay:10,
+ minChars:2,
+ cacheLength: 10,
+ onFindValue:findValue2,
+ onItemSelect: selectItem2,
+ selectFirst: false
+
+ });
+
+});
+
+</script>
+<!-- END CODE NEEDED TO MAKE THE SEARCH BOX WORK -->
+
+<!-- FORESEE Code -->
+<script type="text/javascript" src="/foresee/foresee-trigger.js"></script>
+
+<a href="#after_header" class="responsive_header-skip_header">Skip Header</a>
+<header>
+<div id="responsive_header">
+ <div id="responsive_header-right_side">
+ <ul id="responsive_header-links">
+ <li id="responsive_header-twitter" title="Visit FDIC on Twitter"><a tabindex="1" href="/social.html?site=http://twitter.com/FDICgov">Visit FDIC on Twitter</a></li>
+ <li id="responsive_header-facebook" title="Visit FDIC on Facebook"><a tabindex="1" href="/social.html?site=http://www.facebook.com/FDICgov">Visit FDIC on Facebook</a></li>
+ <li id="responsive_header-fdicchannel" title="Visit FDIC on YouTube"><a tabindex="1" href="/social.html?site=http://www.youtube.com/user/FDICchannel">Visit FDIC on YouTube</a></li>
+ <li id="responsive_header-rss" title="FDIC RSS Feed"><a tabindex="1" href="/rss.html">FDIC RSS Feed</a></li>
+ <li id="responsive_header-subscribe" title="Subscribe to FDIC alerts"><a tabindex="1" href="http://service.govdelivery.com/service/multi_subscribe.html?code=USFDIC">Subscribe to FDIC alerts</a></li>
+ </ul>
+ <div id="responsive_header-search">
+ <a href="/search/advanced.html" class="search" title="Advanced Search">Advanced Search</a>
+ <form id="googlesearch" action="http://search.fdic.gov/search" method="get" name="Search box for FDIC.gov">
+ <fieldset>
+ <div class="form">
+ <label for="q">Search FDIC.gov</label>
+ <input tabindex="1" id="newSearch" name="q" class="field" type="text" style="outline: 0 none;" value="Search FDIC..." onblur="if(this.value == '') {this.value = 'Search FDIC...';}" onfocus="if(this.value == 'Search FDIC...') {this.value = '';}" />
+ <input tabindex="1" id="searchsubmit" class="submit" alt="Search Icon" title="Search Icon" type="submit" value="" />
+ <input value="date:D:L:d1" name="sort" type="hidden" />
+
+ <input value="xml_no_dtd" name="output" type="hidden" />
+ <input value="UTF-8" name="ie" type="hidden" />
+ <input value="UTF-8" name="oe" type="hidden" />
+ <input value="wwwGOV" name="client" type="hidden" />
+ <input value="wwwGOV" name="proxystylesheet" type="hidden" />
+ <input value="default" name="site" type="hidden" />
+ </div>
+ </fieldset>
+ </form>
+ </div>
+ </div>
+ <!-- close right side -->
+ <a id="responsive_header-fdic_logo" href="/" title="FDIC Homepage">FDIC Homepage</a>
+ <h1>Federal Deposit<br>Insurance Corporation</h1>
+ <h2>Each depositor insured to at least $250,000 per insured bank</h2>
+ <div class="clear"></div>
+ <nav>
+ <div id="responsive_header_nav">
+ <div id="responsive_header-topnav">
+ <div id="responsive_header-topnav-downarrow" onclick="show_rwdnav(this)"></div>
+ <ul id="responsive_header-topnav-list">
+ <li id="responsive_header-topnav-home" title="Home" onmouseover="show_responsive_header_subnav(this)"><a href="/">Home</a></li>
+ <li id="responsive_header-topnav-deposit" title="Deposit Insurance" onmouseover="show_responsive_header_subnav(this)"><a href="/deposit/">Deposit Insurance</a></li>
+ <li id="responsive_header-topnav-consumers" title="Consumer Protection" onmouseover="show_responsive_header_subnav(this)"><a href="/consumers/">Consumer Protection</a></li>
+ <li id="responsive_header-topnav-bank" title="Industry Analysis" onmouseover="show_responsive_header_subnav(this)"><a href="/bank/">Industry Analysis</a></li>
+ <li id="responsive_header-topnav-regulations" title="Regulations & Examinations" onmouseover="show_responsive_header_subnav(this)"><a href="/regulations/">Regulations & Examinations</a></li>
+ <li id="responsive_header-topnav-buying" title="Asset Sales" onmouseover="show_responsive_header_subnav(this)"><a href="/buying/">Asset Sales</a></li>
+ <li id="responsive_header-topnav-news" title="News & Events" onmouseover="show_responsive_header_subnav(this)"><a href="/news/">News & Events</a></li>
+ <li id="responsive_header-topnav-about" title="About FDIC" onmouseover="show_responsive_header_subnav(this)"><a href="/about/">About FDIC</a></li>
+ </ul>
+ <div class="clear"></div>
+ </div>
+ <div id="responsive_header-topnav_subnav">
+ <div id="responsive_header-topnav_subnav-downarrow" onclick="show_rwdnav(this)"></div>
+ <ul id="responsive_header-topnav-home_subnav"><li><a> </a></li></ul>
+ <ul id="responsive_header-topnav-deposit_subnav">
+ <li title="BankFind"><a href="http://research.fdic.gov/bankfind/">BankFind</a></li>
+ <li title="Are My Deposits Insured?"><a href="/deposit/deposits/">Are My Deposits Insured?</a></li>
+ <li title="Uninsured Investments"><a href="/deposit/investments/">Uninsured Investments</a></li>
+ <li title="The Deposit Insurance Fund"><a href="/deposit/insurance/index.html">The Deposit Insurance Fund</a></li>
+ <li title="International Deposit Insurance"><a href="/deposit/deposits/international/">International Deposit Insurance</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-consumers_subnav">
+ <li title="Consumer News & Information"><a href="/consumers/consumer/">Consumer News & Information</a></li>
+ <li title="Loans & Mortgages"><a href="/consumers/loans/">Loans & Mortgages</a></li>
+ <li title="Banking & Your Money"><a href="/consumers/banking/">Banking & Your Money</a></li>
+ <li title="Financial Education & Literacy"><a href="/consumers/education/">Financial Education & Literacy</a></li>
+ <li title="Community Affairs"><a href="/consumers/community/">Community Affairs</a></li>
+ <li title="Identity Theft & Fraud"><a href="/consumers/theft/">Identity Theft & Fraud</a></li>
+ <li title="Consumer Financial Privacy"><a href="/consumers/privacy/">Consumer Financial Privacy</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-bank_subnav">
+ <li title="Bank Data & Statistics"><a href="/bank/statistical/">Bank Data & Statistics</a></li>
+ <li title="Research & Analysis"><a href="/bank/analytical/">Research & Analysis</a></li>
+ <li title="Failed Banks"><a href="/bank/individual/failed/">Failed Banks</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-regulations_subnav">
+ <li title="Bank Examinations"><a href="/regulations/examinations/">Bank Examinations</a></li>
+ <li title="Laws & Regulations"><a href="/regulations/laws/">Laws & Regulations</a></li>
+ <li title="Resources for Bank Officers & Directors"><a href="/regulations/resources/">Resources for Bank Officers & Directors</a></li>
+ <li title="FDICconnect"><a href="http://www.fdicconnect.gov/">FDIC<em>connect</em></a></li>
+ <li title="Required Financial Reports"><a href="/regulations/required/">Required Financial Reports</a></li>
+ <li title="Examiner Training Programs"><a href="/regulations/examiner/">Examiner Training Programs</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-buying_subnav">
+ <li title="Loan Sales"><a href="/buying/loan/">Loan Sales</a></li>
+ <li title="Real Estate Sales"><a href="/buying/owned/">Real Estate and Property Marketplace</a></li>
+ <li title="Financial Asset Sales"><a href="/buying/financial/">Financial Asset Sales</a></li>
+ <li title="Servicing Sales Announcements"><a href="/buying/servicing/">Servicing Sales Announcements</a></li>
+ <li title="Other Asset Sales"><a href="/buying/otherasset/">Other Asset Sales</a></li>
+ <li title="Historical Sales"><a href="/buying/historical/">Historical Sales</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-news_subnav">
+ <li title="Press Releases"><a href="/news/news/press/2013/">Press Releases</a></li>
+ <li title="Online Press Room"><a href="https://fdicsurvey.inquisiteasp.com/fdic/cgi-bin/qwebcorporate.dll?M58TRS">Online Press Room</a></li>
+ <li title="Conferences & Events"><a href="/news/conferences/">Conferences & Events</a></li>
+ <li title="Financial Institution Letters"><a href="/news/news/financial/2013/">Financial Institution Letters</a></li>
+ <li title="Special Alerts"><a href="/news/news/SpecialAlert/2012/">Special Alerts</a></li>
+ <li title="Letters to the Editor/Opinion Editorials"><a href="/news/letters/">Letters to the Editor/Opinion Editorials</a></li>
+ <li title="Speeches & Testimony"><a href="/news/news/speeches/chairman/">Speeches & Testimony</a></li>
+ </ul>
+ <ul id="responsive_header-topnav-about_subnav">
+ <li title="Mission & Purpose"><a href="/about/index.html#1">Mission & Purpose</a></span></li>
+ <li title="Advisory Committees"><a href="/about/index.html#2">Advisory Committees</a></span></li>
+ <li title="Careers with the FDIC"><a href="/about/index.html#3">Careers with the FDIC</a></span></li>
+ <li title="Management Team"><a href="/about/index.html#4">Management Team</a></span></li>
+ <li title="Plans & Reports"><a href="/about/index.html#5">Plans & Reports</a></span></li>
+ <li title="What We Can Do for You"><a href="/about/index.html#6">What We Can Do for You</a></span></li>
+ <li title="Diversity at the FDIC"><a href="/about/index.html#7">Diversity at the FDIC</a></span></li>
+ </ul>
+ </div><!-- Close subnav -->
+ <div class="clear"></div>
+ </div>
+ </nav>
+</div>
+</header>
+<a id="after_header" name="after_header"></a>
+<script type="text/javascript">
+prepare_responsive_header_nav();
+</script>
+<!-- END of Header -->
+
+<div id="breadcrumbs"><a href="/">Home</a> > <a href="/bank/">Industry Analysis</a> > <a href="/bank/individual/failed/">Failed Banks</a> > Failed Bank List</div>
+
+<div id="content" class="failed_bank_list">
+
+ <h1 class="page_title">Failed Bank List</h1>
+
+ <p>The FDIC is often appointed as receiver for failed banks. This page contains useful information for the customers and vendors of these banks. This includes information on the acquiring bank (if applicable), how your accounts and loans are affected, and how vendors can file claims against the receivership. <a href="http://www2.fdic.gov/drrip/cs/index.asp">Failed Financial Institution Contact Search</a> displays point of contact information related to failed banks.</p>
+
+ <p>This list includes banks which have failed since October 1, 2000. To search for banks that failed prior to those on this page, visit this link: <a href="http://www2.fdic.gov/hsob/SelectRpt.asp?EntryTyp=30">Failures and Assistance Transactions</a></p>
+
+ <p><a href="banklist.csv">Failed Bank List</a> - CSV file (Updated on Mondays. Also opens in Excel - <a href="/excel.html">Excel Help</a>)</p>
+
+ <p class="small_screen_warning">Due to the small screen size some information is no longer visible.<br>Full information available when viewed on a larger screen.</p>
+
+ <script type="text/javascript">
+ <!--
+ document.writeln("<p><em>Click arrows next to headers to sort in Ascending or Descending order.</em></p>");
+ //-->
+ </script>
+
+ <div id="table_wrapper">
+ <table id="table" class="sortable">
+ <thead>
+ <tr>
+ <th id="institution" scope="col">Bank Name</th>
+ <th id="city" class="nosort" scope="col">City</th>
+ <th id="state" scope="col">ST</th>
+ <th id="cert" class="nosort" scope="col">CERT</th>
+ <th id="ai" scope="col">Acquiring Institution</th>
+ <th id="closing" scope="col">Closing Date</th>
+ <th id="updated" scope="col">Updated Date</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <td class="institution"><a href="kenosha.html">Banks of Wisconsin d/b/a Bank of Kenosha</a></td>
+ <td class="city">Kenosha</td>
+ <td class="state">WI</td>
+ <td class="cert">35386</td>
+ <td class="ai">North Shore Bank, FSB</td>
+ <td class="closing">May 31, 2013</td>
+ <td class="updated">May 31, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="centralaz.html">Central Arizona Bank</a></td>
+ <td class="city">Scottsdale</td>
+ <td class="state">AZ</td>
+ <td class="cert">34527</td>
+ <td class="ai">Western State Bank</td>
+ <td class="closing">May 14, 2013</td>
+ <td class="updated">May 20, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sunrisebank.html">Sunrise Bank</a></td>
+ <td class="city">Valdosta</td>
+ <td class="state">GA</td>
+ <td class="cert">58185</td>
+ <td class="ai">Synovus Bank</td>
+ <td class="closing">May 10, 2013</td>
+ <td class="updated">May 21, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pisgahcommbk.html">Pisgah Community Bank</a></td>
+ <td class="city">Asheville</td>
+ <td class="state">NC</td>
+ <td class="cert">58701</td>
+ <td class="ai">Capital Bank, N.A.</td>
+ <td class="closing">May 10, 2013</td>
+ <td class="updated">May 14, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="douglascb.html">Douglas County Bank</a></td>
+ <td class="city">Douglasville</td>
+ <td class="state">GA</td>
+ <td class="cert">21649</td>
+ <td class="ai">Hamilton State Bank</td>
+ <td class="closing">April 26, 2013</td>
+ <td class="updated">May 16, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="parkway.html">Parkway Bank</a></td>
+ <td class="city">Lenoir</td>
+ <td class="state">NC</td>
+ <td class="cert">57158</td>
+ <td class="ai">CertusBank, National Association</td>
+ <td class="closing">April 26, 2013</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="chipola.html">Chipola Community Bank</a></td>
+ <td class="city">Marianna</td>
+ <td class="state">FL</td>
+ <td class="cert">58034</td>
+ <td class="ai">First Federal Bank of Florida</td>
+ <td class="closing">April 19, 2013</td>
+ <td class="updated">May 16, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heritagebank-fl.html">Heritage Bank of North Florida</a></td>
+ <td class="city">Orange Park</td>
+ <td class="state">FL</td>
+ <td class="cert">26680</td>
+ <td class="ai">FirstAtlantic Bank</td>
+ <td class="closing">April 19, 2013</td>
+ <td class="updated">May 16, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstfederal-ky.html">First Federal Bank</a></td>
+ <td class="city">Lexington</td>
+ <td class="state">KY</td>
+ <td class="cert">29594</td>
+ <td class="ai">Your Community Bank</td>
+ <td class="closing">April 19, 2013</td>
+ <td class="updated">April 23, 2013</td>
+ </tr>
+ <td class="institution"><a href="goldcanyon.html">Gold Canyon Bank</a></td>
+ <td class="city">Gold Canyon</td>
+ <td class="state">AZ</td>
+ <td class="cert">58066</td>
+ <td class="ai">First Scottsdale Bank, National Association</td>
+ <td class="closing">April 5, 2013</td>
+ <td class="updated">April 9, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="frontier-ga.html">Frontier Bank</a></td>
+ <td class="city">LaGrange</td>
+ <td class="state">GA</td>
+ <td class="cert">16431</td>
+ <td class="ai">HeritageBank of the South</td>
+ <td class="closing">March 8, 2013</td>
+ <td class="updated">March 26, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="covenant-il.html">Covenant Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">22476</td>
+ <td class="ai">Liberty Bank and Trust Company</td>
+ <td class="closing">February 15, 2013</td>
+ <td class="updated">March 4, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="1stregents.html">1st Regents Bank</a></td>
+ <td class="city">Andover</td>
+ <td class="state">MN</td>
+ <td class="cert">57157</td>
+ <td class="ai">First Minnesota Bank</td>
+ <td class="closing">January 18, 2013</td>
+ <td class="updated">February 28, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westside.html">Westside Community Bank</a></td>
+ <td class="city">University Place</td>
+ <td class="state">WA</td>
+ <td class="cert">33997</td>
+ <td class="ai">Sunwest Bank</td>
+ <td class="closing">January 11, 2013</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cmbkozarks.html">Community Bank of the Ozarks</a></td>
+ <td class="city">Sunrise Beach</td>
+ <td class="state">MO</td>
+ <td class="cert">27331</td>
+ <td class="ai">Bank of Sullivan</td>
+ <td class="closing">December 14, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="hometown.html">Hometown Community Bank</a></td>
+ <td class="city">Braselton</td>
+ <td class="state">GA</td>
+ <td class="cert">57928</td>
+ <td class="ai">CertusBank, National Association</td>
+ <td class="closing">November 16, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cfnb.html">Citizens First National Bank</a></td>
+ <td class="city">Princeton</td>
+ <td class="state">IL</td>
+ <td class="cert">3731</td>
+ <td class="ai">Heartland Bank and Trust Company</td>
+ <td class="closing">November 2, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heritage_fl.html">Heritage Bank of Florida</a></td>
+ <td class="city">Lutz</td>
+ <td class="state">FL</td>
+ <td class="cert">35009</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">November 2, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="novabank.html">NOVA Bank</a></td>
+ <td class="city">Berwyn</td>
+ <td class="state">PA</td>
+ <td class="cert">27148</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">October 26, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="excelbank.html">Excel Bank</a></td>
+ <td class="city">Sedalia</td>
+ <td class="state">MO</td>
+ <td class="cert">19189</td>
+ <td class="ai">Simmons First National Bank</td>
+ <td class="closing">October 19, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firsteastside.html">First East Side Savings Bank</a></td>
+ <td class="city">Tamarac</td>
+ <td class="state">FL</td>
+ <td class="cert">28144</td>
+ <td class="ai">Stearns Bank N.A.</td>
+ <td class="closing">October 19, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="gulfsouth.html">GulfSouth Private Bank</a></td>
+ <td class="city">Destin</td>
+ <td class="state">FL</td>
+ <td class="cert">58073</td>
+ <td class="ai">SmartBank</td>
+ <td class="closing">October 19, 2012</td>
+ <td class="updated">January 24, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstunited.html">First United Bank</a></td>
+ <td class="city">Crete</td>
+ <td class="state">IL</td>
+ <td class="cert">20685</td>
+ <td class="ai">Old Plank Trail Community Bank, National Association</td>
+ <td class="closing">September 28, 2012</td>
+ <td class="updated">November 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="truman.html">Truman Bank</a></td>
+ <td class="city">St. Louis</td>
+ <td class="state">MO</td>
+ <td class="cert">27316</td>
+ <td class="ai">Simmons First National Bank</td>
+ <td class="closing">September 14, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcommbk_mn.html">First Commercial Bank</a></td>
+ <td class="city">Bloomington</td>
+ <td class="state">MN</td>
+ <td class="cert">35246</td>
+ <td class="ai">Republic Bank & Trust Company</td>
+ <td class="closing">September 7, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="waukegan.html">Waukegan Savings Bank</a></td>
+ <td class="city">Waukegan</td>
+ <td class="state">IL</td>
+ <td class="cert">28243</td>
+ <td class="ai">First Midwest Bank</td>
+ <td class="closing">August 3, 2012</td>
+ <td class="updated">October 11, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="jasper.html">Jasper Banking Company</a></td>
+ <td class="city">Jasper</td>
+ <td class="state">GA</td>
+ <td class="cert">16240</td>
+ <td class="ai">Stearns Bank N.A.</td>
+ <td class="closing">July 27, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="secondfederal.html">Second Federal Savings and Loan Association of Chicago</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">27986</td>
+ <td class="ai">Hinsdale Bank & Trust Company</td>
+ <td class="closing">July 20, 2012</td>
+ <td class="updated">January 14, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heartland.html">Heartland Bank</a></td>
+ <td class="city">Leawood</td>
+ <td class="state">KS</td>
+ <td class="cert">1361</td>
+ <td class="ai">Metcalf Bank</td>
+ <td class="closing">July 20, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cherokee.html">First Cherokee State Bank</a></td>
+ <td class="city">Woodstock</td>
+ <td class="state">GA</td>
+ <td class="cert">32711</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">July 20, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="georgiatrust.html">Georgia Trust Bank</a></td>
+ <td class="city">Buford</td>
+ <td class="state">GA</td>
+ <td class="cert">57847</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">July 20, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="royalpalm.html">The Royal Palm Bank of Florida</a></td>
+ <td class="city">Naples</td>
+ <td class="state">FL</td>
+ <td class="cert">57096</td>
+ <td class="ai">First National Bank of the Gulf Coast</td>
+ <td class="closing">July 20, 2012</td>
+ <td class="updated">January 7, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="glasgow.html">Glasgow Savings Bank</a></td>
+ <td class="city">Glasgow</td>
+ <td class="state">MO</td>
+ <td class="cert">1056</td>
+ <td class="ai">Regional Missouri Bank</td>
+ <td class="closing">July 13, 2012</td>
+ <td class="updated">October 11, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="montgomery.html">Montgomery Bank & Trust</a></td>
+ <td class="city">Ailey</td>
+ <td class="state">GA</td>
+ <td class="cert">19498</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">July 6, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="farmersbank.html">The Farmers Bank of Lynchburg</a></td>
+ <td class="city">Lynchburg</td>
+ <td class="state">TN</td>
+ <td class="cert">1690</td>
+ <td class="ai">Clayton Bank and Trust</td>
+ <td class="closing">June 15, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="securityexchange.html">Security Exchange Bank</a></td>
+ <td class="city">Marietta</td>
+ <td class="state">GA</td>
+ <td class="cert">35299</td>
+ <td class="ai">Fidelity Bank</td>
+ <td class="closing">June 15, 2012</td>
+ <td class="updated">October 10, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="putnam.html">Putnam State Bank</a></td>
+ <td class="city">Palatka</td>
+ <td class="state">FL</td>
+ <td class="cert">27405</td>
+ <td class="ai">Harbor Community Bank</td>
+ <td class="closing">June 15, 2012</td>
+ <td class="updated">October 10, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="waccamaw.html">Waccamaw Bank</a></td>
+ <td class="city">Whiteville</td>
+ <td class="state">NC</td>
+ <td class="cert">34515</td>
+ <td class="ai">First Community Bank</td>
+ <td class="closing">June 8, 2012</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ftsb.html">Farmers' and Traders' State Bank</a></td>
+ <td class="city">Shabbona</td>
+ <td class="state">IL</td>
+ <td class="cert">9257</td>
+ <td class="ai">First State Bank</td>
+ <td class="closing">June 8, 2012</td>
+ <td class="updated">October 10, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="carolina.html">Carolina Federal Savings Bank</a></td>
+ <td class="city">Charleston</td>
+ <td class="state">SC</td>
+ <td class="cert">35372</td>
+ <td class="ai">Bank of North Carolina</td>
+ <td class="closing">June 8, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcapital.html">First Capital Bank</a></td>
+ <td class="city">Kingfisher</td>
+ <td class="state">OK</td>
+ <td class="cert">416</td>
+ <td class="ai">F & M Bank</td>
+ <td class="closing">June 8, 2012</td>
+ <td class="updated">October 10, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="alabamatrust.html">Alabama Trust Bank, National Association</a></td>
+ <td class="city">Sylacauga</td>
+ <td class="state">AL</td>
+ <td class="cert">35224</td>
+ <td class="ai">Southern States Bank</td>
+ <td class="closing">May 18, 2012</td>
+ <td class="updated">May 20, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="securitybank.html">Security Bank, National Association</a></td>
+ <td class="city">North Lauderdale</td>
+ <td class="state">FL</td>
+ <td class="cert">23156</td>
+ <td class="ai">Banesco USA</td>
+ <td class="closing">May 4, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="palmdesert.html">Palm Desert National Bank</a></td>
+ <td class="city">Palm Desert</td>
+ <td class="state">CA</td>
+ <td class="cert">23632</td>
+ <td class="ai">Pacific Premier Bank</td>
+ <td class="closing">April 27, 2012</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="plantation.html">Plantation Federal Bank</a></td>
+ <td class="city">Pawleys Island</td>
+ <td class="state">SC</td>
+ <td class="cert">32503</td>
+ <td class="ai">First Federal Bank</td>
+ <td class="closing">April 27, 2012</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="interbank.html">Inter Savings Bank, fsb D/B/A InterBank, fsb</a></td>
+ <td class="city">Maple Grove</td>
+ <td class="state">MN</td>
+ <td class="cert">31495</td>
+ <td class="ai">Great Southern Bank</td>
+ <td class="closing">April 27, 2012</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="harvest.html">HarVest Bank of Maryland</a></td>
+ <td class="city">Gaithersburg</td>
+ <td class="state">MD</td>
+ <td class="cert">57766</td>
+ <td class="ai">Sonabank</td>
+ <td class="closing">April 27, 2012</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="easternshore.html">Bank of the Eastern Shore</a></td>
+ <td class="city">Cambridge</td>
+ <td class="state">MD</td>
+ <td class="cert">26759</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">April 27, 2012</td>
+ <td class="updated">October 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fortlee.html">Fort Lee Federal Savings Bank, FSB</a></td>
+ <td class="city">Fort Lee</td>
+ <td class="state">NJ</td>
+ <td class="cert">35527</td>
+ <td class="ai">Alma Bank</td>
+ <td class="closing">April 20, 2012</td>
+ <td class="updated">May 17, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fidelity.html">Fidelity Bank</a></td>
+ <td class="city">Dearborn</td>
+ <td class="state">MI</td>
+ <td class="cert">33883</td>
+ <td class="ai">The Huntington National Bank</td>
+ <td class="closing">March 30, 2012</td>
+ <td class="updated">May 16, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="premier-il.html">Premier Bank</a></td>
+ <td class="city">Wilmette</td>
+ <td class="state">IL</td>
+ <td class="cert">35419</td>
+ <td class="ai">International Bank of Chicago</td>
+ <td class="closing">March 23, 2012</td>
+ <td class="updated">October 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="covenant.html">Covenant Bank & Trust</a></td>
+ <td class="city">Rock Spring</td>
+ <td class="state">GA</td>
+ <td class="cert">58068</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">March 23, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newcity.html">New City Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">57597</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 9, 2012</td>
+ <td class="updated">October 29, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="global.html">Global Commerce Bank</a></td>
+ <td class="city">Doraville</td>
+ <td class="state">GA</td>
+ <td class="cert">34046</td>
+ <td class="ai">Metro City Bank</td>
+ <td class="closing">March 2, 2012</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="homesvgs.html">Home Savings of America</a></td>
+ <td class="city">Little Falls</td>
+ <td class="state">MN</td>
+ <td class="cert">29178</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">February 24, 2012</td>
+ <td class="updated">December 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cbg.html">Central Bank of Georgia</a></td>
+ <td class="city">Ellaville</td>
+ <td class="state">GA</td>
+ <td class="cert">5687</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">February 24, 2012</td>
+ <td class="updated">August 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="scbbank.html">SCB Bank</a></td>
+ <td class="city">Shelbyville</td>
+ <td class="state">IN</td>
+ <td class="cert">29761</td>
+ <td class="ai">First Merchants Bank, National Association</td>
+ <td class="closing">February 10, 2012</td>
+ <td class="updated">March 25, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cnbt.html">Charter National Bank and Trust</a></td>
+ <td class="city">Hoffman Estates</td>
+ <td class="state">IL</td>
+ <td class="cert">23187</td>
+ <td class="ai">Barrington Bank & Trust Company, National Association</td>
+ <td class="closing">February 10, 2012</td>
+ <td class="updated">March 25, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankeast.html">BankEast</a></td>
+ <td class="city">Knoxville</td>
+ <td class="state">TN</td>
+ <td class="cert">19869</td>
+ <td class="ai">U.S.Bank National Association</td>
+ <td class="closing">January 27, 2012</td>
+ <td class="updated">March 8, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="patriot-mn.html">Patriot Bank Minnesota</a></td>
+ <td class="city">Forest Lake</td>
+ <td class="state">MN</td>
+ <td class="cert">34823</td>
+ <td class="ai">First Resource Bank</td>
+ <td class="closing">January 27, 2012</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="tcb.html">Tennessee Commerce Bank</a></td>
+ <td class="city">Franklin</td>
+ <td class="state">TN</td>
+ <td class="cert">35296</td>
+ <td class="ai">Republic Bank & Trust Company</td>
+ <td class="closing">January 27, 2012</td>
+ <td class="updated">November 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fgbtcj.html">First Guaranty Bank and Trust Company of Jacksonville</a></td>
+ <td class="city">Jacksonville</td>
+ <td class="state">FL</td>
+ <td class="cert">16579</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">January 27, 2012</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americaneagle.html">American Eagle Savings Bank</a></td>
+ <td class="city">Boothwyn</td>
+ <td class="state">PA</td>
+ <td class="cert">31581</td>
+ <td class="ai">Capital Bank, N.A.</td>
+ <td class="closing">January 20, 2012</td>
+ <td class="updated">January 25, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firststatebank-ga.html">The First State Bank</a></td>
+ <td class="city">Stockbridge</td>
+ <td class="state">GA</td>
+ <td class="cert">19252</td>
+ <td class="ai">Hamilton State Bank</td>
+ <td class="closing">January 20, 2012</td>
+ <td class="updated">January 25, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cfsb.html">Central Florida State Bank</a></td>
+ <td class="city">Belleview</td>
+ <td class="state">FL</td>
+ <td class="cert">57186</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">January 20, 2012</td>
+ <td class="updated">January 25, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westernnatl.html">Western National Bank</a></td>
+ <td class="city">Phoenix</td>
+ <td class="state">AZ</td>
+ <td class="cert">57917</td>
+ <td class="ai">Washington Federal</td>
+ <td class="closing">December 16, 2011</td>
+ <td class="updated">August 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="premier-fl.html">Premier Community Bank of the Emerald Coast</a></td>
+ <td class="city">Crestview</td>
+ <td class="state">FL</td>
+ <td class="cert">58343</td>
+ <td class="ai">Summit Bank</td>
+ <td class="closing">December 16, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="centralprog.html">Central Progressive Bank</a></td>
+ <td class="city">Lacombe</td>
+ <td class="state">LA</td>
+ <td class="cert">19657</td>
+ <td class="ai">First NBC Bank</td>
+ <td class="closing">November 18, 2011</td>
+ <td class="updated">August 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="polkcounty.html">Polk County Bank</a></td>
+ <td class="city">Johnston</td>
+ <td class="state">IA</td>
+ <td class="cert">14194</td>
+ <td class="ai">Grinnell State Bank</td>
+ <td class="closing">November 18, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="rockmart.html">Community Bank of Rockmart</a></td>
+ <td class="city">Rockmart</td>
+ <td class="state">GA</td>
+ <td class="cert">57860</td>
+ <td class="ai">Century Bank of Georgia</td>
+ <td class="closing">November 10, 2011</td>
+ <td class="updated">August 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sunfirst.html">SunFirst Bank</a></td>
+ <td class="city">Saint George</td>
+ <td class="state">UT</td>
+ <td class="cert">57087</td>
+ <td class="ai">Cache Valley Bank</td>
+ <td class="closing">November 4, 2011</td>
+ <td class="updated">November 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="midcity.html">Mid City Bank, Inc.</a></td>
+ <td class="city">Omaha</td>
+ <td class="state">NE</td>
+ <td class="cert">19397</td>
+ <td class="ai">Premier Bank</td>
+ <td class="closing">November 4, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="allamerican.html ">All American Bank</a></td>
+ <td class="city">Des Plaines</td>
+ <td class="state">IL</td>
+ <td class="cert">57759</td>
+ <td class="ai">International Bank of Chicago</td>
+ <td class="closing">October 28, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commbanksco.html">Community Banks of Colorado</a></td>
+ <td class="city">Greenwood Village</td>
+ <td class="state">CO</td>
+ <td class="cert">21132</td>
+ <td class="ai">Bank Midwest, N.A.</td>
+ <td class="closing">October 21, 2011</td>
+ <td class="updated">January 2, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commcapbk.html">Community Capital Bank</a></td>
+ <td class="city">Jonesboro</td>
+ <td class="state">GA</td>
+ <td class="cert">57036</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">October 21, 2011</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="decatur.html">Decatur First Bank</a></td>
+ <td class="city">Decatur</td>
+ <td class="state">GA</td>
+ <td class="cert">34392</td>
+ <td class="ai">Fidelity Bank</td>
+ <td class="closing">October 21, 2011</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="oldharbor.html">Old Harbor Bank</a></td>
+ <td class="city">Clearwater</td>
+ <td class="state">FL</td>
+ <td class="cert">57537</td>
+ <td class="ai">1st United Bank</td>
+ <td class="closing">October 21, 2011</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="countrybank.html">Country Bank</a></td>
+ <td class="city">Aledo</td>
+ <td class="state">IL</td>
+ <td class="cert">35395</td>
+ <td class="ai">Blackhawk Bank & Trust</td>
+ <td class="closing">October 14, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firststatebank-nj.html">First State Bank</a></td>
+ <td class="city">Cranford</td>
+ <td class="state">NJ</td>
+ <td class="cert">58046</td>
+ <td class="ai">Northfield Bank</td>
+ <td class="closing">October 14, 2011</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="blueridge.html">Blue Ridge Savings Bank, Inc.</a></td>
+ <td class="city">Asheville</td>
+ <td class="state">NC</td>
+ <td class="cert">32347</td>
+ <td class="ai">Bank of North Carolina</td>
+ <td class="closing">October 14, 2011</td>
+ <td class="updated">November 8, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="piedmont-ga.html">Piedmont Community Bank</a></td>
+ <td class="city">Gray</td>
+ <td class="state">GA</td>
+ <td class="cert">57256</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">October 14, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sunsecurity.html">Sun Security Bank</a></td>
+ <td class="city">Ellington</td>
+ <td class="state">MO</td>
+ <td class="cert">20115</td>
+ <td class="ai">Great Southern Bank</td>
+ <td class="closing">October 7, 2011</td>
+ <td class="updated">November 7, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="riverbank.html">The RiverBank</a></td>
+ <td class="city">Wyoming</td>
+ <td class="state">MN</td>
+ <td class="cert">10216</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">October 7, 2011</td>
+ <td class="updated">November 7, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstintlbank.html">First International Bank</a></td>
+ <td class="city">Plano</td>
+ <td class="state">TX</td>
+ <td class="cert">33513</td>
+ <td class="ai">American First National Bank</td>
+ <td class="closing">September 30, 2011</td>
+ <td class="updated">October 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cbnc.html">Citizens Bank of Northern California</a></td>
+ <td class="city">Nevada City</td>
+ <td class="state">CA</td>
+ <td class="cert">33983</td>
+ <td class="ai">Tri Counties Bank</td>
+ <td class="closing">September 23, 2011</td>
+ <td class="updated">October 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="boc-va.html">Bank of the Commonwealth</a></td>
+ <td class="city">Norfolk</td>
+ <td class="state">VA</td>
+ <td class="cert">20408</td>
+ <td class="ai">Southern Bank and Trust Company</td>
+ <td class="closing">September 23, 2011</td>
+ <td class="updated">October 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fnbf.html">The First National Bank of Florida</a></td>
+ <td class="city">Milton</td>
+ <td class="state">FL</td>
+ <td class="cert">25155</td>
+ <td class="ai">CharterBank</td>
+ <td class="closing">September 9, 2011</td>
+ <td class="updated">September 6, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="creekside.html">CreekSide Bank</a></td>
+ <td class="city">Woodstock</td>
+ <td class="state">GA</td>
+ <td class="cert">58226</td>
+ <td class="ai">Georgia Commerce Bank</td>
+ <td class="closing">September 2, 2011</td>
+ <td class="updated">September 6, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="patriot.html">Patriot Bank of Georgia</a></td>
+ <td class="city">Cumming</td>
+ <td class="state">GA</td>
+ <td class="cert">58273</td>
+ <td class="ai">Georgia Commerce Bank</td>
+ <td class="closing">September 2, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstchoice-il.html">First Choice Bank</a></td>
+ <td class="city">Geneva</td>
+ <td class="state">IL</td>
+ <td class="cert">57212</td>
+ <td class="ai">Inland Bank & Trust</td>
+ <td class="closing">August 19, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstsouthern-ga.html">First Southern National Bank</a></td>
+ <td class="city">Statesboro</td>
+ <td class="state">GA</td>
+ <td class="cert">57239</td>
+ <td class="ai">Heritage Bank of the South</td>
+ <td class="closing">August 19, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lydian.html">Lydian Private Bank</a></td>
+ <td class="city">Palm Beach</td>
+ <td class="state">FL</td>
+ <td class="cert">35356</td>
+ <td class="ai">Sabadell United Bank, N.A.</td>
+ <td class="closing">August 19, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="publicsvgs.html">Public Savings Bank</a></td>
+ <td class="city">Huntingdon Valley</td>
+ <td class="state">PA</td>
+ <td class="cert">34130</td>
+ <td class="ai">Capital Bank, N.A.</td>
+ <td class="closing">August 18, 2011</td>
+ <td class="updated">August 15, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fnbo.html">The First National Bank of Olathe</a></td>
+ <td class="city">Olathe</td>
+ <td class="state">KS</td>
+ <td class="cert">4744</td>
+ <td class="ai">Enterprise Bank & Trust</td>
+ <td class="closing">August 12, 2011</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="whitman.html">Bank of Whitman</a></td>
+ <td class="city">Colfax</td>
+ <td class="state">WA</td>
+ <td class="cert">22528</td>
+ <td class="ai">Columbia State Bank</td>
+ <td class="closing">August 5, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="shorewood.html">Bank of Shorewood</a></td>
+ <td class="city">Shorewood</td>
+ <td class="state">IL</td>
+ <td class="cert">22637</td>
+ <td class="ai">Heartland Bank and Trust Company</td>
+ <td class="closing">August 5, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="integra.html">Integra Bank National Association</a></td>
+ <td class="city">Evansville</td>
+ <td class="state">IN</td>
+ <td class="cert">4392</td>
+ <td class="ai">Old National Bank</td>
+ <td class="closing">July 29, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankmeridian.html">BankMeridian, N.A.</a></td>
+ <td class="city">Columbia</td>
+ <td class="state">SC</td>
+ <td class="cert">58222</td>
+ <td class="ai">SCBT National Association</td>
+ <td class="closing">July 29, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="vbb.html">Virginia Business Bank</a></td>
+ <td class="city">Richmond</td>
+ <td class="state">VA</td>
+ <td class="cert">58283</td>
+ <td class="ai">Xenith Bank</td>
+ <td class="closing">July 29, 2011</td>
+ <td class="updated">October 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofchoice.html">Bank of Choice</a></td>
+ <td class="city">Greeley</td>
+ <td class="state">CO</td>
+ <td class="cert">2994</td>
+ <td class="ai">Bank Midwest, N.A.</td>
+ <td class="closing">July 22, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="landmark.html">LandMark Bank of Florida</a></td>
+ <td class="city">Sarasota</td>
+ <td class="state">FL</td>
+ <td class="cert">35244</td>
+ <td class="ai">American Momentum Bank</td>
+ <td class="closing">July 22, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="southshore.html">Southshore Community Bank</a></td>
+ <td class="city">Apollo Beach</td>
+ <td class="state">FL</td>
+ <td class="cert">58056</td>
+ <td class="ai">American Momentum Bank</td>
+ <td class="closing">July 22, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="summitbank.html">Summit Bank</a></td>
+ <td class="city">Prescott</td>
+ <td class="state">AZ</td>
+ <td class="cert">57442</td>
+ <td class="ai">The Foothills Bank</td>
+ <td class="closing">July 15, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstpeoples.html">First Peoples Bank</a></td>
+ <td class="city">Port St. Lucie</td>
+ <td class="state">FL</td>
+ <td class="cert">34870</td>
+ <td class="ai">Premier American Bank, N.A.</td>
+ <td class="closing">July 15, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="hightrust.html">High Trust Bank</a></td>
+ <td class="city">Stockbridge</td>
+ <td class="state">GA</td>
+ <td class="cert">19554</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">July 15, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="onegeorgia.html">One Georgia Bank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">58238</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">July 15, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="signaturebank.html">Signature Bank</a></td>
+ <td class="city">Windsor</td>
+ <td class="state">CO</td>
+ <td class="cert">57835</td>
+ <td class="ai">Points West Community Bank</td>
+ <td class="closing">July 8, 2011</td>
+ <td class="updated">October 26, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="coloradocapital.html">Colorado Capital Bank</a></td>
+ <td class="city">Castle Rock</td>
+ <td class="state">CO</td>
+ <td class="cert">34522</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">July 8, 2011</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstchicago.html">First Chicago Bank & Trust</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">27935</td>
+ <td class="ai">Northbrook Bank & Trust Company</td>
+ <td class="closing">July 8, 2011</td>
+ <td class="updated">September 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mountain.html">Mountain Heritage Bank</a></td>
+ <td class="city">Clayton</td>
+ <td class="state">GA</td>
+ <td class="cert">57593</td>
+ <td class="ai">First American Bank and Trust Company</td>
+ <td class="closing">June 24, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fcbtb.html">First Commercial Bank of Tampa Bay</a></td>
+ <td class="city">Tampa</td>
+ <td class="state">FL</td>
+ <td class="cert">27583</td>
+ <td class="ai">Stonegate Bank</td>
+ <td class="closing">June 17, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mcintoshstate.html">McIntosh State Bank</a></td>
+ <td class="city">Jackson</td>
+ <td class="state">GA</td>
+ <td class="cert">19237</td>
+ <td class="ai">Hamilton State Bank</td>
+ <td class="closing">June 17, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="atlanticbanktrust.html">Atlantic Bank and Trust</a></td>
+ <td class="city">Charleston</td>
+ <td class="state">SC</td>
+ <td class="cert">58420</td>
+ <td class="ai">First Citizens Bank and Trust Company, Inc.</td>
+ <td class="closing">June 3, 2011</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstheritage.html">First Heritage Bank</a></td>
+ <td class="city">Snohomish</td>
+ <td class="state">WA</td>
+ <td class="cert">23626</td>
+ <td class="ai">Columbia State Bank</td>
+ <td class="closing">May 27, 2011</td>
+ <td class="updated">January 28, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="summit.html">Summit Bank</a></td>
+ <td class="city">Burlington</td>
+ <td class="state">WA</td>
+ <td class="cert">513</td>
+ <td class="ai">Columbia State Bank</td>
+ <td class="closing">May 20, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fgbc.html">First Georgia Banking Company</a></td>
+ <td class="city">Franklin</td>
+ <td class="state">GA</td>
+ <td class="cert">57647</td>
+ <td class="ai">CertusBank, National Association</td>
+ <td class="closing">May 20, 2011</td>
+ <td class="updated">November 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="atlanticsthrn.html">Atlantic Southern Bank</a></td>
+ <td class="city">Macon</td>
+ <td class="state">GA</td>
+ <td class="cert">57213</td>
+ <td class="ai">CertusBank, National Association</td>
+ <td class="closing">May 20, 2011</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="coastal_fl.html">Coastal Bank</a></td>
+ <td class="city">Cocoa Beach</td>
+ <td class="state">FL</td>
+ <td class="cert">34898</td>
+ <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td>
+ <td class="closing">May 6, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="communitycentral.html">Community Central Bank</a></td>
+ <td class="city">Mount Clemens</td>
+ <td class="state">MI</td>
+ <td class="cert">34234</td>
+ <td class="ai">Talmer Bank & Trust</td>
+ <td class="closing">April 29, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="parkavenue_ga.html">The Park Avenue Bank</a></td>
+ <td class="city">Valdosta</td>
+ <td class="state">GA</td>
+ <td class="cert">19797</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">April 29, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstchoice.html">First Choice Community Bank</a></td>
+ <td class="city">Dallas</td>
+ <td class="state">GA</td>
+ <td class="cert">58539</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">April 29, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cortez.html">Cortez Community Bank</a></td>
+ <td class="city">Brooksville</td>
+ <td class="state">FL</td>
+ <td class="cert">57625</td>
+ <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td>
+ <td class="closing">April 29, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fnbcf.html">First National Bank of Central Florida</a></td>
+ <td class="city">Winter Park</td>
+ <td class="state">FL</td>
+ <td class="cert">26297</td>
+ <td class="ai">Florida Community Bank, a division of Premier American Bank, N.A.</td>
+ <td class="closing">April 29, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heritage_ms.html">Heritage Banking Group</a></td>
+ <td class="city">Carthage</td>
+ <td class="state">MS</td>
+ <td class="cert">14273</td>
+ <td class="ai">Trustmark National Bank</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="rosemount.html">Rosemount National Bank</a></td>
+ <td class="city">Rosemount</td>
+ <td class="state">MN</td>
+ <td class="cert">24099</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="superior_al.html">Superior Bank</a></td>
+ <td class="city">Birmingham</td>
+ <td class="state">AL</td>
+ <td class="cert">17750</td>
+ <td class="ai">Superior Bank, National Association</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">November 30, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="nexity.html">Nexity Bank</a></td>
+ <td class="city">Birmingham</td>
+ <td class="state">AL</td>
+ <td class="cert">19794</td>
+ <td class="ai">AloStar Bank of Commerce</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newhorizons.html">New Horizons Bank</a></td>
+ <td class="city">East Ellijay</td>
+ <td class="state">GA</td>
+ <td class="cert">57705</td>
+ <td class="ai">Citizens South Bank</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bartow.html">Bartow County Bank</a></td>
+ <td class="city">Cartersville</td>
+ <td class="state">GA</td>
+ <td class="cert">21495</td>
+ <td class="ai">Hamilton State Bank</td>
+ <td class="closing">April 15, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="nevadacommerce.html">Nevada Commerce Bank</a></td>
+ <td class="city">Las Vegas</td>
+ <td class="state">NV</td>
+ <td class="cert">35418</td>
+ <td class="ai">City National Bank</td>
+ <td class="closing">April 8, 2011</td>
+ <td class="updated">September 9, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westernsprings.html">Western Springs National Bank and Trust</a></td>
+ <td class="city">Western Springs</td>
+ <td class="state">IL</td>
+ <td class="cert">10086</td>
+ <td class="ai">Heartland Bank and Trust Company</td>
+ <td class="closing">April 8, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofcommerce.html">The Bank of Commerce</a></td>
+ <td class="city">Wood Dale</td>
+ <td class="state">IL</td>
+ <td class="cert">34292</td>
+ <td class="ai">Advantage National Bank Group</td>
+ <td class="closing">March 25, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="legacy-wi.html">Legacy Bank</a></td>
+ <td class="city">Milwaukee</td>
+ <td class="state">WI</td>
+ <td class="cert">34818</td>
+ <td class="ai">Seaway Bank and Trust Company</td>
+ <td class="closing">March 11, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstnatldavis.html">First National Bank of Davis</a></td>
+ <td class="city">Davis</td>
+ <td class="state">OK</td>
+ <td class="cert">4077</td>
+ <td class="ai">The Pauls Valley National Bank</td>
+ <td class="closing">March 11, 2011</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="valleycomm.html">Valley Community Bank</a></td>
+ <td class="city">St. Charles</td>
+ <td class="state">IL</td>
+ <td class="cert">34187</td>
+ <td class="ai">First State Bank</td>
+ <td class="closing">February 25, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sanluistrust.html">San Luis Trust Bank, FSB</a></td>
+ <td class="city">San Luis Obispo</td>
+ <td class="state">CA</td>
+ <td class="cert">34783</td>
+ <td class="ai">First California Bank</td>
+ <td class="closing">February 18, 2011</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="charteroak.html">Charter Oak Bank</a></td>
+ <td class="city">Napa</td>
+ <td class="state">CA</td>
+ <td class="cert">57855</td>
+ <td class="ai">Bank of Marin</td>
+ <td class="closing">February 18, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizensbk_ga.html">Citizens Bank of Effingham</a></td>
+ <td class="city">Springfield</td>
+ <td class="state">GA</td>
+ <td class="cert">34601</td>
+ <td class="ai">Heritage Bank of the South</td>
+ <td class="closing">February 18, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="habersham.html">Habersham Bank</a></td>
+ <td class="city">Clarkesville</td>
+ <td class="state">GA</td>
+ <td class="cert">151</td>
+ <td class="ai">SCBT National Association</td>
+ <td class="closing">February 18, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="canyonstate.html">Canyon National Bank</a></td>
+ <td class="city">Palm Springs</td>
+ <td class="state">CA</td>
+ <td class="cert">34692</td>
+ <td class="ai">Pacific Premier Bank</td>
+ <td class="closing">February 11, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="badgerstate.html">Badger State Bank</a></td>
+ <td class="city">Cassville</td>
+ <td class="state">WI</td>
+ <td class="cert">13272</td>
+ <td class="ai">Royal Bank</td>
+ <td class="closing">February 11, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peoplesstatebank.html">Peoples State Bank</a></td>
+ <td class="city">Hamtramck</td>
+ <td class="state">MI</td>
+ <td class="cert">14939</td>
+ <td class="ai">First Michigan Bank</td>
+ <td class="closing">February 11, 2011</td>
+ <td class="updated">January 22, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sunshinestate.html">Sunshine State Community Bank</a></td>
+ <td class="city">Port Orange</td>
+ <td class="state">FL</td>
+ <td class="cert">35478</td>
+ <td class="ai">Premier American Bank, N.A.</td>
+ <td class="closing">February 11, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commfirst_il.html">Community First Bank Chicago</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">57948</td>
+ <td class="ai">Northbrook Bank & Trust Company</td>
+ <td class="closing">February 4, 2011</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="northgabank.html">North Georgia Bank</a></td>
+ <td class="city">Watkinsville</td>
+ <td class="state">GA</td>
+ <td class="cert">35242</td>
+ <td class="ai">BankSouth</td>
+ <td class="closing">February 4, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americantrust.html">American Trust Bank</a></td>
+ <td class="city">Roswell</td>
+ <td class="state">GA</td>
+ <td class="cert">57432</td>
+ <td class="ai">Renasant Bank</td>
+ <td class="closing">February 4, 2011</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcomm_nm.html">First Community Bank</a></td>
+ <td class="city">Taos</td>
+ <td class="state">NM</td>
+ <td class="cert">12261</td>
+ <td class="ai">U.S. Bank, N.A.</td>
+ <td class="closing">January 28, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstier.html">FirsTier Bank</a></td>
+ <td class="city">Louisville</td>
+ <td class="state">CO</td>
+ <td class="cert">57646</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">January 28, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="evergreenstatewi.html">Evergreen State Bank</a></td>
+ <td class="city">Stoughton</td>
+ <td class="state">WI</td>
+ <td class="cert">5328</td>
+ <td class="ai">McFarland State Bank</td>
+ <td class="closing">January 28, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firststatebank_ok.html">The First State Bank</a></td>
+ <td class="city">Camargo</td>
+ <td class="state">OK</td>
+ <td class="cert">2303</td>
+ <td class="ai">Bank 7</td>
+ <td class="closing">January 28, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="unitedwestern.html">United Western Bank</a></td>
+ <td class="city">Denver</td>
+ <td class="state">CO</td>
+ <td class="cert">31293</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">January 21, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofasheville.html">The Bank of Asheville</a></td>
+ <td class="city">Asheville</td>
+ <td class="state">NC</td>
+ <td class="cert">34516</td>
+ <td class="ai">First Bank</td>
+ <td class="closing">January 21, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commsouth.html">CommunitySouth Bank & Trust</a></td>
+ <td class="city">Easley</td>
+ <td class="state">SC</td>
+ <td class="cert">57868</td>
+ <td class="ai">CertusBank, National Association</td>
+ <td class="closing">January 21, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="enterprise.html">Enterprise Banking Company</a></td>
+ <td class="city">McDonough</td>
+ <td class="state">GA</td>
+ <td class="cert">19758</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">January 21, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="oglethorpe.html">Oglethorpe Bank</a></td>
+ <td class="city">Brunswick</td>
+ <td class="state">GA</td>
+ <td class="cert">57440</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">January 14, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="legacybank.html">Legacy Bank</a></td>
+ <td class="city">Scottsdale</td>
+ <td class="state">AZ</td>
+ <td class="cert">57820</td>
+ <td class="ai">Enterprise Bank & Trust</td>
+ <td class="closing">January 7, 2011</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcommercial.html">First Commercial Bank of Florida</a></td>
+ <td class="city">Orlando</td>
+ <td class="state">FL</td>
+ <td class="cert">34965</td>
+ <td class="ai">First Southern Bank</td>
+ <td class="closing">January 7, 2011</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="communitynatl.html">Community National Bank</a></td>
+ <td class="city">Lino Lakes</td>
+ <td class="state">MN</td>
+ <td class="cert">23306</td>
+ <td class="ai">Farmers & Merchants Savings Bank</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstsouthern.html">First Southern Bank</a></td>
+ <td class="city">Batesville</td>
+ <td class="state">AR</td>
+ <td class="cert">58052</td>
+ <td class="ai">Southern Bank</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="unitedamericas.html">United Americas Bank, N.A.</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">35065</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="appalachianga.html">Appalachian Community Bank, FSB</a></td>
+ <td class="city">McCaysville</td>
+ <td class="state">GA</td>
+ <td class="cert">58495</td>
+ <td class="ai">Peoples Bank of East Tennessee</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="chestatee.html">Chestatee State Bank</a></td>
+ <td class="city">Dawsonville</td>
+ <td class="state">GA</td>
+ <td class="cert">34578</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofmiami.html">The Bank of Miami,N.A.</a></td>
+ <td class="city">Coral Gables</td>
+ <td class="state">FL</td>
+ <td class="cert">19040</td>
+ <td class="ai">1st United Bank</td>
+ <td class="closing">December 17, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="earthstar.html">Earthstar Bank</a></td>
+ <td class="city">Southampton</td>
+ <td class="state">PA</td>
+ <td class="cert">35561</td>
+ <td class="ai">Polonia Bank</td>
+ <td class="closing">December 10, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="paramount.html">Paramount Bank</a></td>
+ <td class="city">Farmington Hills</td>
+ <td class="state">MI</td>
+ <td class="cert">34673</td>
+ <td class="ai">Level One Bank</td>
+ <td class="closing">December 10, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstbanking.html">First Banking Center</a></td>
+ <td class="city">Burlington</td>
+ <td class="state">WI</td>
+ <td class="cert">5287</td>
+ <td class="ai">First Michigan Bank</td>
+ <td class="closing">November 19, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="allegbank.html">Allegiance Bank of North America</a></td>
+ <td class="city">Bala Cynwyd</td>
+ <td class="state">PA</td>
+ <td class="cert">35078</td>
+ <td class="ai">VIST Bank</td>
+ <td class="closing">November 19, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="gulfstate.html">Gulf State Community Bank</a></td>
+ <td class="city">Carrabelle</td>
+ <td class="state">FL</td>
+ <td class="cert">20340</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">November 19, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="copperstar.html">Copper Star Bank</a></td>
+ <td class="city">Scottsdale</td>
+ <td class="state">AZ</td>
+ <td class="cert">35463</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">November 12, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="darbybank.html">Darby Bank & Trust Co.</a></td>
+ <td class="city">Vidalia</td>
+ <td class="state">GA</td>
+ <td class="cert">14580</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">November 12, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="tifton.html">Tifton Banking Company</a></td>
+ <td class="city">Tifton</td>
+ <td class="state">GA</td>
+ <td class="cert">57831</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">November 12, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstvietnamese.html">First Vietnamese American Bank</a><br><a href="firstvietnamese_viet.pdf">In Vietnamese</a></td>
+ <td class="city">Westminster</td>
+ <td class="state">CA</td>
+ <td class="cert">57885</td>
+ <td class="ai">Grandpoint Bank</td>
+ <td class="closing">November 5, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="piercecommercial.html">Pierce Commercial Bank</a></td>
+ <td class="city">Tacoma</td>
+ <td class="state">WA</td>
+ <td class="cert">34411</td>
+ <td class="ai">Heritage Bank</td>
+ <td class="closing">November 5, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westerncommercial_ca.html">Western Commercial Bank</a></td>
+ <td class="city">Woodland Hills</td>
+ <td class="state">CA</td>
+ <td class="cert">58087</td>
+ <td class="ai">First California Bank</td>
+ <td class="closing">November 5, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="kbank.html">K Bank</a></td>
+ <td class="city">Randallstown</td>
+ <td class="state">MD</td>
+ <td class="cert">31263</td>
+ <td class="ai">Manufacturers and Traders Trust Company (M&T Bank)</td>
+ <td class="closing">November 5, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstazfsb.html">First Arizona Savings, A FSB</a></td>
+ <td class="city">Scottsdale</td>
+ <td class="state">AZ</td>
+ <td class="cert">32582</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="hillcrest_ks.html">Hillcrest Bank</a></td>
+ <td class="city">Overland Park</td>
+ <td class="state">KS</td>
+ <td class="cert">22173</td>
+ <td class="ai">Hillcrest Bank, N.A.</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstsuburban.html">First Suburban National Bank</a></td>
+ <td class="city">Maywood</td>
+ <td class="state">IL</td>
+ <td class="cert">16089</td>
+ <td class="ai">Seaway Bank and Trust Company</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fnbbarnesville.html">The First National Bank of Barnesville</a></td>
+ <td class="city">Barnesville</td>
+ <td class="state">GA</td>
+ <td class="cert">2119</td>
+ <td class="ai">United Bank</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="gordon.html">The Gordon Bank</a></td>
+ <td class="city">Gordon</td>
+ <td class="state">GA</td>
+ <td class="cert">33904</td>
+ <td class="ai">Morris Bank</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="progress_fl.html">Progress Bank of Florida</a></td>
+ <td class="city">Tampa</td>
+ <td class="state">FL</td>
+ <td class="cert">32251</td>
+ <td class="ai">Bay Cities Bank</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstbankjacksonville.html">First Bank of Jacksonville</a></td>
+ <td class="city">Jacksonville</td>
+ <td class="state">FL</td>
+ <td class="cert">27573</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">October 22, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="premier_mo.html">Premier Bank</a></td>
+ <td class="city">Jefferson City</td>
+ <td class="state">MO</td>
+ <td class="cert">34016</td>
+ <td class="ai">Providence Bank</td>
+ <td class="closing">October 15, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westbridge.html">WestBridge Bank and Trust Company</a></td>
+ <td class="city">Chesterfield</td>
+ <td class="state">MO</td>
+ <td class="cert">58205</td>
+ <td class="ai">Midland States Bank</td>
+ <td class="closing">October 15, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="securitysavingsfsb.html">Security Savings Bank, F.S.B.</a></td>
+ <td class="city">Olathe</td>
+ <td class="state">KS</td>
+ <td class="cert">30898</td>
+ <td class="ai">Simmons First National Bank</td>
+ <td class="closing">October 15, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="shoreline.html">Shoreline Bank</a></td>
+ <td class="city">Shoreline</td>
+ <td class="state">WA</td>
+ <td class="cert">35250</td>
+ <td class="ai">GBC International Bank</td>
+ <td class="closing">October 1, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="wakulla.html">Wakulla Bank</a></td>
+ <td class="city">Crawfordville</td>
+ <td class="state">FL</td>
+ <td class="cert">21777</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">October 1, 2010</td>
+ <td class="updated">November 2, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="northcounty.html">North County Bank</a></td>
+ <td class="city">Arlington</td>
+ <td class="state">WA</td>
+ <td class="cert">35053</td>
+ <td class="ai">Whidbey Island Bank</td>
+ <td class="closing">September 24, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="haventrust_fl.html">Haven Trust Bank Florida</a></td>
+ <td class="city">Ponte Vedra Beach</td>
+ <td class="state">FL</td>
+ <td class="cert">58308</td>
+ <td class="ai">First Southern Bank</td>
+ <td class="closing">September 24, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="maritimesavings.html">Maritime Savings Bank</a></td>
+ <td class="city">West Allis</td>
+ <td class="state">WI</td>
+ <td class="cert">28612</td>
+ <td class="ai">North Shore Bank, FSB</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bramblesavings.html">Bramble Savings Bank</a></td>
+ <td class="city">Milford</td>
+ <td class="state">OH</td>
+ <td class="cert">27808</td>
+ <td class="ai">Foundation Bank</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peoplesbank_ga.html">The Peoples Bank</a></td>
+ <td class="city">Winder</td>
+ <td class="state">GA</td>
+ <td class="cert">182</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcommerce_ga.html">First Commerce Community Bank</a></td>
+ <td class="city">Douglasville</td>
+ <td class="state">GA</td>
+ <td class="cert">57448</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ellijay.html">Bank of Ellijay</a></td>
+ <td class="city">Ellijay</td>
+ <td class="state">GA</td>
+ <td class="cert">58197</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="isnbank.html">ISN Bank</a></td>
+ <td class="city">Cherry Hill</td>
+ <td class="state">NJ</td>
+ <td class="cert">57107</td>
+ <td class="ai">Customers Bank</td>
+ <td class="closing">September 17, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="horizonfl.html">Horizon Bank</a></td>
+ <td class="city">Bradenton</td>
+ <td class="state">FL</td>
+ <td class="cert">35061</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">September 10, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sonoma.html">Sonoma Valley Bank</a></td>
+ <td class="city">Sonoma</td>
+ <td class="state">CA</td>
+ <td class="cert">27259</td>
+ <td class="ai">Westamerica Bank</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lospadres.html">Los Padres Bank</a></td>
+ <td class="city">Solvang</td>
+ <td class="state">CA</td>
+ <td class="cert">32165</td>
+ <td class="ai">Pacific Western Bank</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="butte.html">Butte Community Bank</a></td>
+ <td class="city">Chico</td>
+ <td class="state">CA</td>
+ <td class="cert">33219</td>
+ <td class="ai">Rabobank, N.A.</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pacificbk.html">Pacific State Bank</a></td>
+ <td class="city">Stockton</td>
+ <td class="state">CA</td>
+ <td class="cert">27090</td>
+ <td class="ai">Rabobank, N.A.</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="shorebank.html">ShoreBank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">15640</td>
+ <td class="ai">Urban Partnership Bank</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">May 16, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="imperialsvgs.html">Imperial Savings and Loan Association</a></td>
+ <td class="city">Martinsville</td>
+ <td class="state">VA</td>
+ <td class="cert">31623</td>
+ <td class="ai">River Community Bank, N.A.</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="inatbank.html">Independent National Bank</a></td>
+ <td class="city">Ocala</td>
+ <td class="state">FL</td>
+ <td class="cert">27344</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cnbbartow.html">Community National Bank at Bartow</a></td>
+ <td class="city">Bartow</td>
+ <td class="state">FL</td>
+ <td class="cert">25266</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">August 20, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="palosbank.html">Palos Bank and Trust Company</a></td>
+ <td class="city">Palos Heights</td>
+ <td class="state">IL</td>
+ <td class="cert">17599</td>
+ <td class="ai">First Midwest Bank</td>
+ <td class="closing">August 13, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ravenswood.html">Ravenswood Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">34231</td>
+ <td class="ai">Northbrook Bank & Trust Company</td>
+ <td class="closing">August 6, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="libertyor.html">LibertyBank</a></td>
+ <td class="city">Eugene</td>
+ <td class="state">OR</td>
+ <td class="cert">31964</td>
+ <td class="ai">Home Federal Bank</td>
+ <td class="closing">July 30, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cowlitz.html">The Cowlitz Bank</a></td>
+ <td class="city">Longview</td>
+ <td class="state">WA</td>
+ <td class="cert">22643</td>
+ <td class="ai">Heritage Bank</td>
+ <td class="closing">July 30, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="coastal.html">Coastal Community Bank</a></td>
+ <td class="city">Panama City Beach</td>
+ <td class="state">FL</td>
+ <td class="cert">9619</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">July 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bayside.html">Bayside Savings Bank</a></td>
+ <td class="city">Port Saint Joe</td>
+ <td class="state">FL</td>
+ <td class="cert">57669</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">July 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="northwestga.html">Northwest Bank & Trust</a></td>
+ <td class="city">Acworth</td>
+ <td class="state">GA</td>
+ <td class="cert">57658</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="homevalleyor.html">Home Valley Bank</a></td>
+ <td class="city">Cave Junction</td>
+ <td class="state">OR</td>
+ <td class="cert">23181</td>
+ <td class="ai">South Valley Bank & Trust</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="southwestusanv.html">SouthwestUSA Bank</a></td>
+ <td class="city">Las Vegas</td>
+ <td class="state">NV</td>
+ <td class="cert">35434</td>
+ <td class="ai">Plaza Bank</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="communitysecmn.html">Community Security Bank</a></td>
+ <td class="city">New Prague</td>
+ <td class="state">MN</td>
+ <td class="cert">34486</td>
+ <td class="ai">Roundbank</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">September 12, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="thunderbankks.html">Thunder Bank</a></td>
+ <td class="city">Sylvan Grove</td>
+ <td class="state">KS</td>
+ <td class="cert">10506</td>
+ <td class="ai">The Bennington State Bank</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">September 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="williamsburgsc.html">Williamsburg First National Bank</a></td>
+ <td class="city">Kingstree</td>
+ <td class="state">SC</td>
+ <td class="cert">17837</td>
+ <td class="ai">First Citizens Bank and Trust Company, Inc.</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="crescentga.html">Crescent Bank and Trust Company</a></td>
+ <td class="city">Jasper</td>
+ <td class="state">GA</td>
+ <td class="cert">27559</td>
+ <td class="ai">Renasant Bank</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sterlingfl.html">Sterling Bank</a></td>
+ <td class="city">Lantana</td>
+ <td class="state">FL</td>
+ <td class="cert">32536</td>
+ <td class="ai">IBERIABANK</td>
+ <td class="closing">July 23, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mainstsvgs.html">Mainstreet Savings Bank, FSB</a></td>
+ <td class="city">Hastings</td>
+ <td class="state">MI</td>
+ <td class="cert">28136</td>
+ <td class="ai">Commercial Bank</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">September 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="oldecypress.html">Olde Cypress Community Bank</a></td>
+ <td class="city">Clewiston</td>
+ <td class="state">FL</td>
+ <td class="cert">28864</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="turnberry.html">Turnberry Bank</a></td>
+ <td class="city">Aventura</td>
+ <td class="state">FL</td>
+ <td class="cert">32280</td>
+ <td class="ai">NAFH National Bank</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="metrobankfl.html">Metro Bank of Dade County</a></td>
+ <td class="city">Miami</td>
+ <td class="state">FL</td>
+ <td class="cert">25172</td>
+ <td class="ai">NAFH National Bank</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstnatlsc.html">First National Bank of the South</a></td>
+ <td class="city">Spartanburg</td>
+ <td class="state">SC</td>
+ <td class="cert">35383</td>
+ <td class="ai">NAFH National Bank</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="woodlands.html">Woodlands Bank</a></td>
+ <td class="city">Bluffton</td>
+ <td class="state">SC</td>
+ <td class="cert">32571</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">July 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="homenatlok.html">Home National Bank</a></td>
+ <td class="city">Blackwell</td>
+ <td class="state">OK</td>
+ <td class="cert">11636</td>
+ <td class="ai">RCB Bank</td>
+ <td class="closing">July 9, 2010</td>
+ <td class="updated">December 10, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="usabankny.html">USA Bank</a></td>
+ <td class="city">Port Chester</td>
+ <td class="state">NY</td>
+ <td class="cert">58072</td>
+ <td class="ai">New Century Bank</td>
+ <td class="closing">July 9, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="idealfedsvngsmd.html">Ideal Federal Savings Bank</a></td>
+ <td class="city">Baltimore</td>
+ <td class="state">MD</td>
+ <td class="cert">32456</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">July 9, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="baynatlmd.html">Bay National Bank</a></td>
+ <td class="city">Baltimore</td>
+ <td class="state">MD</td>
+ <td class="cert">35462</td>
+ <td class="ai">Bay Bank, FSB</td>
+ <td class="closing">July 9, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="highdesertnm.html">High Desert State Bank</a></td>
+ <td class="city">Albuquerque</td>
+ <td class="state">NM</td>
+ <td class="cert">35279</td>
+ <td class="ai">First American Bank</td>
+ <td class="closing">June 25, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstnatga.html">First National Bank</a></td>
+ <td class="city">Savannah</td>
+ <td class="state">GA</td>
+ <td class="cert">34152</td>
+ <td class="ai">The Savannah Bank, N.A.</td>
+ <td class="closing">June 25, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peninsulafl.html">Peninsula Bank</a></td>
+ <td class="city">Englewood</td>
+ <td class="state">FL</td>
+ <td class="cert">26563</td>
+ <td class="ai">Premier American Bank, N.A.</td>
+ <td class="closing">June 25, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="nevsecbank.html">Nevada Security Bank</a></td>
+ <td class="city">Reno</td>
+ <td class="state">NV</td>
+ <td class="cert">57110</td>
+ <td class="ai">Umpqua Bank</td>
+ <td class="closing">June 18, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="washfirstintl.html">Washington First International Bank</a></td>
+ <td class="city">Seattle</td>
+ <td class="state">WA</td>
+ <td class="cert">32955</td>
+ <td class="ai">East West Bank</td>
+ <td class="closing">June 11, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="tieronebankne.html">TierOne Bank</a></td>
+ <td class="city">Lincoln</td>
+ <td class="state">NE</td>
+ <td class="cert">29341</td>
+ <td class="ai">Great Western Bank</td>
+ <td class="closing">June 4, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="arcolail.html">Arcola Homestead Savings Bank</a></td>
+ <td class="city">Arcola</td>
+ <td class="state">IL</td>
+ <td class="cert">31813</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">June 4, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstnatms.html">First National Bank</a></td>
+ <td class="city">Rosedale</td>
+ <td class="state">MS</td>
+ <td class="cert">15814</td>
+ <td class="ai">The Jefferson Bank</td>
+ <td class="closing">June 4, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="swbnevada.html">Sun West Bank</a></td>
+ <td class="city">Las Vegas</td>
+ <td class="state">NV</td>
+ <td class="cert">34785</td>
+ <td class="ai">City National Bank</td>
+ <td class="closing">May 28, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="graniteca.html">Granite Community Bank, NA</a></td>
+ <td class="city">Granite Bay</td>
+ <td class="state">CA</td>
+ <td class="cert">57315</td>
+ <td class="ai">Tri Counties Bank</td>
+ <td class="closing">May 28, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankoffloridatb.html">Bank of Florida - Tampa</a></td>
+ <td class="city">Tampa</td>
+ <td class="state">FL</td>
+ <td class="cert">57814</td>
+ <td class="ai">EverBank</td>
+ <td class="closing">May 28, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankoffloridasw.html">Bank of Florida - Southwest</a></td>
+ <td class="city">Naples</td>
+ <td class="state">FL</td>
+ <td class="cert">35106</td>
+ <td class="ai">EverBank</td>
+ <td class="closing">May 28, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankoffloridase.html">Bank of Florida - Southeast</a></td>
+ <td class="city">Fort Lauderdale</td>
+ <td class="state">FL</td>
+ <td class="cert">57360</td>
+ <td class="ai">EverBank</td>
+ <td class="closing">May 28, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pinehurstmn.html">Pinehurst Bank</a></td>
+ <td class="city">Saint Paul</td>
+ <td class="state">MN</td>
+ <td class="cert">57735</td>
+ <td class="ai">Coulee Bank</td>
+ <td class="closing">May 21, 2010</td>
+ <td class="updated">October 26, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="midwestil.html">Midwest Bank and Trust Company</a></td>
+ <td class="city">Elmwood Park</td>
+ <td class="state">IL</td>
+ <td class="cert">18117</td>
+ <td class="ai">FirstMerit Bank, N.A.</td>
+ <td class="closing">May 14, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="swcmntymo.html">Southwest Community Bank</a></td>
+ <td class="city">Springfield</td>
+ <td class="state">MO</td>
+ <td class="cert">34255</td>
+ <td class="ai">Simmons First National Bank</td>
+ <td class="closing">May 14, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newlibertymi.html">New Liberty Bank</a></td>
+ <td class="city">Plymouth</td>
+ <td class="state">MI</td>
+ <td class="cert">35586</td>
+ <td class="ai">Bank of Ann Arbor</td>
+ <td class="closing">May 14, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="satillacmntyga.html">Satilla Community Bank</a></td>
+ <td class="city">Saint Marys</td>
+ <td class="state">GA</td>
+ <td class="cert">35114</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">May 14, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="1stpacific.html">1st Pacific Bank of California</a></td>
+ <td class="city">San Diego</td>
+ <td class="state">CA</td>
+ <td class="cert">35517</td>
+ <td class="ai">City National Bank</td>
+ <td class="closing">May 7, 2010</td>
+ <td class="updated">December 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="townebank.html">Towne Bank of Arizona</a></td>
+ <td class="city">Mesa</td>
+ <td class="state">AZ</td>
+ <td class="cert">57697</td>
+ <td class="ai">Commerce Bank of Arizona</td>
+ <td class="closing">May 7, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="accessbank.html">Access Bank</a></td>
+ <td class="city">Champlin</td>
+ <td class="state">MN</td>
+ <td class="cert">16476</td>
+ <td class="ai">PrinsBank</td>
+ <td class="closing">May 7, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bonifay.html">The Bank of Bonifay</a></td>
+ <td class="city">Bonifay</td>
+ <td class="state">FL</td>
+ <td class="cert">14246</td>
+ <td class="ai">First Federal Bank of Florida</td>
+ <td class="closing">May 7, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="frontier.html">Frontier Bank</a></td>
+ <td class="city">Everett</td>
+ <td class="state">WA</td>
+ <td class="cert">22710</td>
+ <td class="ai">Union Bank, N.A.</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bc-natl.html">BC National Banks</a></td>
+ <td class="city">Butler</td>
+ <td class="state">MO</td>
+ <td class="cert">17792</td>
+ <td class="ai">Community First Bank</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="champion.html">Champion Bank</a></td>
+ <td class="city">Creve Coeur</td>
+ <td class="state">MO</td>
+ <td class="cert">58362</td>
+ <td class="ai">BankLiberty</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cfbancorp.html">CF Bancorp</a></td>
+ <td class="city">Port Huron</td>
+ <td class="state">MI</td>
+ <td class="cert">30005</td>
+ <td class="ai">First Michigan Bank</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westernbank-puertorico.html">Westernbank Puerto Rico</a><br><a href="westernbank-puertorico_spanish.html">En Espanol</a></td>
+ <td class="city">Mayaguez</td>
+ <td class="state">PR</td>
+ <td class="cert">31027</td>
+ <td class="ai">Banco Popular de Puerto Rico</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="r-gpremier-puertorico.html">R-G Premier Bank of Puerto Rico</a><br><a href="r-gpremier-puertorico_spanish.html">En Espanol</a></td>
+ <td class="city">Hato Rey</td>
+ <td class="state">PR</td>
+ <td class="cert">32185</td>
+ <td class="ai">Scotiabank de Puerto Rico</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="eurobank-puertorico.html">Eurobank</a><br><a href="eurobank-puertorico_spanish.html">En Espanol</a></td>
+ <td class="city">San Juan</td>
+ <td class="state">PR</td>
+ <td class="cert">27150</td>
+ <td class="ai">Oriental Bank and Trust</td>
+ <td class="closing">April 30, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="wheatland.html">Wheatland Bank</a></td>
+ <td class="city">Naperville</td>
+ <td class="state">IL</td>
+ <td class="cert">58429</td>
+ <td class="ai">Wheaton Bank & Trust</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peotone.html">Peotone Bank and Trust Company</a></td>
+ <td class="city">Peotone</td>
+ <td class="state">IL</td>
+ <td class="cert">10888</td>
+ <td class="ai">First Midwest Bank</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lincoln-park.html">Lincoln Park Savings Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">30600</td>
+ <td class="ai">Northbrook Bank & Trust Company</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="new-century-il.html">New Century Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">34821</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizens-bank.html">Citizens Bank and Trust Company of Chicago</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">34658</td>
+ <td class="ai">Republic Bank of Chicago</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="broadway.html">Broadway Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">22853</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amcore.html">Amcore Bank, National Association</a></td>
+ <td class="city">Rockford</td>
+ <td class="state">IL</td>
+ <td class="cert">3735</td>
+ <td class="ai">Harris N.A.</td>
+ <td class="closing">April 23, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citybank.html">City Bank</a></td>
+ <td class="city">Lynnwood</td>
+ <td class="state">WA</td>
+ <td class="cert">21521</td>
+ <td class="ai">Whidbey Island Bank</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="tamalpais.html">Tamalpais Bank</a></td>
+ <td class="city">San Rafael</td>
+ <td class="state">CA</td>
+ <td class="cert">33493</td>
+ <td class="ai">Union Bank, N.A.</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="innovative.html">Innovative Bank</a></td>
+ <td class="city">Oakland</td>
+ <td class="state">CA</td>
+ <td class="cert">23876</td>
+ <td class="ai">Center Bank</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="butlerbank.html">Butler Bank</a></td>
+ <td class="city">Lowell</td>
+ <td class="state">MA</td>
+ <td class="cert">26619</td>
+ <td class="ai">People's United Bank</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="riverside-natl.html">Riverside National Bank of Florida</a></td>
+ <td class="city">Fort Pierce</td>
+ <td class="state">FL</td>
+ <td class="cert">24067</td>
+ <td class="ai">TD Bank, N.A.</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americanfirst.html">AmericanFirst Bank</a></td>
+ <td class="city">Clermont</td>
+ <td class="state">FL</td>
+ <td class="cert">57724</td>
+ <td class="ai">TD Bank, N.A.</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ffbnf.html">First Federal Bank of North Florida</a></td>
+ <td class="city">Palatka</td>
+ <td class="state">FL</td>
+ <td class="cert">28886</td>
+ <td class="ai">TD Bank, N.A.</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lakeside-comm.html">Lakeside Community Bank</a></td>
+ <td class="city">Sterling Heights</td>
+ <td class="state">MI</td>
+ <td class="cert">34878</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">April 16, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="beachfirst.html">Beach First National Bank</a></td>
+ <td class="city">Myrtle Beach</td>
+ <td class="state">SC</td>
+ <td class="cert">34242</td>
+ <td class="ai">Bank of North Carolina</td>
+ <td class="closing">April 9, 2010</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="deserthills.html">Desert Hills Bank</a></td>
+ <td class="city">Phoenix</td>
+ <td class="state">AZ</td>
+ <td class="cert">57060</td>
+ <td class="ai">New York Community Bank</td>
+ <td class="closing">March 26, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="unity-natl.html">Unity National Bank</a></td>
+ <td class="city">Cartersville</td>
+ <td class="state">GA</td>
+ <td class="cert">34678</td>
+ <td class="ai">Bank of the Ozarks</td>
+ <td class="closing">March 26, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="key-west.html">Key West Bank</a></td>
+ <td class="city">Key West</td>
+ <td class="state">FL</td>
+ <td class="cert">34684</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">March 26, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mcintosh.html">McIntosh Commercial Bank</a></td>
+ <td class="city">Carrollton</td>
+ <td class="state">GA</td>
+ <td class="cert">57399</td>
+ <td class="ai">CharterBank</td>
+ <td class="closing">March 26, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="state-aurora.html">State Bank of Aurora</a></td>
+ <td class="city">Aurora</td>
+ <td class="state">MN</td>
+ <td class="cert">8221</td>
+ <td class="ai">Northern State Bank</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstlowndes.html">First Lowndes Bank</a></td>
+ <td class="city">Fort Deposit</td>
+ <td class="state">AL</td>
+ <td class="cert">24957</td>
+ <td class="ai">First Citizens Bank</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofhiawassee.html">Bank of Hiawassee</a></td>
+ <td class="city">Hiawassee</td>
+ <td class="state">GA</td>
+ <td class="cert">10054</td>
+ <td class="ai">Citizens South Bank</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="appalachian.html">Appalachian Community Bank</a></td>
+ <td class="city">Ellijay</td>
+ <td class="state">GA</td>
+ <td class="cert">33989</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">October 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="advanta-ut.html">Advanta Bank Corp.</a></td>
+ <td class="city">Draper</td>
+ <td class="state">UT</td>
+ <td class="cert">33535</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cent-security.html">Century Security Bank</a></td>
+ <td class="city">Duluth</td>
+ <td class="state">GA</td>
+ <td class="cert">58104</td>
+ <td class="ai">Bank of Upson</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amer-natl-oh.html">American National Bank</a></td>
+ <td class="city">Parma</td>
+ <td class="state">OH</td>
+ <td class="cert">18806</td>
+ <td class="ai">The National Bank and Trust Company</td>
+ <td class="closing">March 19, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="statewide.html">Statewide Bank</a></td>
+ <td class="city">Covington</td>
+ <td class="state">LA</td>
+ <td class="cert">29561</td>
+ <td class="ai">Home Bank</td>
+ <td class="closing">March 12, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="oldsouthern.html">Old Southern Bank</a></td>
+ <td class="city">Orlando</td>
+ <td class="state">FL</td>
+ <td class="cert">58182</td>
+ <td class="ai">Centennial Bank</td>
+ <td class="closing">March 12, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="parkavenue-ny.html">The Park Avenue Bank</a></td>
+ <td class="city">New York</td>
+ <td class="state">NY</td>
+ <td class="cert">27096</td>
+ <td class="ai">Valley National Bank</td>
+ <td class="closing">March 12, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="libertypointe.html">LibertyPointe Bank</a></td>
+ <td class="city">New York</td>
+ <td class="state">NY</td>
+ <td class="cert">58071</td>
+ <td class="ai">Valley National Bank</td>
+ <td class="closing">March 11, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="centennial-ut.html">Centennial Bank</a></td>
+ <td class="city">Ogden</td>
+ <td class="state">UT</td>
+ <td class="cert">34430</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 5, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="waterfield.html">Waterfield Bank</a></td>
+ <td class="city">Germantown</td>
+ <td class="state">MD</td>
+ <td class="cert">34976</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 5, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofillinois.html">Bank of Illinois</a></td>
+ <td class="city">Normal</td>
+ <td class="state">IL</td>
+ <td class="cert">9268</td>
+ <td class="ai">Heartland Bank and Trust Company</td>
+ <td class="closing">March 5, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sunamerican.html">Sun American Bank</a></td>
+ <td class="city">Boca Raton</td>
+ <td class="state">FL</td>
+ <td class="cert">27126</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">March 5, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="rainier.html">Rainier Pacific Bank</a></td>
+ <td class="city">Tacoma</td>
+ <td class="state">WA</td>
+ <td class="cert">38129</td>
+ <td class="ai">Umpqua Bank</td>
+ <td class="closing">February 26, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="carsonriver.html">Carson River Community Bank</a></td>
+ <td class="city">Carson City</td>
+ <td class="state">NV</td>
+ <td class="cert">58352</td>
+ <td class="ai">Heritage Bank of Nevada</td>
+ <td class="closing">February 26, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lajolla.html">La Jolla Bank, FSB</a></td>
+ <td class="city">La Jolla</td>
+ <td class="state">CA</td>
+ <td class="cert">32423</td>
+ <td class="ai">OneWest Bank, FSB</td>
+ <td class="closing">February 19, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="georgewashington.html">George Washington Savings Bank</a></td>
+ <td class="city">Orland Park</td>
+ <td class="state">IL</td>
+ <td class="cert">29952</td>
+ <td class="ai">FirstMerit Bank, N.A.</td>
+ <td class="closing">February 19, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lacoste.html">The La Coste National Bank</a></td>
+ <td class="city">La Coste</td>
+ <td class="state">TX</td>
+ <td class="cert">3287</td>
+ <td class="ai">Community National Bank</td>
+ <td class="closing">February 19, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="marco.html">Marco Community Bank</a></td>
+ <td class="city">Marco Island</td>
+ <td class="state">FL</td>
+ <td class="cert">57586</td>
+ <td class="ai">Mutual of Omaha Bank</td>
+ <td class="closing">February 19, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="1stamerican.html">1st American State Bank of Minnesota</a></td>
+ <td class="city">Hancock</td>
+ <td class="state">MN</td>
+ <td class="cert">15448</td>
+ <td class="ai">Community Development Bank, FSB</td>
+ <td class="closing">February 5, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americanmarine.html">American Marine Bank</a></td>
+ <td class="city">Bainbridge Island</td>
+ <td class="state">WA</td>
+ <td class="cert">16730</td>
+ <td class="ai">Columbia State Bank</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstregional.html">First Regional Bank</a></td>
+ <td class="city">Los Angeles</td>
+ <td class="state">CA</td>
+ <td class="cert">23011</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">August 24, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cbt-cornelia.html">Community Bank and Trust</a></td>
+ <td class="city">Cornelia</td>
+ <td class="state">GA</td>
+ <td class="cert">5702</td>
+ <td class="ai">SCBT National Association</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="marshall-mn.html">Marshall Bank, N.A.</a></td>
+ <td class="city">Hallock</td>
+ <td class="state">MN</td>
+ <td class="cert">16133</td>
+ <td class="ai">United Valley Bank</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="floridacommunity.html">Florida Community Bank</a></td>
+ <td class="city">Immokalee</td>
+ <td class="state">FL</td>
+ <td class="cert">5672</td>
+ <td class="ai">Premier American Bank, N.A.</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstnational-carrollton.html">First National Bank of Georgia</a></td>
+ <td class="city">Carrollton</td>
+ <td class="state">GA</td>
+ <td class="cert">16480</td>
+ <td class="ai">Community & Southern Bank</td>
+ <td class="closing">January 29, 2010</td>
+ <td class="updated">December 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="columbiariver.html">Columbia River Bank</a></td>
+ <td class="city">The Dalles</td>
+ <td class="state">OR</td>
+ <td class="cert">22469</td>
+ <td class="ai">Columbia State Bank</td>
+ <td class="closing">January 22, 2010</td>
+ <td class="updated">September 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="evergreen-wa.html">Evergreen Bank</a></td>
+ <td class="city">Seattle</td>
+ <td class="state">WA</td>
+ <td class="cert">20501</td>
+ <td class="ai">Umpqua Bank</td>
+ <td class="closing">January 22, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="charter-nm.html">Charter Bank</a></td>
+ <td class="city">Santa Fe</td>
+ <td class="state">NM</td>
+ <td class="cert">32498</td>
+ <td class="ai">Charter Bank</td>
+ <td class="closing">January 22, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="leeton.html">Bank of Leeton</a></td>
+ <td class="city">Leeton</td>
+ <td class="state">MO</td>
+ <td class="cert">8265</td>
+ <td class="ai">Sunflower Bank, N.A.</td>
+ <td class="closing">January 22, 2010</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="premieramerican.html">Premier American Bank</a></td>
+ <td class="city">Miami</td>
+ <td class="state">FL</td>
+ <td class="cert">57147</td>
+ <td class="ai">Premier American Bank, N.A.</td>
+ <td class="closing">January 22, 2010</td>
+ <td class="updated">December 13, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="barnes.html">Barnes Banking Company</a></td>
+ <td class="city">Kaysville</td>
+ <td class="state">UT</td>
+ <td class="cert">1252</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">January 15, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ststephen.html">St. Stephen State Bank</a></td>
+ <td class="city">St. Stephen</td>
+ <td class="state">MN</td>
+ <td class="cert">17522</td>
+ <td class="ai">First State Bank of St. Joseph</td>
+ <td class="closing">January 15, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="towncommunity.html">Town Community Bank & Trust</a></td>
+ <td class="city">Antioch</td>
+ <td class="state">IL</td>
+ <td class="cert">34705</td>
+ <td class="ai">First American Bank</td>
+ <td class="closing">January 15, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="horizon-wa.html">Horizon Bank</a></td>
+ <td class="city">Bellingham</td>
+ <td class="state">WA</td>
+ <td class="cert">22977</td>
+ <td class="ai">Washington Federal Savings and Loan Association</td>
+ <td class="closing">January 8, 2010</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstfederal-ca.html">First Federal Bank of California, F.S.B.</a></td>
+ <td class="city">Santa Monica</td>
+ <td class="state">CA</td>
+ <td class="cert">28536</td>
+ <td class="ai">OneWest Bank, FSB</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="imperialcapital.html">Imperial Capital Bank</a></td>
+ <td class="city">La Jolla</td>
+ <td class="state">CA</td>
+ <td class="cert">26348</td>
+ <td class="ai">City National Bank</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ibb.html">Independent Bankers' Bank</a></td>
+ <td class="city">Springfield</td>
+ <td class="state">IL</td>
+ <td class="cert">26820</td>
+ <td class="ai">The Independent BankersBank (TIB)</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newsouth.html">New South Federal Savings Bank</a></td>
+ <td class="city">Irondale</td>
+ <td class="state">AL</td>
+ <td class="cert">32276</td>
+ <td class="ai">Beal Bank</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizensstate-mi.html">Citizens State Bank</a></td>
+ <td class="city">New Baltimore</td>
+ <td class="state">MI</td>
+ <td class="cert">1006</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peoplesfirst-fl.html">Peoples First Community Bank</a></td>
+ <td class="city">Panama City</td>
+ <td class="state">FL</td>
+ <td class="cert">32167</td>
+ <td class="ai">Hancock Bank</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="rockbridge.html">RockBridge Commercial Bank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">58315</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">December 18, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="solutions.html">SolutionsBank</a></td>
+ <td class="city">Overland Park</td>
+ <td class="state">KS</td>
+ <td class="cert">4731</td>
+ <td class="ai">Arvest Bank</td>
+ <td class="closing">December 11, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="valleycapital.html">Valley Capital Bank, N.A.</a></td>
+ <td class="city">Mesa</td>
+ <td class="state">AZ</td>
+ <td class="cert">58399</td>
+ <td class="ai">Enterprise Bank & Trust</td>
+ <td class="closing">December 11, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="republicfederal.html">Republic Federal Bank, N.A.</a></td>
+ <td class="city">Miami</td>
+ <td class="state">FL</td>
+ <td class="cert">22846</td>
+ <td class="ai">1st United Bank</td>
+ <td class="closing">December 11, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="atlantic-va.html">Greater Atlantic Bank</a></td>
+ <td class="city">Reston</td>
+ <td class="state">VA</td>
+ <td class="cert">32583</td>
+ <td class="ai">Sonabank</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="benchmark-il.html">Benchmark Bank</a></td>
+ <td class="city">Aurora</td>
+ <td class="state">IL</td>
+ <td class="cert">10440</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">August 23, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amtrust.html">AmTrust Bank</a></td>
+ <td class="city">Cleveland</td>
+ <td class="state">OH</td>
+ <td class="cert">29776</td>
+ <td class="ai">New York Community Bank</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="tattnall.html">The Tattnall Bank</a></td>
+ <td class="city">Reidsville</td>
+ <td class="state">GA</td>
+ <td class="cert">12080</td>
+ <td class="ai">Heritage Bank of the South</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstsecurity.html">First Security National Bank</a></td>
+ <td class="city">Norcross</td>
+ <td class="state">GA</td>
+ <td class="cert">26290</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="buckheadcommunity.html">The Buckhead Community Bank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">34663</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">December 4, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commercesw-fl.html">Commerce Bank of Southwest Florida</a></td>
+ <td class="city">Fort Myers</td>
+ <td class="state">FL</td>
+ <td class="cert">58016</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">November 20, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pacificcoastnatl.html">Pacific Coast National Bank</a></td>
+ <td class="city">San Clemente</td>
+ <td class="state">CA</td>
+ <td class="cert">57914</td>
+ <td class="ai">Sunwest Bank</td>
+ <td class="closing">November 13, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="orion-fl.html">Orion Bank</a></td>
+ <td class="city">Naples</td>
+ <td class="state">FL</td>
+ <td class="cert">22427</td>
+ <td class="ai">IBERIABANK</td>
+ <td class="closing">November 13, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="centuryfsb.html">Century Bank, F.S.B.</a></td>
+ <td class="city">Sarasota</td>
+ <td class="state">FL</td>
+ <td class="cert">32267</td>
+ <td class="ai">IBERIABANK</td>
+ <td class="closing">November 13, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ucb.html">United Commercial Bank</a></td>
+ <td class="city">San Francisco</td>
+ <td class="state">CA</td>
+ <td class="cert">32469</td>
+ <td class="ai">East West Bank</td>
+ <td class="closing">November 6, 2009</td>
+ <td class="updated">November 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="gateway-mo.html">Gateway Bank of St. Louis</a></td>
+ <td class="city">St. Louis</td>
+ <td class="state">MO</td>
+ <td class="cert">19450</td>
+ <td class="ai">Central Bank of Kansas City</td>
+ <td class="closing">November 6, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="prosperan.html">Prosperan Bank</a></td>
+ <td class="city">Oakdale</td>
+ <td class="state">MN</td>
+ <td class="cert">35074</td>
+ <td class="ai">Alerus Financial, N.A.</td>
+ <td class="closing">November 6, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="homefsb-mi.html">Home Federal Savings Bank</a></td>
+ <td class="city">Detroit</td>
+ <td class="state">MI</td>
+ <td class="cert">30329</td>
+ <td class="ai">Liberty Bank and Trust Company</td>
+ <td class="closing">November 6, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="unitedsecurity-ga.html">United Security Bank</a></td>
+ <td class="city">Sparta</td>
+ <td class="state">GA</td>
+ <td class="cert">22286</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">November 6, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="northhouston-tx.html">North Houston Bank</a></td>
+ <td class="city">Houston</td>
+ <td class="state">TX</td>
+ <td class="cert">18776</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="madisonville-tx.html">Madisonville State Bank</a></td>
+ <td class="city">Madisonville</td>
+ <td class="state">TX</td>
+ <td class="cert">33782</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizens-teague.html">Citizens National Bank</a></td>
+ <td class="city">Teague</td>
+ <td class="state">TX</td>
+ <td class="cert">25222</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="park-il.html">Park National Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">11677</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pacificnational-ca.html">Pacific National Bank</a></td>
+ <td class="city">San Francisco</td>
+ <td class="state">CA</td>
+ <td class="cert">30006</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="calnational.html">California National Bank</a></td>
+ <td class="city">Los Angeles</td>
+ <td class="state">CA</td>
+ <td class="cert">34659</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sandiegonational.html">San Diego National Bank</a></td>
+ <td class="city">San Diego</td>
+ <td class="state">CA</td>
+ <td class="cert">23594</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community-lemont.html">Community Bank of Lemont</a></td>
+ <td class="city">Lemont</td>
+ <td class="state">IL</td>
+ <td class="cert">35291</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankusa-az.html">Bank USA, N.A.</a></td>
+ <td class="city">Phoenix</td>
+ <td class="state">AZ</td>
+ <td class="cert">32218</td>
+ <td class="ai">U.S. Bank N.A.</td>
+ <td class="closing">October 30, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstdupage.html">First DuPage Bank</a></td>
+ <td class="city">Westmont</td>
+ <td class="state">IL</td>
+ <td class="cert">35038</td>
+ <td class="ai">First Midwest Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="riverview-mn.html">Riverview Community Bank</a></td>
+ <td class="city">Otsego</td>
+ <td class="state">MN</td>
+ <td class="cert">57525</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="elmwood.html">Bank of Elmwood</a></td>
+ <td class="city">Racine</td>
+ <td class="state">WI</td>
+ <td class="cert">18321</td>
+ <td class="ai">Tri City National Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="flagship.html">Flagship National Bank</a></td>
+ <td class="city">Bradenton</td>
+ <td class="state">FL</td>
+ <td class="cert">35044</td>
+ <td class="ai">First Federal Bank of Florida</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="hillcrest-fl.html">Hillcrest Bank Florida</a></td>
+ <td class="city">Naples</td>
+ <td class="state">FL</td>
+ <td class="cert">58336</td>
+ <td class="ai">Stonegate Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americanunited.html">American United Bank</a></td>
+ <td class="city">Lawrenceville</td>
+ <td class="state">GA</td>
+ <td class="cert">57794</td>
+ <td class="ai">Ameris Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="partners-fl.html">Partners Bank</a></td>
+ <td class="city">Naples</td>
+ <td class="state">FL</td>
+ <td class="cert">57959</td>
+ <td class="ai">Stonegate Bank</td>
+ <td class="closing">October 23, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sanjoaquin.html">San Joaquin Bank</a></td>
+ <td class="city">Bakersfield</td>
+ <td class="state">CA</td>
+ <td class="cert">23266</td>
+ <td class="ai">Citizens Business Bank</td>
+ <td class="closing">October 16, 2009</td>
+ <td class="updated">August 22, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="scnb-co.html">Southern Colorado National Bank</a></td>
+ <td class="city">Pueblo</td>
+ <td class="state">CO</td>
+ <td class="cert">57263</td>
+ <td class="ai">Legacy Bank</td>
+ <td class="closing">October 2, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="jennings-mn.html">Jennings State Bank</a></td>
+ <td class="city">Spring Grove</td>
+ <td class="state">MN</td>
+ <td class="cert">11416</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">October 2, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="warren-mi.html">Warren Bank</a></td>
+ <td class="city">Warren</td>
+ <td class="state">MI</td>
+ <td class="cert">34824</td>
+ <td class="ai">The Huntington National Bank</td>
+ <td class="closing">October 2, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="georgian.html">Georgian Bank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">57151</td>
+ <td class="ai">First Citizens Bank and Trust Company, Inc.</td>
+ <td class="closing">September 25, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="irwin-ky.html">Irwin Union Bank, F.S.B.</a></td>
+ <td class="city">Louisville</td>
+ <td class="state">KY</td>
+ <td class="cert">57068</td>
+ <td class="ai">First Financial Bank, N.A.</td>
+ <td class="closing">September 18, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="irwin-in.html">Irwin Union Bank and Trust Company</a></td>
+ <td class="city">Columbus</td>
+ <td class="state">IN</td>
+ <td class="cert">10100</td>
+ <td class="ai">First Financial Bank, N.A.</td>
+ <td class="closing">September 18, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="venture-wa.html">Venture Bank</a></td>
+ <td class="city">Lacey</td>
+ <td class="state">WA</td>
+ <td class="cert">22868</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">September 11, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="brickwell-mn.html">Brickwell Community Bank</a></td>
+ <td class="city">Woodbury</td>
+ <td class="state">MN</td>
+ <td class="cert">57736</td>
+ <td class="ai">CorTrust Bank N.A.</td>
+ <td class="closing">September 11, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="corus.html">Corus Bank, N.A.</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">13693</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">September 11, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firststate-az.html">First State Bank</a></td>
+ <td class="city">Flagstaff</td>
+ <td class="state">AZ</td>
+ <td class="cert">34875</td>
+ <td class="ai">Sunwest Bank</td>
+ <td class="closing">September 4, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="platinum-il.html">Platinum Community Bank</a></td>
+ <td class="city">Rolling Meadows</td>
+ <td class="state">IL</td>
+ <td class="cert">35030</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">September 4, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="vantus.html">Vantus Bank</a></td>
+ <td class="city">Sioux City</td>
+ <td class="state">IN</td>
+ <td class="cert">27732</td>
+ <td class="ai">Great Southern Bank</td>
+ <td class="closing">September 4, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="inbank.html">InBank</a></td>
+ <td class="city">Oak Forest</td>
+ <td class="state">IL</td>
+ <td class="cert">20203</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">September 4, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstbankkc-mo.html">First Bank of Kansas City</a></td>
+ <td class="city">Kansas City</td>
+ <td class="state">MO</td>
+ <td class="cert">25231</td>
+ <td class="ai">Great American Bank</td>
+ <td class="closing">September 4, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="affinity-ca.html">Affinity Bank</a></td>
+ <td class="city">Ventura</td>
+ <td class="state">CA</td>
+ <td class="cert">27197</td>
+ <td class="ai">Pacific Western Bank</td>
+ <td class="closing">August 28, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mainstreet-mn.html">Mainstreet Bank</a></td>
+ <td class="city">Forest Lake</td>
+ <td class="state">MN</td>
+ <td class="cert">1909</td>
+ <td class="ai">Central Bank</td>
+ <td class="closing">August 28, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bradford-md.html">Bradford Bank</a></td>
+ <td class="city">Baltimore</td>
+ <td class="state">MD</td>
+ <td class="cert">28312</td>
+ <td class="ai">Manufacturers and Traders Trust Company (M&T Bank)</td>
+ <td class="closing">August 28, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="guaranty-tx.html">Guaranty Bank</a></td>
+ <td class="city">Austin</td>
+ <td class="state">TX</td>
+ <td class="cert">32618</td>
+ <td class="ai">BBVA Compass</td>
+ <td class="closing">August 21, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="capitalsouth.html">CapitalSouth Bank</a></td>
+ <td class="city">Birmingham</td>
+ <td class="state">AL</td>
+ <td class="cert">22130</td>
+ <td class="ai">IBERIABANK</td>
+ <td class="closing">August 21, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="coweta.html">First Coweta Bank</a></td>
+ <td class="city">Newnan</td>
+ <td class="state">GA</td>
+ <td class="cert">57702</td>
+ <td class="ai">United Bank</td>
+ <td class="closing">August 21, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ebank.html">ebank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">34682</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">August 21, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community-nv.html">Community Bank of Nevada</a></td>
+ <td class="city">Las Vegas</td>
+ <td class="state">NV</td>
+ <td class="cert">34043</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">August 14, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community-az.html">Community Bank of Arizona</a></td>
+ <td class="city">Phoenix</td>
+ <td class="state">AZ</td>
+ <td class="cert">57645</td>
+ <td class="ai">MidFirst Bank</td>
+ <td class="closing">August 14, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="union-az.html">Union Bank, National Association</a></td>
+ <td class="city">Gilbert</td>
+ <td class="state">AZ</td>
+ <td class="cert">34485</td>
+ <td class="ai">MidFirst Bank</td>
+ <td class="closing">August 14, 2009</td>
+ <td class="updated">August 21, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="colonial-al.html">Colonial Bank</a></td>
+ <td class="city">Montgomery</td>
+ <td class="state">AL</td>
+ <td class="cert">9609</td>
+ <td class="ai">Branch Banking & Trust Company, (BB&T)</td>
+ <td class="closing">August 14, 2009</td>
+ <td class="updated">September 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="dwelling.html">Dwelling House Savings and Loan Association</a></td>
+ <td class="city">Pittsburgh</td>
+ <td class="state">PA</td>
+ <td class="cert">31559</td>
+ <td class="ai">PNC Bank, N.A.</td>
+ <td class="closing">August 14, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community-prineville.html">Community First Bank</a></td>
+ <td class="city">Prineville</td>
+ <td class="state">OR</td>
+ <td class="cert">23268</td>
+ <td class="ai">Home Federal Bank</td>
+ <td class="closing">August 7, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community-venice.html">Community National Bank of Sarasota County</a></td>
+ <td class="city">Venice</td>
+ <td class="state">FL</td>
+ <td class="cert">27183</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">August 7, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fsb-sarasota.html">First State Bank</a></td>
+ <td class="city">Sarasota</td>
+ <td class="state">FL</td>
+ <td class="cert">27364</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">August 7, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mutual-harvey.html">Mutual Bank</a></td>
+ <td class="city">Harvey</td>
+ <td class="state">IL</td>
+ <td class="cert">18659</td>
+ <td class="ai">United Central Bank</td>
+ <td class="closing">July 31, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americano.html">First BankAmericano</a></td>
+ <td class="city">Elizabeth</td>
+ <td class="state">NJ</td>
+ <td class="cert">34270</td>
+ <td class="ai">Crown Bank</td>
+ <td class="closing">July 31, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="peoplescommunity-oh.html">Peoples Community Bank</a></td>
+ <td class="city">West Chester</td>
+ <td class="state">OH</td>
+ <td class="cert">32288</td>
+ <td class="ai">First Financial Bank, N.A.</td>
+ <td class="closing">July 31, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="integrity-fl.html">Integrity Bank</a></td>
+ <td class="city">Jupiter</td>
+ <td class="state">FL</td>
+ <td class="cert">57604</td>
+ <td class="ai">Stonegate Bank</td>
+ <td class="closing">July 31, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fsb-altus.html">First State Bank of Altus</a></td>
+ <td class="city">Altus</td>
+ <td class="state">OK</td>
+ <td class="cert">9873</td>
+ <td class="ai">Herring Bank</td>
+ <td class="closing">July 31, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-jones.html">Security Bank of Jones County</a></td>
+ <td class="city">Gray</td>
+ <td class="state">GA</td>
+ <td class="cert">8486</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-houston.html">Security Bank of Houston County</a></td>
+ <td class="city">Perry</td>
+ <td class="state">GA</td>
+ <td class="cert">27048</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-bibb.html">Security Bank of Bibb County</a></td>
+ <td class="city">Macon</td>
+ <td class="state">GA</td>
+ <td class="cert">27367</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-metro.html">Security Bank of North Metro</a></td>
+ <td class="city">Woodstock</td>
+ <td class="state">GA</td>
+ <td class="cert">57105</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-fulton.html">Security Bank of North Fulton</a></td>
+ <td class="city">Alpharetta</td>
+ <td class="state">GA</td>
+ <td class="cert">57430</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sb-gwinnett.html">Security Bank of Gwinnett County</a></td>
+ <td class="city">Suwanee</td>
+ <td class="state">GA</td>
+ <td class="cert">57346</td>
+ <td class="ai">State Bank and Trust Company</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="waterford.html">Waterford Village Bank</a></td>
+ <td class="city">Williamsville</td>
+ <td class="state">NY</td>
+ <td class="cert">58065</td>
+ <td class="ai">Evans Bank, N.A.</td>
+ <td class="closing">July 24, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="temecula.html">Temecula Valley Bank</a></td>
+ <td class="city">Temecula</td>
+ <td class="state">CA</td>
+ <td class="cert">34341</td>
+ <td class="ai">First-Citizens Bank & Trust Company</td>
+ <td class="closing">July 17, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="vineyard.html">Vineyard Bank</a></td>
+ <td class="city">Rancho Cucamonga</td>
+ <td class="state">CA</td>
+ <td class="cert">23556</td>
+ <td class="ai">California Bank & Trust</td>
+ <td class="closing">July 17, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankfirst.html">BankFirst</a></td>
+ <td class="city">Sioux Falls</td>
+ <td class="state">SD</td>
+ <td class="cert">34103</td>
+ <td class="ai">Alerus Financial, N.A.</td>
+ <td class="closing">July 17, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="piedmont.html">First Piedmont Bank</a></td>
+ <td class="city">Winder</td>
+ <td class="state">GA</td>
+ <td class="cert">34594</td>
+ <td class="ai">First American Bank and Trust Company</td>
+ <td class="closing">July 17, 2009</td>
+ <td class="updated">January 15, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="wyoming.html">Bank of Wyoming</a></td>
+ <td class="city">Thermopolis</td>
+ <td class="state">WY</td>
+ <td class="cert">22754</td>
+ <td class="ai">Central Bank & Trust</td>
+ <td class="closing">July 10, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="founders.html">Founders Bank</a></td>
+ <td class="city">Worth</td>
+ <td class="state">IL</td>
+ <td class="cert">18390</td>
+ <td class="ai">The PrivateBank and Trust Company</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="millennium.html">Millennium State Bank of Texas</a></td>
+ <td class="city">Dallas</td>
+ <td class="state">TX</td>
+ <td class="cert">57667</td>
+ <td class="ai">State Bank of Texas</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">October 26, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="danville.html">First National Bank of Danville</a></td>
+ <td class="city">Danville</td>
+ <td class="state">IL</td>
+ <td class="cert">3644</td>
+ <td class="ai">First Financial Bank, N.A.</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="elizabeth.html">Elizabeth State Bank</a></td>
+ <td class="city">Elizabeth</td>
+ <td class="state">IL</td>
+ <td class="cert">9262</td>
+ <td class="ai">Galena State Bank and Trust Company</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="rockriver.html">Rock River Bank</a></td>
+ <td class="city">Oregon</td>
+ <td class="state">IL</td>
+ <td class="cert">15302</td>
+ <td class="ai">The Harvard State Bank</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="winchester.html">First State Bank of Winchester</a></td>
+ <td class="city">Winchester</td>
+ <td class="state">IL</td>
+ <td class="cert">11710</td>
+ <td class="ai">The First National Bank of Beardstown</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="warner.html">John Warner Bank</a></td>
+ <td class="city">Clinton</td>
+ <td class="state">IL</td>
+ <td class="cert">12093</td>
+ <td class="ai">State Bank of Lincoln</td>
+ <td class="closing">July 2, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mirae.html">Mirae Bank</a></td>
+ <td class="city">Los Angeles</td>
+ <td class="state">CA</td>
+ <td class="cert">57332</td>
+ <td class="ai">Wilshire State Bank</td>
+ <td class="closing">June 26, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="metropacific.html">MetroPacific Bank</a></td>
+ <td class="city">Irvine</td>
+ <td class="state">CA</td>
+ <td class="cert">57893</td>
+ <td class="ai">Sunwest Bank</td>
+ <td class="closing">June 26, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="horizon.html">Horizon Bank</a></td>
+ <td class="city">Pine City</td>
+ <td class="state">MN</td>
+ <td class="cert">9744</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">June 26, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="neighbor.html">Neighborhood Community Bank</a></td>
+ <td class="city">Newnan</td>
+ <td class="state">GA</td>
+ <td class="cert">35285</td>
+ <td class="ai">CharterBank</td>
+ <td class="closing">June 26, 2009</td>
+ <td class="updated">August 20, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="communityga.html">Community Bank of West Georgia</a></td>
+ <td class="city">Villa Rica</td>
+ <td class="state">GA</td>
+ <td class="cert">57436</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">June 26, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="anthony.html">First National Bank of Anthony</a></td>
+ <td class="city">Anthony</td>
+ <td class="state">KS</td>
+ <td class="cert">4614</td>
+ <td class="ai">Bank of Kansas</td>
+ <td class="closing">June 19, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cooperative.html">Cooperative Bank</a></td>
+ <td class="city">Wilmington</td>
+ <td class="state">NC</td>
+ <td class="cert">27837</td>
+ <td class="ai">First Bank</td>
+ <td class="closing">June 19, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="scb.html">Southern Community Bank</a></td>
+ <td class="city">Fayetteville</td>
+ <td class="state">GA</td>
+ <td class="cert">35251</td>
+ <td class="ai">United Community Bank</td>
+ <td class="closing">June 19, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="lincolnwood.html">Bank of Lincolnwood</a></td>
+ <td class="city">Lincolnwood</td>
+ <td class="state">IL</td>
+ <td class="cert">17309</td>
+ <td class="ai">Republic Bank of Chicago</td>
+ <td class="closing">June 5, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizensnational.html">Citizens National Bank</a></td>
+ <td class="city">Macomb</td>
+ <td class="state">IL</td>
+ <td class="cert">5757</td>
+ <td class="ai">Morton Community Bank</td>
+ <td class="closing">May 22, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="strategiccapital.html">Strategic Capital Bank</a></td>
+ <td class="city">Champaign</td>
+ <td class="state">IL</td>
+ <td class="cert">35175</td>
+ <td class="ai">Midland States Bank</td>
+ <td class="closing">May 22, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankunited.html">BankUnited, FSB</a></td>
+ <td class="city">Coral Gables</td>
+ <td class="state">FL</td>
+ <td class="cert">32247</td>
+ <td class="ai">BankUnited</td>
+ <td class="closing">May 21, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="westsound.html">Westsound Bank</a></td>
+ <td class="city">Bremerton</td>
+ <td class="state">WA</td>
+ <td class="cert">34843</td>
+ <td class="ai">Kitsap Bank</td>
+ <td class="closing">May 8, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="americawest.html">America West Bank</a></td>
+ <td class="city">Layton</td>
+ <td class="state">UT</td>
+ <td class="cert">35461</td>
+ <td class="ai">Cache Valley Bank</td>
+ <td class="closing">May 1, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="citizens.html">Citizens Community Bank</a></td>
+ <td class="city">Ridgewood</td>
+ <td class="state">NJ</td>
+ <td class="cert">57563</td>
+ <td class="ai">North Jersey Community Bank</td>
+ <td class="closing">May 1, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="silverton.html">Silverton Bank, NA</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">26535</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">May 1, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstbankidaho.html">First Bank of Idaho</a></td>
+ <td class="city">Ketchum</td>
+ <td class="state">ID</td>
+ <td class="cert">34396</td>
+ <td class="ai">U.S. Bank, N.A.</td>
+ <td class="closing">April 24, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="beverlyhills.html">First Bank of Beverly Hills</a></td>
+ <td class="city">Calabasas</td>
+ <td class="state">CA</td>
+ <td class="cert">32069</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">April 24, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="michiganheritage.html">Michigan Heritage Bank</a></td>
+ <td class="city">Farmington Hills</td>
+ <td class="state">MI</td>
+ <td class="cert">34369</td>
+ <td class="ai">Level One Bank</td>
+ <td class="closing">April 24, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amsouthern.html">American Southern Bank</a></td>
+ <td class="city">Kennesaw</td>
+ <td class="state">GA</td>
+ <td class="cert">57943</td>
+ <td class="ai">Bank of North Georgia</td>
+ <td class="closing">April 24, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="greatbasin.html">Great Basin Bank of Nevada</a></td>
+ <td class="city">Elko</td>
+ <td class="state">NV</td>
+ <td class="cert">33824</td>
+ <td class="ai">Nevada State Bank</td>
+ <td class="closing">April 17, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amsterling.html">American Sterling Bank</a></td>
+ <td class="city">Sugar Creek</td>
+ <td class="state">MO</td>
+ <td class="cert">8266</td>
+ <td class="ai">Metcalf Bank</td>
+ <td class="closing">April 17, 2009</td>
+ <td class="updated">August 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newfrontier.html">New Frontier Bank</a></td>
+ <td class="city">Greeley</td>
+ <td class="state">CO</td>
+ <td class="cert">34881</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">April 10, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="capefear.html">Cape Fear Bank</a></td>
+ <td class="city">Wilmington</td>
+ <td class="state">NC</td>
+ <td class="cert">34639</td>
+ <td class="ai">First Federal Savings and Loan Association</td>
+ <td class="closing">April 10, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="omni.html">Omni National Bank</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">22238</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 27, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="teambank.html">TeamBank, NA</a></td>
+ <td class="city">Paola</td>
+ <td class="state">KS</td>
+ <td class="cert">4754</td>
+ <td class="ai">Great Southern Bank</td>
+ <td class="closing">March 20, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="coloradonational.html">Colorado National Bank</a></td>
+ <td class="city">Colorado Springs</td>
+ <td class="state">CO</td>
+ <td class="cert">18896</td>
+ <td class="ai">Herring Bank</td>
+ <td class="closing">March 20, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstcity.html">FirstCity Bank</a></td>
+ <td class="city">Stockbridge</td>
+ <td class="state">GA</td>
+ <td class="cert">18243</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 20, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="freedomga.html">Freedom Bank of Georgia</a></td>
+ <td class="city">Commerce</td>
+ <td class="state">GA</td>
+ <td class="cert">57558</td>
+ <td class="ai">Northeast Georgia Bank</td>
+ <td class="closing">March 6, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="securitysavings.html">Security Savings Bank</a></td>
+ <td class="city">Henderson</td>
+ <td class="state">NV</td>
+ <td class="cert">34820</td>
+ <td class="ai">Bank of Nevada</td>
+ <td class="closing">February 27, 2009</td>
+ <td class="updated">September 7, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heritagebank.html">Heritage Community Bank</a></td>
+ <td class="city">Glenwood</td>
+ <td class="state">IL</td>
+ <td class="cert">20078</td>
+ <td class="ai">MB Financial Bank, N.A.</td>
+ <td class="closing">February 27, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="silverfalls.html">Silver Falls Bank</a></td>
+ <td class="city">Silverton</td>
+ <td class="state">OR</td>
+ <td class="cert">35399</td>
+ <td class="ai">Citizens Bank</td>
+ <td class="closing">February 20, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pinnacle.html">Pinnacle Bank of Oregon</a></td>
+ <td class="city">Beaverton</td>
+ <td class="state">OR</td>
+ <td class="cert">57342</td>
+ <td class="ai">Washington Trust Bank of Spokane</td>
+ <td class="closing">February 13, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cornbelt.html">Corn Belt Bank & Trust Co.</a></td>
+ <td class="city">Pittsfield</td>
+ <td class="state">IL</td>
+ <td class="cert">16500</td>
+ <td class="ai">The Carlinville National Bank</td>
+ <td class="closing">February 13, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="riverside.html">Riverside Bank of the Gulf Coast</a></td>
+ <td class="city">Cape Coral</td>
+ <td class="state">FL</td>
+ <td class="cert">34563</td>
+ <td class="ai">TIB Bank</td>
+ <td class="closing">February 13, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sherman.html">Sherman County Bank</a></td>
+ <td class="city">Loup City</td>
+ <td class="state">NE</td>
+ <td class="cert">5431</td>
+ <td class="ai">Heritage Bank</td>
+ <td class="closing">February 13, 2009</td>
+ <td class="updated">August 17, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="county.html">County Bank</a></td>
+ <td class="city">Merced</td>
+ <td class="state">CA</td>
+ <td class="cert">22574</td>
+ <td class="ai">Westamerica Bank</td>
+ <td class="closing">February 6, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="alliance.html">Alliance Bank</a></td>
+ <td class="city">Culver City</td>
+ <td class="state">CA</td>
+ <td class="cert">23124</td>
+ <td class="ai">California Bank & Trust</td>
+ <td class="closing">February 6, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstbank.html">FirstBank Financial Services</a></td>
+ <td class="city">McDonough</td>
+ <td class="state">GA</td>
+ <td class="cert">57017</td>
+ <td class="ai">Regions Bank</td>
+ <td class="closing">February 6, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ocala.html">Ocala National Bank</a></td>
+ <td class="city">Ocala</td>
+ <td class="state">FL</td>
+ <td class="cert">26538</td>
+ <td class="ai">CenterState Bank of Florida, N.A.</td>
+ <td class="closing">January 30, 2009</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="suburban.html">Suburban FSB</a></td>
+ <td class="city">Crofton</td>
+ <td class="state">MD</td>
+ <td class="cert">30763</td>
+ <td class="ai">Bank of Essex</td>
+ <td class="closing">January 30, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="magnet.html">MagnetBank</a></td>
+ <td class="city">Salt Lake City</td>
+ <td class="state">UT</td>
+ <td class="cert">58001</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">January 30, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="centennial.html">1st Centennial Bank</a></td>
+ <td class="city">Redlands</td>
+ <td class="state">CA</td>
+ <td class="cert">33025</td>
+ <td class="ai">First California Bank</td>
+ <td class="closing">January 23, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="clark.html">Bank of Clark County</a></td>
+ <td class="city">Vancouver</td>
+ <td class="state">WA</td>
+ <td class="cert">34959</td>
+ <td class="ai">Umpqua Bank</td>
+ <td class="closing">January 16, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="commerce.html">National Bank of Commerce</a></td>
+ <td class="city">Berkeley</td>
+ <td class="state">IL</td>
+ <td class="cert">19733</td>
+ <td class="ai">Republic Bank of Chicago</td>
+ <td class="closing">January 16, 2009</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sanderson.html">Sanderson State Bank</a><br><a href="sanderson_spanish.html">En Espanol</a></td>
+ <td class="city">Sanderson</td>
+ <td class="state">TX</td>
+ <td class="cert">11568</td>
+ <td class="ai">The Pecos County State Bank</td>
+ <td class="closing">December 12, 2008</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="haventrust.html">Haven Trust Bank</a></td>
+ <td class="city">Duluth</td>
+ <td class="state">GA</td>
+ <td class="cert">35379</td>
+ <td class="ai">Branch Banking & Trust Company, (BB&T)</td>
+ <td class="closing">December 12, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstga.html">First Georgia Community Bank</a></td>
+ <td class="city">Jackson</td>
+ <td class="state">GA</td>
+ <td class="cert">34301</td>
+ <td class="ai">United Bank</td>
+ <td class="closing">December 5, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pff.html">PFF Bank & Trust</a></td>
+ <td class="city">Pomona</td>
+ <td class="state">CA</td>
+ <td class="cert">28344</td>
+ <td class="ai">U.S. Bank, N.A.</td>
+ <td class="closing">November 21, 2008</td>
+ <td class="updated">January 4, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="downey.html">Downey Savings & Loan</a></td>
+ <td class="city">Newport Beach</td>
+ <td class="state">CA</td>
+ <td class="cert">30968</td>
+ <td class="ai">U.S. Bank, N.A.</td>
+ <td class="closing">November 21, 2008</td>
+ <td class="updated">January 4, 2013</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="community.html">Community Bank</a></td>
+ <td class="city">Loganville</td>
+ <td class="state">GA</td>
+ <td class="cert">16490</td>
+ <td class="ai">Bank of Essex</td>
+ <td class="closing">November 21, 2008</td>
+ <td class="updated">September 4, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="securitypacific.html">Security Pacific Bank</a></td>
+ <td class="city">Los Angeles</td>
+ <td class="state">CA</td>
+ <td class="cert">23595</td>
+ <td class="ai">Pacific Western Bank</td>
+ <td class="closing">November 7, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="franklinbank.html">Franklin Bank, SSB</a></td>
+ <td class="city">Houston</td>
+ <td class="state">TX</td>
+ <td class="cert">26870</td>
+ <td class="ai">Prosperity Bank</td>
+ <td class="closing">November 7, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="freedom.html">Freedom Bank</a></td>
+ <td class="city">Bradenton</td>
+ <td class="state">FL</td>
+ <td class="cert">57930</td>
+ <td class="ai">Fifth Third Bank</td>
+ <td class="closing">October 31, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="alpha.html">Alpha Bank & Trust</a></td>
+ <td class="city">Alpharetta</td>
+ <td class="state">GA</td>
+ <td class="cert">58241</td>
+ <td class="ai">Stearns Bank, N.A.</td>
+ <td class="closing">October 24, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="meridian.html">Meridian Bank</a></td>
+ <td class="city">Eldred</td>
+ <td class="state">IL</td>
+ <td class="cert">13789</td>
+ <td class="ai">National Bank</td>
+ <td class="closing">October 10, 2008</td>
+ <td class="updated">May 31, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="mainstreet.html">Main Street Bank</a></td>
+ <td class="city">Northville</td>
+ <td class="state">MI</td>
+ <td class="cert">57654</td>
+ <td class="ai">Monroe Bank & Trust</td>
+ <td class="closing">October 10, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="wamu.html">Washington Mutual Bank<br>(Including its subsidiary Washington Mutual Bank FSB)</a></td>
+ <td class="city">Henderson</td>
+ <td class="state">NV</td>
+ <td class="cert">32633</td>
+ <td class="ai">JP Morgan Chase Bank</td>
+ <td class="closing">September 25, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ameribank.html">Ameribank</a></td>
+ <td class="city">Northfork</td>
+ <td class="state">WV</td>
+ <td class="cert">6782</td>
+ <td class="ai">The Citizens Savings Bank<br><br>Pioneer Community Bank, Inc.</td>
+ <td class="closing">September 19, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="silverstate.html">Silver State Bank</a><br><a href="silverstatesp.html">En Espanol</a></td>
+ <td class="city">Henderson</td>
+ <td class="state">NV</td>
+ <td class="cert">34194</td>
+ <td class="ai">Nevada State Bank</td>
+ <td class="closing">September 5, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="integrity.html">Integrity Bank</a></td>
+ <td class="city">Alpharetta</td>
+ <td class="state">GA</td>
+ <td class="cert">35469</td>
+ <td class="ai">Regions Bank</td>
+ <td class="closing">August 29, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="columbian.html">Columbian Bank & Trust</a></td>
+ <td class="city">Topeka</td>
+ <td class="state">KS</td>
+ <td class="cert">22728</td>
+ <td class="ai">Citizens Bank & Trust</td>
+ <td class="closing">August 22, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstprioritybank.html">First Priority Bank</a></td>
+ <td class="city">Bradenton</td>
+ <td class="state">FL</td>
+ <td class="cert">57523</td>
+ <td class="ai">SunTrust Bank</td>
+ <td class="closing">August 1, 2008</td>
+ <td class="updated">August 16, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="heritage.html">First Heritage Bank, NA</a></td>
+ <td class="city">Newport Beach</td>
+ <td class="state">CA</td>
+ <td class="cert">57961</td>
+ <td class="ai">Mutual of Omaha Bank</td>
+ <td class="closing">July 25, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="fnbnv.html">First National Bank of Nevada</a></td>
+ <td class="city">Reno</td>
+ <td class="state">NV</td>
+ <td class="cert">27011</td>
+ <td class="ai">Mutual of Omaha Bank</td>
+ <td class="closing">July 25, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="IndyMac.html">IndyMac Bank</a></td>
+ <td class="city">Pasadena</td>
+ <td class="state">CA</td>
+ <td class="cert">29730</td>
+ <td class="ai">OneWest Bank, FSB</td>
+ <td class="closing">July 11, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="first_integrity_bank.html">First Integrity Bank, NA</a></td>
+ <td class="city">Staples</td>
+ <td class="state">MN</td>
+ <td class="cert">12736</td>
+ <td class="ai">First International Bank and Trust</td>
+ <td class="closing">May 30, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="anb.html">ANB Financial, NA</a></td>
+ <td class="city">Bentonville</td>
+ <td class="state">AR</td>
+ <td class="cert">33901</td>
+ <td class="ai">Pulaski Bank and Trust Company</td>
+ <td class="closing">May 9, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="Hume.html">Hume Bank</a></td>
+ <td class="city">Hume</td>
+ <td class="state">MO</td>
+ <td class="cert">1971</td>
+ <td class="ai">Security Bank</td>
+ <td class="closing">March 7, 2008</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="Douglass.html">Douglass National Bank</a></td>
+ <td class="city">Kansas City</td>
+ <td class="state">MO</td>
+ <td class="cert">24660</td>
+ <td class="ai">Liberty Bank and Trust Company</td>
+ <td class="closing">January 25, 2008</td>
+ <td class="updated">October 26, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="MiamiValley.html">Miami Valley Bank</a></td>
+ <td class="city">Lakeview</td>
+ <td class="state">OH</td>
+ <td class="cert">16848</td>
+ <td class="ai">The Citizens Banking Company</td>
+ <td class="closing">October 4, 2007</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="NetBank.html">NetBank</a></td>
+ <td class="city">Alpharetta</td>
+ <td class="state">GA</td>
+ <td class="cert">32575</td>
+ <td class="ai">ING DIRECT</td>
+ <td class="closing">September 28, 2007</td>
+ <td class="updated">August 28, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="MetropolitanSB.html">Metropolitan Savings Bank</a></td>
+ <td class="city">Pittsburgh</td>
+ <td class="state">PA</td>
+ <td class="cert">35353</td>
+ <td class="ai">Allegheny Valley Bank of Pittsburgh</td>
+ <td class="closing">February 2, 2007</td>
+ <td class="updated">October 27, 2010</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="ephraim.html">Bank of Ephraim</a></td>
+ <td class="city">Ephraim</td>
+ <td class="state">UT</td>
+ <td class="cert">1249</td>
+ <td class="ai">Far West Bank</td>
+ <td class="closing">June 25, 2004</td>
+ <td class="updated">April 9, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="reliance.html">Reliance Bank</a></td>
+ <td class="city">White Plains</td>
+ <td class="state">NY</td>
+ <td class="cert">26778</td>
+ <td class="ai">Union State Bank</td>
+ <td class="closing">March 19, 2004</td>
+ <td class="updated">April 9, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="gnb.html">Guaranty National Bank of Tallahassee</a></td>
+ <td class="city">Tallahassee</td>
+ <td class="state">FL</td>
+ <td class="cert">26838</td>
+ <td class="ai">Hancock Bank of Florida</td>
+ <td class="closing">March 12, 2004</td>
+ <td class="updated">June 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="dollar.html">Dollar Savings Bank</a></td>
+ <td class="city">Newark</td>
+ <td class="state">NJ</td>
+ <td class="cert">31330</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">February 14, 2004</td>
+ <td class="updated">April 9, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="pulaski.html">Pulaski Savings Bank</a></td>
+ <td class="city">Philadelphia</td>
+ <td class="state">PA</td>
+ <td class="cert">27203</td>
+ <td class="ai">Earthstar Bank</td>
+ <td class="closing">November 14, 2003</td>
+ <td class="updated">July 22, 2005</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="blanchardville.html">First National Bank of Blanchardville</a></td>
+ <td class="city">Blanchardville</td>
+ <td class="state">WI</td>
+ <td class="cert">11639</td>
+ <td class="ai">The Park Bank</td>
+ <td class="closing">May 9, 2003</td>
+ <td class="updated">June 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="spbank.html">Southern Pacific Bank</a></td>
+ <td class="city">Torrance</td>
+ <td class="state">CA</td>
+ <td class="cert">27094</td>
+ <td class="ai">Beal Bank</td>
+ <td class="closing">February 7, 2003</td>
+ <td class="updated">October 20, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="farmers.html">Farmers Bank of Cheneyville</a></td>
+ <td class="city">Cheneyville</td>
+ <td class="state">LA</td>
+ <td class="cert">16445</td>
+ <td class="ai">Sabine State Bank & Trust</td>
+ <td class="closing">December 17, 2002</td>
+ <td class="updated">October 20, 2004</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="bankofalamo.html">Bank of Alamo</a></td>
+ <td class="city">Alamo</td>
+ <td class="state">TN</td>
+ <td class="cert">9961</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">November 8, 2002</td>
+ <td class="updated">March 18, 2005</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="amtrade.html">AmTrade International Bank</a><br><a href="amtrade-spanish.html">En Espanol</a></td>
+ <td class="city">Atlanta</td>
+ <td class="state">GA</td>
+ <td class="cert">33784</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">September 30, 2002</td>
+ <td class="updated">September 11, 2006</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="universal.html">Universal Federal Savings Bank</a></td>
+ <td class="city">Chicago</td>
+ <td class="state">IL</td>
+ <td class="cert">29355</td>
+ <td class="ai">Chicago Community Bank</td>
+ <td class="closing">June 27, 2002</td>
+ <td class="updated">April 9, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="cbc.html">Connecticut Bank of Commerce</a></td>
+ <td class="city">Stamford</td>
+ <td class="state">CT</td>
+ <td class="cert">19183</td>
+ <td class="ai">Hudson United Bank</td>
+ <td class="closing">June 26, 2002</td>
+ <td class="updated">February 14, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="newcentury.html">New Century Bank</a></td>
+ <td class="city">Shelby Township</td>
+ <td class="state">MI</td>
+ <td class="cert">34979</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">March 28, 2002</td>
+ <td class="updated">March 18, 2005</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="netfirst.html">Net 1st National Bank</a></td>
+ <td class="city">Boca Raton</td>
+ <td class="state">FL</td>
+ <td class="cert">26652</td>
+ <td class="ai">Bank Leumi USA</td>
+ <td class="closing">March 1, 2002</td>
+ <td class="updated">April 9, 2008</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="nextbank.html">NextBank, NA</a></td>
+ <td class="city">Phoenix</td>
+ <td class="state">AZ</td>
+ <td class="cert">22314</td>
+ <td class="ai">No Acquirer</td>
+ <td class="closing">February 7, 2002</td>
+ <td class="updated">August 27, 2010</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="Oakwood.html">Oakwood Deposit Bank Co.</a></td>
+ <td class="city">Oakwood</td>
+ <td class="state">OH</td>
+ <td class="cert">8966</td>
+ <td class="ai">The State Bank & Trust Company</td>
+ <td class="closing">February 1, 2002</td>
+ <td class="updated">October 25, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sierrablanca.html">Bank of Sierra Blanca</a></td>
+ <td class="city">Sierra Blanca</td>
+ <td class="state">TX</td>
+ <td class="cert">22002</td>
+ <td class="ai">The Security State Bank of Pecos</td>
+ <td class="closing">January 18, 2002</td>
+ <td class="updated">November 6, 2003</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="hamilton.html">Hamilton Bank, NA</a><br><a href="hamilton-spanish.html">En Espanol</a></td>
+ <td class="city">Miami</td>
+ <td class="state">FL</td>
+ <td class="cert">24382</td>
+ <td class="ai">Israel Discount Bank of New York</td>
+ <td class="closing">January 11, 2002</td>
+ <td class="updated">June 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="sinclair.html">Sinclair National Bank</a></td>
+ <td class="city">Gravette</td>
+ <td class="state">AR</td>
+ <td class="cert">34248</td>
+ <td class="ai">Delta Trust & Bank</td>
+ <td class="closing">September 7, 2001</td>
+ <td class="updated">February 10, 2004</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="superior.html">Superior Bank, FSB</a></td>
+ <td class="city">Hinsdale</td>
+ <td class="state">IL</td>
+ <td class="cert">32646</td>
+ <td class="ai">Superior Federal, FSB</td>
+ <td class="closing">July 27, 2001</td>
+ <td class="updated">June 5, 2012</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="Malta.html">Malta National Bank</a></td>
+ <td class="city">Malta</td>
+ <td class="state">OH</td>
+ <td class="cert">6629</td>
+ <td class="ai">North Valley Bank</td>
+ <td class="closing">May 3, 2001</td>
+ <td class="updated">November 18, 2002</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="firstalliance.html">First Alliance Bank & Trust Co.</a></td>
+ <td class="city">Manchester</td>
+ <td class="state">NH</td>
+ <td class="cert">34264</td>
+ <td class="ai">Southern New Hampshire Bank & Trust</td>
+ <td class="closing">February 2, 2001</td>
+ <td class="updated">February 18, 2003</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="nsb.html">National State Bank of Metropolis</a></td>
+ <td class="city">Metropolis</td>
+ <td class="state">IL</td>
+ <td class="cert">3815</td>
+ <td class="ai">Banterra Bank of Marion</td>
+ <td class="closing">December 14, 2000</td>
+ <td class="updated">March 17, 2005</td>
+ </tr>
+ <tr>
+ <td class="institution"><a href="boh.html">Bank of Honolulu</a></td>
+ <td class="city">Honolulu</td>
+ <td class="state">HI</td>
+ <td class="cert">21029</td>
+ <td class="ai">Bank of the Orient</td>
+ <td class="closing">October 13, 2000</td>
+ <td class="updated">March 17, 2005</td>
+ </tr>
+ </tbody>
+ </table>
+ </div>
+
+</div>
+<div id="page_foot">
+ <div class="date">Last Updated 05/31/2013</div>
+ <div class="email"><a href="mailto:cservicefdicdal@fdic.gov">cservicefdicdal@fdic.gov</a></div>
+ <div class="clear"></div>
+</div>
+
+<!-- START of Footer -->
+<footer>
+<link rel="stylesheet" type="text/css" href="/responsive/footer/css/footer.css" />
+<div id="responsive_footer">
+ <div id="responsive_footer-full">
+ <ul>
+ <li><a href="/" title="Home">Home</a></li>
+ <li><a href="/about/contact/ask/" title="Contact Us">Contact Us</a></li>
+ <li><a href="/search/" title="Search">Search</a></li>
+ <li><a href="/help/" title="Help">Help</a></li>
+ <li><a href="/sitemap/" title="SiteMap">SiteMap</a></li>
+ <li><a href="/regulations/laws/forms/" title="Forms">Forms</a></li>
+ <li><a href="/quicklinks/spanish.html" title="En Español">En Español</a></li>
+ </ul>
+ <hr>
+ <ul>
+ <li><a href="/about/policies/" title="Website Policies">Website Policies</a></li>
+ <li><a href="/about/privacy/policy/" title="Privacy Policy">Privacy Policy</a></li>
+ <li><a href="/plainlanguage/" title="Privacy Policy">Plain Writing Act of 2010 </a></li>
+ <li><a href="http://www.usa.gov/" title="USA.gov">USA.gov</a></li>
+ <li><a href="http://www.fdicoig.gov/" title="FDIC Office of Inspector General">FDIC Office of Inspector General</a></li>
+ </ul>
+ <hr>
+ <ul>
+ <li><a href="/about/freedom/" title="Freedom of Information Act (FOIA) Service Center">Freedom of Information Act (FOIA) Service Center</a></li>
+ <li><a href="/open/" title="FDIC Open Government Webpage">FDIC Open Government Webpage</a></li>
+ <li><a href="/about/diversity/nofear/" title="No FEAR Act Data">No FEAR Act Data</a></li>
+ </ul>
+ </div>
+ <div id="responsive_footer-small">
+ <ul>
+ <li><a href="/" title="Home">Home</a></li>
+ <li><a href="/about/contact/ask/" title="Contact Us">Contact Us</a></li>
+ <li><a href="/about/policies/" title="Website Policies">Website Policies</a></li>
+ <li><a href="/search/" title="Search">Search</a></li>
+ </ul>
+ </div>
+</div>
+</footer>
+<!-- START Omniture SiteCatalyst Code -->
+<script language="JavaScript" type="text/javascript" src="/js/s_code_v1.js"></script>
+<script type="text/javascript">
+/************* DO NOT ALTER ANYTHING BELOW THIS LINE ! **************/
+var s_code=s.t();if(s_code)document.write(s_code)</script>
+<script type="text/javascript">
+if(navigator.appVersion.indexOf('MSIE')>=0)document.write(unescape('%3C')+'\!-'+'-')
+</script>
+<noscript>
+<a href="http://www.omniture.com" title="Web Analytics">
+<img src="http://fdic.122.2o7.net/b/ss/fdicgovprod/1/H.21--NS/0?[AQB]%26cl=Session%26AQE" height="1" width="1" border="0" alt="" /></a></li>
+</noscript>
+<!--/DO NOT REMOVE/-->
+<!-- END Omniture SiteCatalyst Code -->
+<!-- END of Footer -->
+
+<script type="text/javascript" src="/responsive/js/jquery.tablesorter.js"></script>
+<script type="text/javascript" src="banklist.js"></script>
+
+</body>
+</html>
diff --git a/pandas/io/tests/data/spam.html b/pandas/io/tests/data/spam.html
index 9f6ac2d74e0c9..935b39f6d6011 100644
--- a/pandas/io/tests/data/spam.html
+++ b/pandas/io/tests/data/spam.html
@@ -204,574 +204,574 @@ <h1>Nutrient data for 07908, Luncheon meat, pork with ham, minced, canned, inclu
<p style="font-style:italic;font-size:.8em">Nutrient values and weights are for edible portion</p>
-
- <table>
- <thead>
-
- <tr><td colspan="6" style="vertical-align:middle;text-align:center;height:2em;" class="buttons"><input type="submit" name="_action_show" value="Apply Changes" class="calc" title="Click to recalculate measures" id="1732" /><a href="/ndb/help/contextHelp/measures" onclick="jQuery.ajax({type:'POST', url:'/ndb/help/contextHelp/measures',success:function(data,textStatus){jQuery('#helpDiv').html(data);},error:function(XMLHttpRequest,textStatus,errorThrown){},complete:function(XMLHttpRequest,textStatus){GRAILSUI.measuresHelpDialog.show();}});return false;" controller="help" action="contextHelp" id="measures"><img title="Click for more information on calculating household measures" src="/ndb/static/images/skin/help.png" alt="Help" border="0" style="vertical-align:middle"/></a></span></td></tr>
- <th style="vertical-align:middle">Nutrient</th>
- <th style="vertical-align:middle" >Unit</th>
- <th style="vertical-align:middle"><input type="text" name="Qv" style="width:30px;text-align:right;border-style:inset;height:15px" maxlength="5" value="1" id="Qv" /><br/>Value per 100.0g</th>
-
-
-
-
- <th style="width:130px;line-height:1.2em;text-align:center">
- <input type="text" name="Q3483" style="width:30px;text-align:right;border-style:inset;height:15px" maxlength="5" value="2.0" id="Q3483" />
- <br>
-
- oz 1 NLEA serving
- <br>56g
- <!--
- -->
- </th>
-
- </thead>
- <tbody>
-
- <tr class="even" >
- <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Proximates</td>
- </tr>
-
-
- <tr class="odd">
- <td >Water
-
-
- </td>
-
- <td style="text-align:center;">g</td>
- <td style="text-align:right;">51.70</td>
-
-
- <td style="text-align:right;">28.95</td>
-
-
- </tr>
-
-
- <tr class="even">
- <td >Energy
-
-
- </td>
-
- <td style="text-align:center;">kcal</td>
- <td style="text-align:right;">315</td>
-
-
- <td style="text-align:right;">176</td>
-
-
- </tr>
-
-
- <tr class="odd">
- <td >Protein
-
-
- </td>
-
- <td style="text-align:center;">g</td>
- <td style="text-align:right;">13.40</td>
-
-
- <td style="text-align:right;">7.50</td>
-
-
- </tr>
-
-
- <tr class="even">
- <td >Total lipid (fat)
-
-
- </td>
-
- <td style="text-align:center;">g</td>
- <td style="text-align:right;">26.60</td>
-
-
- <td style="text-align:right;">14.90</td>
-
-
- </tr>
-
-
- <tr class="odd">
- <td >Carbohydrate, by difference
-
-
- </td>
-
- <td style="text-align:center;">g</td>
- <td style="text-align:right;">4.60</td>
-
-
- <td style="text-align:right;">2.58</td>
-
-
- </tr>
-
-
- <tr class="even">
- <td >Fiber, total dietary
-
-
- </td>
-
- <td style="text-align:center;">g</td>
- <td style="text-align:right;">0.0</td>
-
-
- <td style="text-align:right;">0.0</td>
-
-
- </tr>
-
-
- <tr class="odd">
- <td >Sugars, total
-
-
- </td>
-
- <td style="text-align:center;">g</td>
- <td style="text-align:right;">0.00</td>
-
-
- <td style="text-align:right;">0.00</td>
-
-
- </tr>
-
-
-
- <tr class="even" >
- <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Minerals</td>
- </tr>
-
-
- <tr class="odd">
- <td >Calcium, Ca
-
-
- </td>
-
- <td style="text-align:center;">mg</td>
- <td style="text-align:right;">0</td>
-
-
- <td style="text-align:right;">0</td>
-
-
- </tr>
-
-
- <tr class="even">
- <td >Iron, Fe
-
-
- </td>
-
- <td style="text-align:center;">mg</td>
- <td style="text-align:right;">0.64</td>
-
-
- <td style="text-align:right;">0.36</td>
-
-
- </tr>
-
-
- <tr class="odd">
- <td >Magnesium, Mg
-
-
- </td>
-
- <td style="text-align:center;">mg</td>
- <td style="text-align:right;">14</td>
-
-
- <td style="text-align:right;">8</td>
-
-
- </tr>
-
-
- <tr class="even">
- <td >Phosphorus, P
-
-
- </td>
-
- <td style="text-align:center;">mg</td>
- <td style="text-align:right;">151</td>
-
-
- <td style="text-align:right;">85</td>
-
-
- </tr>
-
-
- <tr class="odd">
- <td >Potassium, K
-
-
- </td>
-
- <td style="text-align:center;">mg</td>
- <td style="text-align:right;">409</td>
-
-
- <td style="text-align:right;">229</td>
-
-
- </tr>
-
-
- <tr class="even">
- <td >Sodium, Na
-
-
- </td>
-
- <td style="text-align:center;">mg</td>
- <td style="text-align:right;">1411</td>
-
-
- <td style="text-align:right;">790</td>
-
-
- </tr>
-
-
- <tr class="odd">
- <td >Zinc, Zn
-
-
- </td>
-
- <td style="text-align:center;">mg</td>
- <td style="text-align:right;">1.59</td>
-
-
- <td style="text-align:right;">0.89</td>
-
-
- </tr>
-
-
-
- <tr class="even" >
- <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Vitamins</td>
- </tr>
-
-
- <tr class="odd">
- <td >Vitamin C, total ascorbic acid
-
-
- </td>
-
- <td style="text-align:center;">mg</td>
- <td style="text-align:right;">0.0</td>
-
-
- <td style="text-align:right;">0.0</td>
-
-
- </tr>
-
-
- <tr class="even">
- <td >Thiamin
-
-
- </td>
-
- <td style="text-align:center;">mg</td>
- <td style="text-align:right;">0.317</td>
-
-
- <td style="text-align:right;">0.178</td>
-
-
- </tr>
-
-
- <tr class="odd">
- <td >Riboflavin
-
-
- </td>
-
- <td style="text-align:center;">mg</td>
- <td style="text-align:right;">0.176</td>
-
-
- <td style="text-align:right;">0.099</td>
-
-
- </tr>
-
-
- <tr class="even">
- <td >Niacin
-
-
- </td>
-
- <td style="text-align:center;">mg</td>
- <td style="text-align:right;">3.530</td>
-
-
- <td style="text-align:right;">1.977</td>
-
-
- </tr>
-
-
- <tr class="odd">
- <td >Vitamin B-6
-
-
- </td>
-
- <td style="text-align:center;">mg</td>
- <td style="text-align:right;">0.218</td>
-
-
- <td style="text-align:right;">0.122</td>
-
-
- </tr>
-
-
- <tr class="even">
- <td >Folate, DFE
-
-
- </td>
-
- <td style="text-align:center;">µg</td>
- <td style="text-align:right;">3</td>
-
-
- <td style="text-align:right;">2</td>
-
-
- </tr>
-
-
- <tr class="odd">
- <td >Vitamin B-12
-
-
- </td>
-
- <td style="text-align:center;">µg</td>
- <td style="text-align:right;">0.45</td>
-
-
- <td style="text-align:right;">0.25</td>
-
-
- </tr>
-
-
- <tr class="even">
- <td >Vitamin A, RAE
-
-
- </td>
-
- <td style="text-align:center;">µg</td>
- <td style="text-align:right;">0</td>
-
-
- <td style="text-align:right;">0</td>
-
-
- </tr>
-
-
- <tr class="odd">
- <td >Vitamin A, IU
-
-
- </td>
-
- <td style="text-align:center;">IU</td>
- <td style="text-align:right;">0</td>
-
-
- <td style="text-align:right;">0</td>
-
-
- </tr>
-
-
- <tr class="even">
- <td >Vitamin E (alpha-tocopherol)
-
-
- </td>
-
- <td style="text-align:center;">mg</td>
- <td style="text-align:right;">0.42</td>
-
-
- <td style="text-align:right;">0.24</td>
-
-
- </tr>
-
-
- <tr class="odd">
- <td >Vitamin D (D2 + D3)
-
-
- </td>
-
- <td style="text-align:center;">µg</td>
- <td style="text-align:right;">0.6</td>
-
-
- <td style="text-align:right;">0.3</td>
-
-
- </tr>
-
-
- <tr class="even">
- <td >Vitamin D
-
-
- </td>
-
- <td style="text-align:center;">IU</td>
- <td style="text-align:right;">26</td>
-
-
- <td style="text-align:right;">15</td>
-
-
- </tr>
-
-
- <tr class="odd">
- <td >Vitamin K (phylloquinone)
-
-
- </td>
-
- <td style="text-align:center;">µg</td>
- <td style="text-align:right;">0.0</td>
-
-
- <td style="text-align:right;">0.0</td>
-
-
- </tr>
-
-
-
- <tr class="even" >
- <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Lipids</td>
- </tr>
-
-
- <tr class="odd">
- <td >Fatty acids, total saturated
-
-
- </td>
-
- <td style="text-align:center;">g</td>
- <td style="text-align:right;">9.987</td>
-
-
- <td style="text-align:right;">5.593</td>
-
-
- </tr>
-
-
- <tr class="even">
- <td >Fatty acids, total monounsaturated
-
-
- </td>
-
- <td style="text-align:center;">g</td>
- <td style="text-align:right;">13.505</td>
-
-
- <td style="text-align:right;">7.563</td>
-
-
- </tr>
-
-
- <tr class="odd">
- <td >Fatty acids, total polyunsaturated
-
-
- </td>
-
- <td style="text-align:center;">g</td>
- <td style="text-align:right;">2.019</td>
-
-
- <td style="text-align:right;">1.131</td>
-
-
- </tr>
-
-
- <tr class="even">
- <td >Cholesterol
-
-
- </td>
-
- <td style="text-align:center;">mg</td>
- <td style="text-align:right;">71</td>
-
-
- <td style="text-align:right;">40</td>
-
-
- </tr>
-
-
-
- <tr class="even" >
- <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Other</td>
- </tr>
-
-
- <tr class="odd">
- <td >Caffeine
-
-
- </td>
-
- <td style="text-align:center;">mg</td>
- <td style="text-align:right;">0</td>
-
-
- <td style="text-align:right;">0</td>
-
-
- </tr>
-
-
-
- </tbody>
+
+ <table>
+ <thead>
+
+ <tr><td colspan="6" style="vertical-align:middle;text-align:center;height:2em;" class="buttons"><input type="submit" name="_action_show" value="Apply Changes" class="calc" title="Click to recalculate measures" id="1732" /><a href="/ndb/help/contextHelp/measures" onclick="jQuery.ajax({type:'POST', url:'/ndb/help/contextHelp/measures',success:function(data,textStatus){jQuery('#helpDiv').html(data);},error:function(XMLHttpRequest,textStatus,errorThrown){},complete:function(XMLHttpRequest,textStatus){GRAILSUI.measuresHelpDialog.show();}});return false;" controller="help" action="contextHelp" id="measures"><img title="Click for more information on calculating household measures" src="/ndb/static/images/skin/help.png" alt="Help" border="0" style="vertical-align:middle"/></a></span></td></tr>
+ <th style="vertical-align:middle">Nutrient</th>
+ <th style="vertical-align:middle" >Unit</th>
+ <th style="vertical-align:middle"><input type="text" name="Qv" style="width:30px;text-align:right;border-style:inset;height:15px" maxlength="5" value="1" id="Qv" /><br/>Value per 100.0g</th>
+
+
+
+
+ <th style="width:130px;line-height:1.2em;text-align:center">
+ <input type="text" name="Q3483" style="width:30px;text-align:right;border-style:inset;height:15px" maxlength="5" value="2.0" id="Q3483" />
+ <br>
+
+ oz 1 NLEA serving
+ <br>56g
+ <!--
+ -->
+ </th>
+
+ </thead>
+ <tbody>
+
+ <tr class="even" >
+ <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Proximates</td>
+ </tr>
+
+
+ <tr class="odd">
+ <td >Water
+
+
+ </td>
+
+ <td style="text-align:center;">g</td>
+ <td style="text-align:right;">51.70</td>
+
+
+ <td style="text-align:right;">28.95</td>
+
+
+ </tr>
+
+
+ <tr class="even">
+ <td >Energy
+
+
+ </td>
+
+ <td style="text-align:center;">kcal</td>
+ <td style="text-align:right;">315</td>
+
+
+ <td style="text-align:right;">176</td>
+
+
+ </tr>
+
+
+ <tr class="odd">
+ <td >Protein
+
+
+ </td>
+
+ <td style="text-align:center;">g</td>
+ <td style="text-align:right;">13.40</td>
+
+
+ <td style="text-align:right;">7.50</td>
+
+
+ </tr>
+
+
+ <tr class="even">
+ <td >Total lipid (fat)
+
+
+ </td>
+
+ <td style="text-align:center;">g</td>
+ <td style="text-align:right;">26.60</td>
+
+
+ <td style="text-align:right;">14.90</td>
+
+
+ </tr>
+
+
+ <tr class="odd">
+ <td >Carbohydrate, by difference
+
+
+ </td>
+
+ <td style="text-align:center;">g</td>
+ <td style="text-align:right;">4.60</td>
+
+
+ <td style="text-align:right;">2.58</td>
+
+
+ </tr>
+
+
+ <tr class="even">
+ <td >Fiber, total dietary
+
+
+ </td>
+
+ <td style="text-align:center;">g</td>
+ <td style="text-align:right;">0.0</td>
+
+
+ <td style="text-align:right;">0.0</td>
+
+
+ </tr>
+
+
+ <tr class="odd">
+ <td >Sugars, total
+
+
+ </td>
+
+ <td style="text-align:center;">g</td>
+ <td style="text-align:right;">0.00</td>
+
+
+ <td style="text-align:right;">0.00</td>
+
+
+ </tr>
+
+
+
+ <tr class="even" >
+ <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Minerals</td>
+ </tr>
+
+
+ <tr class="odd">
+ <td >Calcium, Ca
+
+
+ </td>
+
+ <td style="text-align:center;">mg</td>
+ <td style="text-align:right;">0</td>
+
+
+ <td style="text-align:right;">0</td>
+
+
+ </tr>
+
+
+ <tr class="even">
+ <td >Iron, Fe
+
+
+ </td>
+
+ <td style="text-align:center;">mg</td>
+ <td style="text-align:right;">0.64</td>
+
+
+ <td style="text-align:right;">0.36</td>
+
+
+ </tr>
+
+
+ <tr class="odd">
+ <td >Magnesium, Mg
+
+
+ </td>
+
+ <td style="text-align:center;">mg</td>
+ <td style="text-align:right;">14</td>
+
+
+ <td style="text-align:right;">8</td>
+
+
+ </tr>
+
+
+ <tr class="even">
+ <td >Phosphorus, P
+
+
+ </td>
+
+ <td style="text-align:center;">mg</td>
+ <td style="text-align:right;">151</td>
+
+
+ <td style="text-align:right;">85</td>
+
+
+ </tr>
+
+
+ <tr class="odd">
+ <td >Potassium, K
+
+
+ </td>
+
+ <td style="text-align:center;">mg</td>
+ <td style="text-align:right;">409</td>
+
+
+ <td style="text-align:right;">229</td>
+
+
+ </tr>
+
+
+ <tr class="even">
+ <td >Sodium, Na
+
+
+ </td>
+
+ <td style="text-align:center;">mg</td>
+ <td style="text-align:right;">1411</td>
+
+
+ <td style="text-align:right;">790</td>
+
+
+ </tr>
+
+
+ <tr class="odd">
+ <td >Zinc, Zn
+
+
+ </td>
+
+ <td style="text-align:center;">mg</td>
+ <td style="text-align:right;">1.59</td>
+
+
+ <td style="text-align:right;">0.89</td>
+
+
+ </tr>
+
+
+
+ <tr class="even" >
+ <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Vitamins</td>
+ </tr>
+
+
+ <tr class="odd">
+ <td >Vitamin C, total ascorbic acid
+
+
+ </td>
+
+ <td style="text-align:center;">mg</td>
+ <td style="text-align:right;">0.0</td>
+
+
+ <td style="text-align:right;">0.0</td>
+
+
+ </tr>
+
+
+ <tr class="even">
+ <td >Thiamin
+
+
+ </td>
+
+ <td style="text-align:center;">mg</td>
+ <td style="text-align:right;">0.317</td>
+
+
+ <td style="text-align:right;">0.178</td>
+
+
+ </tr>
+
+
+ <tr class="odd">
+ <td >Riboflavin
+
+
+ </td>
+
+ <td style="text-align:center;">mg</td>
+ <td style="text-align:right;">0.176</td>
+
+
+ <td style="text-align:right;">0.099</td>
+
+
+ </tr>
+
+
+ <tr class="even">
+ <td >Niacin
+
+
+ </td>
+
+ <td style="text-align:center;">mg</td>
+ <td style="text-align:right;">3.530</td>
+
+
+ <td style="text-align:right;">1.977</td>
+
+
+ </tr>
+
+
+ <tr class="odd">
+ <td >Vitamin B-6
+
+
+ </td>
+
+ <td style="text-align:center;">mg</td>
+ <td style="text-align:right;">0.218</td>
+
+
+ <td style="text-align:right;">0.122</td>
+
+
+ </tr>
+
+
+ <tr class="even">
+ <td >Folate, DFE
+
+
+ </td>
+
+ <td style="text-align:center;">µg</td>
+ <td style="text-align:right;">3</td>
+
+
+ <td style="text-align:right;">2</td>
+
+
+ </tr>
+
+
+ <tr class="odd">
+ <td >Vitamin B-12
+
+
+ </td>
+
+ <td style="text-align:center;">µg</td>
+ <td style="text-align:right;">0.45</td>
+
+
+ <td style="text-align:right;">0.25</td>
+
+
+ </tr>
+
+
+ <tr class="even">
+ <td >Vitamin A, RAE
+
+
+ </td>
+
+ <td style="text-align:center;">µg</td>
+ <td style="text-align:right;">0</td>
+
+
+ <td style="text-align:right;">0</td>
+
+
+ </tr>
+
+
+ <tr class="odd">
+ <td >Vitamin A, IU
+
+
+ </td>
+
+ <td style="text-align:center;">IU</td>
+ <td style="text-align:right;">0</td>
+
+
+ <td style="text-align:right;">0</td>
+
+
+ </tr>
+
+
+ <tr class="even">
+ <td >Vitamin E (alpha-tocopherol)
+
+
+ </td>
+
+ <td style="text-align:center;">mg</td>
+ <td style="text-align:right;">0.42</td>
+
+
+ <td style="text-align:right;">0.24</td>
+
+
+ </tr>
+
+
+ <tr class="odd">
+ <td >Vitamin D (D2 + D3)
+
+
+ </td>
+
+ <td style="text-align:center;">µg</td>
+ <td style="text-align:right;">0.6</td>
+
+
+ <td style="text-align:right;">0.3</td>
+
+
+ </tr>
+
+
+ <tr class="even">
+ <td >Vitamin D
+
+
+ </td>
+
+ <td style="text-align:center;">IU</td>
+ <td style="text-align:right;">26</td>
+
+
+ <td style="text-align:right;">15</td>
+
+
+ </tr>
+
+
+ <tr class="odd">
+ <td >Vitamin K (phylloquinone)
+
+
+ </td>
+
+ <td style="text-align:center;">µg</td>
+ <td style="text-align:right;">0.0</td>
+
+
+ <td style="text-align:right;">0.0</td>
+
+
+ </tr>
+
+
+
+ <tr class="even" >
+ <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Lipids</td>
+ </tr>
+
+
+ <tr class="odd">
+ <td >Fatty acids, total saturated
+
+
+ </td>
+
+ <td style="text-align:center;">g</td>
+ <td style="text-align:right;">9.987</td>
+
+
+ <td style="text-align:right;">5.593</td>
+
+
+ </tr>
+
+
+ <tr class="even">
+ <td >Fatty acids, total monounsaturated
+
+
+ </td>
+
+ <td style="text-align:center;">g</td>
+ <td style="text-align:right;">13.505</td>
+
+
+ <td style="text-align:right;">7.563</td>
+
+
+ </tr>
+
+
+ <tr class="odd">
+ <td >Fatty acids, total polyunsaturated
+
+
+ </td>
+
+ <td style="text-align:center;">g</td>
+ <td style="text-align:right;">2.019</td>
+
+
+ <td style="text-align:right;">1.131</td>
+
+
+ </tr>
+
+
+ <tr class="even">
+ <td >Cholesterol
+
+
+ </td>
+
+ <td style="text-align:center;">mg</td>
+ <td style="text-align:right;">71</td>
+
+
+ <td style="text-align:right;">40</td>
+
+
+ </tr>
+
+
+
+ <tr class="even" >
+ <td style="font-weight:bold" colspan="6" bgcolor="#dddddd" >Other</td>
+ </tr>
+
+
+ <tr class="odd">
+ <td >Caffeine
+
+
+ </td>
+
+ <td style="text-align:center;">mg</td>
+ <td style="text-align:right;">0</td>
+
+
+ <td style="text-align:right;">0</td>
+
+
+ </tr>
+
+
+
+ </tbody>
</table>
</div>
| View this pull request with [`?w=t`](https://github.com/pydata/pandas/pull/3915?w=t) added to the end of the url to see that there are not differences [aside from the .gitattributes files] - this should prevent more commits with CRLF entering the repo.
I think it'd be good to merge this (especially because we can avoid this happening in the future.), though (I think) it'll mess up the history for 10 minutes to pandas and `pandas/core/expressions.py`.
After this, will never need to deal with CRLF again! Yay!
| https://api.github.com/repos/pandas-dev/pandas/pulls/3915 | 2013-06-15T15:28:08Z | 2013-06-17T23:46:16Z | 2013-06-17T23:46:16Z | 2014-06-14T06:24:42Z |
TST: Move explicit connectivity checks to decorator. | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 882826765d057..61cccaba44be7 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -91,6 +91,11 @@ pandas 0.11.1
integers or floats that are in an epoch unit of ``D, s, ms, us, ns``, thanks @mtkini (:issue:`3969`)
(e.g. unix timestamps or epoch ``s``, with fracional seconds allowed) (:issue:`3540`)
- DataFrame corr method (spearman) is now cythonized.
+ - Improved ``network`` test decorator to catch ``IOError`` (and therefore
+ ``URLError`` as well). Added ``with_connectivity_check`` decorator to allow
+ explicitly checking a website as a proxy for seeing if there is network
+ connectivity. Plus, new ``optional_args`` decorator factory for decorators.
+ (:issue:`3910`, :issue:`3914`)
**API Changes**
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 76c439afc452c..3202efbcef83a 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -386,6 +386,11 @@ Bug Fixes
- ``read_html`` now correctly skips tests (:issue:`3741`)
- Fixed a bug where ``DataFrame.replace`` with a compiled regular expression
in the ``to_replace`` argument wasn't working (:issue:`3907`)
+ - Improved ``network`` test decorator to catch ``IOError`` (and therefore
+ ``URLError`` as well). Added ``with_connectivity_check`` decorator to allow
+ explicitly checking a website as a proxy for seeing if there is network
+ connectivity. Plus, new ``optional_args`` decorator factory for decorators.
+ (:issue:`3910`, :issue:`3914`)
See the :ref:`full release notes
<release>` or issue tracker
diff --git a/pandas/io/tests/test_fred.py b/pandas/io/tests/test_fred.py
index 00a90ec3da402..cd52dca507841 100644
--- a/pandas/io/tests/test_fred.py
+++ b/pandas/io/tests/test_fred.py
@@ -8,7 +8,7 @@
import pandas.io.data as web
from pandas.util.testing import (network, assert_frame_equal,
assert_series_equal,
- assert_almost_equal)
+ assert_almost_equal, with_connectivity_check)
from numpy.testing.decorators import slow
import urllib2
@@ -17,7 +17,7 @@
class TestFred(unittest.TestCase):
@slow
- @network
+ @with_connectivity_check("http://www.google.com")
def test_fred(self):
"""
Throws an exception when DataReader can't get a 200 response from
@@ -26,22 +26,14 @@ def test_fred(self):
start = datetime(2010, 1, 1)
end = datetime(2013, 01, 27)
- try:
- self.assertEquals(
- web.DataReader("GDP", "fred", start, end)['GDP'].tail(1),
- 16004.5)
+ self.assertEquals(
+ web.DataReader("GDP", "fred", start, end)['GDP'].tail(1),
+ 16004.5)
- self.assertRaises(
- Exception,
- lambda: web.DataReader("NON EXISTENT SERIES", 'fred',
- start, end))
- except urllib2.URLError:
- try:
- urllib2.urlopen('http://google.com')
- except urllib2.URLError:
- raise nose.SkipTest
- else:
- raise
+ self.assertRaises(
+ Exception,
+ lambda: web.DataReader("NON EXISTENT SERIES", 'fred',
+ start, end))
@slow
@network
diff --git a/pandas/io/tests/test_ga.py b/pandas/io/tests/test_ga.py
index 5fa2120090025..d2061a6d0b57a 100644
--- a/pandas/io/tests/test_ga.py
+++ b/pandas/io/tests/test_ga.py
@@ -1,26 +1,26 @@
+import os
import unittest
-import nose
from datetime import datetime
+import nose
import pandas as pd
-import pandas.core.common as com
from pandas import DataFrame
-from pandas.util.testing import network, assert_frame_equal
+from pandas.util.testing import network, assert_frame_equal, with_connectivity_check
from numpy.testing.decorators import slow
+try:
+ import httplib2
+ from pandas.io.ga import GAnalytics, read_ga
+ from pandas.io.auth import AuthenticationConfigError, reset_token_store
+ from pandas.io import auth
+except ImportError:
+ raise nose.SkipTest
class TestGoogle(unittest.TestCase):
_multiprocess_can_split_ = True
def test_remove_token_store(self):
- import os
- try:
- import pandas.io.auth as auth
- from pandas.io.ga import reset_token_store
- except ImportError:
- raise nose.SkipTest
-
auth.DEFAULT_TOKEN_FILE = 'test.dat'
with open(auth.DEFAULT_TOKEN_FILE, 'w') as fh:
fh.write('test')
@@ -31,13 +31,6 @@ def test_remove_token_store(self):
@slow
@network
def test_getdata(self):
- try:
- import httplib2
- from pandas.io.ga import GAnalytics, read_ga
- from pandas.io.auth import AuthenticationConfigError
- except ImportError:
- raise nose.SkipTest
-
try:
end_date = datetime.now()
start_date = end_date - pd.offsets.Day() * 5
@@ -76,24 +69,10 @@ def test_getdata(self):
except AuthenticationConfigError:
raise nose.SkipTest
- except httplib2.ServerNotFoundError:
- try:
- h = httplib2.Http()
- response, content = h.request("http://www.google.com")
- raise
- except httplib2.ServerNotFoundError:
- raise nose.SkipTest
@slow
- @network
+ @with_connectivity_check("http://www.google.com")
def test_iterator(self):
- try:
- import httplib2
- from pandas.io.ga import GAnalytics, read_ga
- from pandas.io.auth import AuthenticationConfigError
- except ImportError:
- raise nose.SkipTest
-
try:
reader = GAnalytics()
@@ -118,24 +97,10 @@ def test_iterator(self):
except AuthenticationConfigError:
raise nose.SkipTest
- except httplib2.ServerNotFoundError:
- try:
- h = httplib2.Http()
- response, content = h.request("http://www.google.com")
- raise
- except httplib2.ServerNotFoundError:
- raise nose.SkipTest
@slow
- @network
+ @with_connectivity_check("http://www.google.com")
def test_segment(self):
- try:
- import httplib2
- from pandas.io.ga import GAnalytics, read_ga
- from pandas.io.auth import AuthenticationConfigError
- except ImportError:
- raise nose.SkipTest
-
try:
end_date = datetime.now()
start_date = end_date - pd.offsets.Day() * 5
@@ -186,16 +151,7 @@ def test_segment(self):
except AuthenticationConfigError:
raise nose.SkipTest
- except httplib2.ServerNotFoundError:
- try:
- h = httplib2.Http()
- response, content = h.request("http://www.google.com")
- raise
- except httplib2.ServerNotFoundError:
- raise nose.SkipTest
-
if __name__ == '__main__':
- import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
diff --git a/pandas/io/tests/test_google.py b/pandas/io/tests/test_google.py
index 8c16c60ac8b87..8ceda94f07a52 100644
--- a/pandas/io/tests/test_google.py
+++ b/pandas/io/tests/test_google.py
@@ -2,57 +2,30 @@
import nose
from datetime import datetime
+import numpy as np
import pandas as pd
import pandas.io.data as web
-from pandas.util.testing import (network, assert_series_equal)
-from numpy.testing.decorators import slow
-import numpy as np
-
-import urllib2
+from pandas.util.testing import network, with_connectivity_check
class TestGoogle(unittest.TestCase):
- @network
+ @with_connectivity_check("http://www.google.com")
def test_google(self):
# asserts that google is minimally working and that it throws
- # an excecption when DataReader can't get a 200 response from
+ # an exception when DataReader can't get a 200 response from
# google
start = datetime(2010, 1, 1)
end = datetime(2013, 01, 27)
- try:
- self.assertEquals(
- web.DataReader("F", 'google', start, end)['Close'][-1],
- 13.68)
- except urllib2.URLError:
- try:
- urllib2.urlopen('http://www.google.com')
- except urllib2.URLError:
- raise nose.SkipTest
- else:
- raise
-
- @network
- def test_google_non_existent(self):
- # asserts that google is minimally working and that it throws
- # an excecption when DataReader can't get a 200 response from
- # google
- start = datetime(2010, 1, 1)
- end = datetime(2013, 01, 27)
+ self.assertEquals(
+ web.DataReader("F", 'google', start, end)['Close'][-1],
+ 13.68)
- try:
- self.assertRaises(
- Exception,
- lambda: web.DataReader("NON EXISTENT TICKER", 'google',
- start, end))
- except urllib2.URLError:
- try:
- urllib2.urlopen('http://www.google.com')
- except urllib2.URLError:
- raise nose.SkipTest
- else:
- raise
+ self.assertRaises(
+ Exception,
+ lambda: web.DataReader("NON EXISTENT TICKER", 'google',
+ start, end))
@network
@@ -60,64 +33,40 @@ def test_get_quote(self):
self.assertRaises(NotImplementedError,
lambda: web.get_quote_google(pd.Series(['GOOG', 'AAPL', 'GOOG'])))
- @network
+ @with_connectivity_check('http://www.google.com')
def test_get_goog_volume(self):
- try:
- df = web.get_data_google('GOOG')
- assert df.Volume.ix['OCT-08-2010'] == 2863473
- except IOError:
- try:
- urllib2.urlopen('http://www.google.com')
- except IOError:
- raise nose.SkipTest
- else:
- raise
+ df = web.get_data_google('GOOG')
+ assert df.Volume.ix['OCT-08-2010'] == 2863473
- @network
+ @with_connectivity_check('http://www.google.com')
def test_get_multi1(self):
- try:
- sl = ['AAPL', 'AMZN', 'GOOG']
- pan = web.get_data_google(sl, '2012')
- ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG]
- assert ts[0].dayofyear == 96
- except IOError:
- try:
- urllib2.urlopen('http://www.google.com')
- except IOError:
- raise nose.SkipTest
- else:
- raise
+ sl = ['AAPL', 'AMZN', 'GOOG']
+ pan = web.get_data_google(sl, '2012')
+ ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG]
+ assert ts[0].dayofyear == 96
- @network
+ @with_connectivity_check('http://www.google.com')
def test_get_multi2(self):
- try:
- pan = web.get_data_google(['GE', 'MSFT', 'INTC'], 'JAN-01-12', 'JAN-31-12')
- expected = [19.02, 28.23, 25.39]
- result = pan.Close.ix['01-18-12'][['GE', 'MSFT', 'INTC']].tolist()
- assert result == expected
-
- # sanity checking
- t= np.array(result)
- assert np.issubdtype(t.dtype, np.floating)
- assert t.shape == (3,)
-
- expected = [[ 18.99, 28.4 , 25.18],
- [ 18.58, 28.31, 25.13],
- [ 19.03, 28.16, 25.52],
- [ 18.81, 28.82, 25.87]]
- result = pan.Open.ix['Jan-15-12':'Jan-20-12'][['GE', 'MSFT', 'INTC']].values
- assert (result == expected).all()
-
- # sanity checking
- t= np.array(pan)
- assert np.issubdtype(t.dtype, np.floating)
- except IOError:
- try:
- urllib2.urlopen('http://www.google.com')
- except IOError:
- raise nose.SkipTest
- else:
- raise
+ pan = web.get_data_google(['GE', 'MSFT', 'INTC'], 'JAN-01-12', 'JAN-31-12')
+ expected = [19.02, 28.23, 25.39]
+ result = pan.Close.ix['01-18-12'][['GE', 'MSFT', 'INTC']].tolist()
+ assert result == expected
+
+ # sanity checking
+ t= np.array(result)
+ assert np.issubdtype(t.dtype, np.floating)
+ assert t.shape == (3,)
+
+ expected = [[ 18.99, 28.4 , 25.18],
+ [ 18.58, 28.31, 25.13],
+ [ 19.03, 28.16, 25.52],
+ [ 18.81, 28.82, 25.87]]
+ result = pan.Open.ix['Jan-15-12':'Jan-20-12'][['GE', 'MSFT', 'INTC']].values
+ assert (result == expected).all()
+
+ # sanity checking
+ t= np.array(pan)
+ assert np.issubdtype(t.dtype, np.floating)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/io/tests/test_yahoo.py b/pandas/io/tests/test_yahoo.py
index 0e2c2022af422..712475f76f5ed 100644
--- a/pandas/io/tests/test_yahoo.py
+++ b/pandas/io/tests/test_yahoo.py
@@ -4,41 +4,27 @@
import pandas as pd
import pandas.io.data as web
-from pandas.util.testing import (network, assert_frame_equal,
- assert_series_equal,
- assert_almost_equal)
-from numpy.testing.decorators import slow
-
-import urllib2
+from pandas.util.testing import network, assert_series_equal, with_connectivity_check
class TestYahoo(unittest.TestCase):
- @network
+ @with_connectivity_check("http://www.google.com")
def test_yahoo(self):
# asserts that yahoo is minimally working and that it throws
- # an excecption when DataReader can't get a 200 response from
+ # an exception when DataReader can't get a 200 response from
# yahoo
start = datetime(2010, 1, 1)
end = datetime(2013, 01, 27)
- try:
- self.assertEquals(
- web.DataReader("F", 'yahoo', start, end)['Close'][-1],
- 13.68)
+ self.assertEquals(
+ web.DataReader("F", 'yahoo', start, end)['Close'][-1],
+ 13.68)
- self.assertRaises(
- Exception,
- lambda: web.DataReader("NON EXISTENT TICKER", 'yahoo',
+ self.assertRaises(
+ Exception,
+ lambda: web.DataReader("NON EXISTENT TICKER", 'yahoo',
start, end))
- except urllib2.URLError:
- try:
- urllib2.urlopen('http://www.google.com')
- except urllib2.URLError:
- raise nose.SkipTest
- else:
- raise
-
@network
def test_get_quote(self):
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 5e1ab59305bab..5a583ca3ae7d9 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -3,6 +3,7 @@
# pylint: disable-msg=W0402
from datetime import datetime
+from functools import wraps
import random
import string
import sys
@@ -12,6 +13,8 @@
from contextlib import contextmanager # contextlib is available since 2.5
from distutils.version import LooseVersion
+import urllib2
+import nose
from numpy.random import randn
import numpy as np
@@ -36,6 +39,7 @@
N = 30
K = 4
+_RAISE_NETWORK_ERROR_DEFAULT = False
def rands(n):
@@ -663,18 +667,55 @@ def skip_if_no_package(*args, **kwargs):
# Additional tags decorators for nose
#
+def optional_args(decorator):
+ """allows a decorator to take optional positional and keyword arguments.
+ Assumes that taking a single, callable, positional argument means that
+ it is decorating a function, i.e. something like this::
-def network(t):
+ @my_decorator
+ def function(): pass
+
+ Calls decorator with decorator(f, *args, **kwargs)"""
+
+ @wraps(decorator)
+ def wrapper(*args, **kwargs):
+ def dec(f):
+ return decorator(f, *args, **kwargs)
+
+ is_decorating = not kwargs and len(args) == 1 and callable(args[0])
+ if is_decorating:
+ f = args[0]
+ args = []
+ return dec(f)
+ else:
+ return dec
+
+ return wrapper
+
+
+@optional_args
+def network(t, raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
+ error_classes=(IOError,)):
"""
- Label a test as requiring network connection.
+ Label a test as requiring network connection and skip test if it encounters a ``URLError``.
In some cases it is not possible to assume network presence (e.g. Debian
build hosts).
+ You can pass an optional ``raise_on_error`` argument to the decorator, in
+ which case it will always raise an error even if it's not a subclass of
+ ``error_classes``.
+
Parameters
----------
t : callable
The test requiring network connectivity.
+ raise_on_error : bool
+ If True, never catches errors.
+ error_classes : iterable
+ error classes to ignore. If not in ``error_classes``, raises the error.
+ defaults to URLError. Be careful about changing the error classes here,
+ it may result in undefined behavior.
Returns
-------
@@ -685,18 +726,136 @@ def network(t):
--------
A test can be decorated as requiring network like this::
- from pandas.util.testing import *
-
- @network
- def test_network(self):
- print 'Fetch the stars from http://'
+ >>> from pandas.util.testing import network
+ >>> import urllib2
+ >>> @network
+ ... def test_network():
+ ... urllib2.urlopen("rabbit://bonanza.com")
+ ...
+ >>> try:
+ ... test_network()
+ ... except nose.SkipTest:
+ ... print "SKIPPING!"
+ ...
+ SKIPPING!
+
+ Alternatively, you can use set ``raise_on_error`` in order to get
+ the error to bubble up, e.g.::
+
+ >>> @network(raise_on_error=True)
+ ... def test_network():
+ ... urllib2.urlopen("complaint://deadparrot.com")
+ ...
+ >>> test_network()
+ Traceback (most recent call last):
+ ...
+ URLError: <urlopen error unknown url type: complaint>
And use ``nosetests -a '!network'`` to exclude running tests requiring
- network connectivity.
+ network connectivity. ``_RAISE_NETWORK_ERROR_DEFAULT`` in
+ ``pandas/util/testing.py`` sets the default behavior (currently False).
+ """
+ t.network = True
+
+ @wraps(t)
+ def network_wrapper(*args, **kwargs):
+ if raise_on_error:
+ return t(*args, **kwargs)
+ else:
+ try:
+ return t(*args, **kwargs)
+ except error_classes as e:
+ raise nose.SkipTest("Skipping test %s" % e)
+
+ return network_wrapper
+
+
+def can_connect(url):
+ """tries to connect to the given url. True if succeeds, False if IOError raised"""
+ try:
+ urllib2.urlopen(url)
+ except IOError:
+ return False
+ else:
+ return True
+
+
+@optional_args
+def with_connectivity_check(t, url="http://www.google.com",
+ raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT, check_before_test=False,
+ error_classes=IOError):
"""
+ Label a test as requiring network connection and, if an error is
+ encountered, only raise if it does not find a network connection.
+
+ In comparison to ``network``, this assumes an added contract to your test:
+ you must assert that, under normal conditions, your test will ONLY fail if
+ it does not have network connectivity.
+
+ You can call this in 3 ways: as a standard decorator, with keyword
+ arguments, or with a positional argument that is the url to check.
+
+ Parameters
+ ----------
+ t : callable
+ The test requiring network connectivity.
+ url : path
+ The url to test via ``urllib2.urlopen`` to check for connectivity.
+ Defaults to 'http://www.google.com'.
+ raise_on_error : bool
+ If True, never catches errors.
+ check_before_test : bool
+ If True, checks connectivity before running the test case.
+ error_classes : tuple or Exception
+ error classes to ignore. If not in ``error_classes``, raises the error.
+ defaults to IOError. Be careful about changing the error classes here.
+
+ NOTE: ``raise_on_error`` supercedes ``check_before_test``
+ Returns
+ -------
+ t : callable
+ The decorated test ``t``, with checks for connectivity errors.
+ Example
+ -------
+
+ In this example, you see how it will raise the error if it can connect to
+ the url::
+ >>> @with_connectivity_check("http://www.yahoo.com")
+ ... def test_something_with_yahoo():
+ ... raise IOError("Failure Message")
+ >>> test_something_with_yahoo()
+ Traceback (most recent call last):
+ ...
+ IOError: Failure Message
+
+ I you set check_before_test, it will check the url first and not run the test on failure::
+ >>> @with_connectivity_check("failing://url.blaher", check_before_test=True)
+ ... def test_something():
+ ... print("I ran!")
+ ... raise ValueError("Failure")
+ >>> test_something()
+ Traceback (most recent call last):
+ ...
+ SkipTest
+ """
t.network = True
- return t
+
+ @wraps(t)
+ def wrapper(*args, **kwargs):
+ if check_before_test and not raise_on_error:
+ if not can_connect(url):
+ raise nose.SkipTest
+ try:
+ return t(*args, **kwargs)
+ except error_classes as e:
+ if raise_on_error or can_connect(url):
+ raise
+ else:
+ raise nose.SkipTest("Skipping test due to lack of connectivity"
+ " and error %s" % e)
+
+ return wrapper
class SimpleMock(object):
@@ -743,11 +902,13 @@ def stdin_encoding(encoding=None):
"""
import sys
+
_stdin = sys.stdin
sys.stdin = SimpleMock(sys.stdin, "encoding", encoding)
yield
sys.stdin = _stdin
+
def assertRaisesRegexp(exception, regexp, callable, *args, **kwargs):
""" Port of assertRaisesRegexp from unittest in Python 2.7 - used in with statement.
@@ -779,6 +940,7 @@ def assertRaisesRegexp(exception, regexp, callable, *args, **kwargs):
"""
import re
+
try:
callable(*args, **kwargs)
except Exception as e:
@@ -792,7 +954,7 @@ def assertRaisesRegexp(exception, regexp, callable, *args, **kwargs):
expected_regexp = re.compile(regexp)
if not expected_regexp.search(str(e)):
raise AssertionError('"%s" does not match "%s"' %
- (expected_regexp.pattern, str(e)))
+ (expected_regexp.pattern, str(e)))
else:
# Apparently some exceptions don't have a __name__ attribute? Just aping unittest library here
name = getattr(exception, "__name__", str(exception))
| Instead, `network` decorator in pandas.util.testing catches `IOError` instead.
You have to opt into failing on tests by setting
`pandas.util.testing._RAISE_NETWORK_ERROR_DEFAULT` to `True`.
Also adds a `with_network_connectivity_check` that can automatically check for a connection.
Fixes #3910.
This version of the fix ignores all IOErrors and assumes there are connectivity problems
with any URLError.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3914 | 2013-06-15T12:12:57Z | 2013-06-21T01:48:25Z | 2013-06-21T01:48:25Z | 2014-06-12T07:13:41Z |
BUG: (GH3911) groupby appying with custom function not converting dtypes of result | diff --git a/RELEASE.rst b/RELEASE.rst
index 285bbb2095488..8fb9406a3ba0e 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -234,8 +234,11 @@ pandas 0.11.1
- ``read_html`` now correctly skips tests (GH3741_)
- PandasObjects raise TypeError when trying to hash (GH3882_)
- Fix incorrect arguments passed to concat that are not list-like (e.g. concat(df1,df2)) (GH3481_)
- - Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes) in ``read_csv`` (GH3795_)
+ - Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes)
+ in ``read_csv`` (GH3795_)
- Fix index name not propogating when using ``loc/ix`` (GH3880_)
+ - Fix groupby when applying a custom function resulting in a returned DataFrame was
+ not converting dtypes (GH3911_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -331,6 +334,7 @@ pandas 0.11.1
.. _GH3873: https://github.com/pydata/pandas/issues/3873
.. _GH3877: https://github.com/pydata/pandas/issues/3877
.. _GH3880: https://github.com/pydata/pandas/issues/3880
+.. _GH3911: https://github.com/pydata/pandas/issues/3911
pandas 0.11.0
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 0be5d438e5e7c..168615c060c2b 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1928,7 +1928,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
return Series(values, index=key_index)
return DataFrame(stacked_values, index=index,
- columns=columns)
+ columns=columns).convert_objects()
else:
return Series(values, index=key_index)
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index f3a608b82e756..6989d3bcae42b 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -261,6 +261,20 @@ def test_groupby_nonobject_dtype(self):
expected = self.mframe.groupby(key.astype('O')).sum()
assert_frame_equal(result, expected)
+ # GH 3911, mixed frame non-conversion
+ df = self.df_mixed_floats.copy()
+ df['value'] = range(len(df))
+
+ def max_value(group):
+ return group.ix[group['value'].idxmax()]
+
+ applied = df.groupby('A').apply(max_value)
+ result = applied.get_dtype_counts()
+ result.sort()
+ expected = Series({ 'object' : 2, 'float64' : 2, 'int64' : 1 })
+ expected.sort()
+ assert_series_equal(result,expected)
+
def test_groupby_return_type(self):
# GH2893, return a reduced type
| closes #3911
| https://api.github.com/repos/pandas-dev/pandas/pulls/3913 | 2013-06-15T11:42:08Z | 2013-06-15T12:25:21Z | 2013-06-15T12:25:21Z | 2014-07-07T14:54:54Z |
FIX/ENH: attempt soft conversion of object series before raising a TypeError when plotting | diff --git a/RELEASE.rst b/RELEASE.rst
index 9d862c687bcac..f03e10df1b460 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -77,8 +77,10 @@ pandas 0.11.1
dependencies offered for Linux) (GH3837_).
- Plotting functions now raise a ``TypeError`` before trying to plot anything
if the associated objects have have a dtype of ``object`` (GH1818_,
- GH3572_). This happens before any drawing takes place which elimnates any
- spurious plots from showing up.
+ GH3572_, GH3911_, GH3912_), but they will try to convert object arrays to
+ numeric arrays if possible so that you can still plot, for example, an
+ object array with floats. This happens before any drawing takes place which
+ elimnates any spurious plots from showing up.
- Added Faq section on repr display options, to help users customize their setup.
- ``where`` operations that result in block splitting are much faster (GH3733_)
- Series and DataFrame hist methods now take a ``figsize`` argument (GH3834_)
@@ -341,13 +343,13 @@ pandas 0.11.1
.. _GH3834: https://github.com/pydata/pandas/issues/3834
.. _GH3873: https://github.com/pydata/pandas/issues/3873
.. _GH3877: https://github.com/pydata/pandas/issues/3877
+.. _GH3659: https://github.com/pydata/pandas/issues/3659
+.. _GH3679: https://github.com/pydata/pandas/issues/3679
.. _GH3880: https://github.com/pydata/pandas/issues/3880
-<<<<<<< HEAD
.. _GH3911: https://github.com/pydata/pandas/issues/3911
-=======
.. _GH3907: https://github.com/pydata/pandas/issues/3907
->>>>>>> 7b5933247b80174de4ba571e95a1add809dd9d09
-
+.. _GH3911: https://github.com/pydata/pandas/issues/3911
+.. _GH3912: https://github.com/pydata/pandas/issues/3912
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index dd87c5ea827c3..76ae85a53102b 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -300,9 +300,11 @@ Bug Fixes
~~~~~~~~~
- Plotting functions now raise a ``TypeError`` before trying to plot anything
- if the associated objects have have a ``dtype`` of ``object`` (GH1818_,
- GH3572_). This happens before any drawing takes place which elimnates any
- spurious plots from showing up.
+ if the associated objects have have a dtype of ``object`` (GH1818_,
+ GH3572_, GH3911_, GH3912_), but they will try to convert object arrays to
+ numeric arrays if possible so that you can still plot, for example, an
+ object array with floats. This happens before any drawing takes place which
+ elimnates any spurious plots from showing up.
- ``fillna`` methods now raise a ``TypeError`` if the ``value`` parameter is
a list or tuple.
@@ -416,3 +418,5 @@ on GitHub for a complete list.
.. _GH3659: https://github.com/pydata/pandas/issues/3659
.. _GH3679: https://github.com/pydata/pandas/issues/3679
.. _GH3907: https://github.com/pydata/pandas/issues/3907
+.. _GH3911: https://github.com/pydata/pandas/issues/3911
+.. _GH3912: https://github.com/pydata/pandas/issues/3912
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 353930482c8b8..3bd6dd5d74ba8 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -9,6 +9,10 @@
_VALID_URLS.discard('')
+class PerformanceWarning(Warning):
+ pass
+
+
def _is_url(url):
"""Check to see if a URL has a valid protocol.
@@ -26,27 +30,29 @@ def _is_url(url):
except:
return False
+
def _is_s3_url(url):
- """ Check for an s3 url """
+ """Check for an s3 url"""
try:
return urlparse.urlparse(url).scheme == 's3'
except:
return False
+
def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
- """ if the filepath_or_buffer is a url, translate and return the buffer
- passthru otherwise
-
- Parameters
- ----------
- filepath_or_buffer : a url, filepath, or buffer
- encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
-
- Returns
- -------
- a filepath_or_buffer, the encoding
-
- """
+ """
+ If the filepath_or_buffer is a url, translate and return the buffer
+ passthru otherwise.
+
+ Parameters
+ ----------
+ filepath_or_buffer : a url, filepath, or buffer
+ encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
+
+ Returns
+ -------
+ a filepath_or_buffer, the encoding
+ """
if _is_url(filepath_or_buffer):
from urllib2 import urlopen
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index b1b7b80e5fd23..62aa1b99dfac0 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -12,23 +12,22 @@
import warnings
import numpy as np
-from pandas import (
- Series, TimeSeries, DataFrame, Panel, Panel4D, Index,
- MultiIndex, Int64Index, Timestamp
-)
+from pandas import (Series, TimeSeries, DataFrame, Panel, Panel4D, Index,
+ MultiIndex, Int64Index, Timestamp)
from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel
from pandas.sparse.array import BlockIndex, IntIndex
from pandas.tseries.api import PeriodIndex, DatetimeIndex
-from pandas.core.common import adjoin, isnull, is_list_like
-from pandas.core.algorithms import match, unique, factorize
+from pandas.core.common import adjoin, is_list_like
+from pandas.core.algorithms import match, unique
from pandas.core.categorical import Categorical
-from pandas.core.common import _asarray_tuplesafe, _try_sort
+from pandas.core.common import _asarray_tuplesafe
from pandas.core.internals import BlockManager, make_block
from pandas.core.reshape import block2d_to_blocknd, factor_indexer
-from pandas.core.index import Int64Index, _ensure_index
+from pandas.core.index import _ensure_index
import pandas.core.common as com
from pandas.tools.merge import concat
from pandas.util import py3compat
+from pandas.io.common import PerformanceWarning
import pandas.lib as lib
import pandas.algos as algos
@@ -42,11 +41,14 @@
# PY3 encoding if we don't specify
_default_encoding = 'UTF-8'
+
def _ensure_decoded(s):
""" if we have bytes, decode them to unicde """
if isinstance(s, np.bytes_):
s = s.decode('UTF-8')
return s
+
+
def _ensure_encoding(encoding):
# set the encoding if we need
if encoding is None:
@@ -54,20 +56,31 @@ def _ensure_encoding(encoding):
encoding = _default_encoding
return encoding
-class IncompatibilityWarning(Warning): pass
+
+class IncompatibilityWarning(Warning):
+ pass
+
+
incompatibility_doc = """
-where criteria is being ignored as this version [%s] is too old (or not-defined),
-read the file in and write it out to a new file to upgrade (with the copy_to method)
+where criteria is being ignored as this version [%s] is too old (or
+not-defined), read the file in and write it out to a new file to upgrade (with
+the copy_to method)
"""
-class AttributeConflictWarning(Warning): pass
+
+
+class AttributeConflictWarning(Warning):
+ pass
+
+
attribute_conflict_doc = """
-the [%s] attribute of the existing index is [%s] which conflicts with the new [%s],
-resetting the attribute to None
+the [%s] attribute of the existing index is [%s] which conflicts with the new
+[%s], resetting the attribute to None
"""
-class PerformanceWarning(Warning): pass
+
+
performance_doc = """
-your performance may suffer as PyTables will pickle object types that it cannot map
-directly to c-types [inferred_type->%s,key->%s] [items->%s]
+your performance may suffer as PyTables will pickle object types that it cannot
+map directly to c-types [inferred_type->%s,key->%s] [items->%s]
"""
# map object types
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 0755caf45d336..e57e5a9af2fc0 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -10,6 +10,7 @@
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option
+
import numpy as np
from numpy.testing import assert_array_equal
@@ -189,8 +190,7 @@ def test_bootstrap_plot(self):
from pandas.tools.plotting import bootstrap_plot
_check_plot_works(bootstrap_plot, self.ts, size=10)
- @slow
- def test_all_invalid_plot_data(self):
+ def test_invalid_plot_data(self):
s = Series(list('abcd'))
kinds = 'line', 'bar', 'barh', 'kde', 'density'
@@ -198,6 +198,14 @@ def test_all_invalid_plot_data(self):
self.assertRaises(TypeError, s.plot, kind=kind)
@slow
+ def test_valid_object_plot(self):
+ from pandas.io.common import PerformanceWarning
+ s = Series(range(10), dtype=object)
+ kinds = 'line', 'bar', 'barh', 'kde', 'density'
+
+ for kind in kinds:
+ _check_plot_works(s.plot, kind=kind)
+
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
kinds = 'line', 'bar', 'barh', 'kde', 'density'
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 83ad58c1eb41c..4e85d742e352c 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -878,15 +878,20 @@ def _get_layout(self):
def _compute_plot_data(self):
try:
- # might be a frame
+ # might be an ndframe
numeric_data = self.data._get_numeric_data()
- except AttributeError:
- # a series, but no object dtypes allowed!
- if self.data.dtype == np.object_:
- raise TypeError('invalid dtype for plotting, please cast to a '
- 'numeric dtype explicitly if you want to plot')
-
+ except AttributeError: # TODO: rm in 0.12 (series-inherit-ndframe)
numeric_data = self.data
+ orig_dtype = numeric_data.dtype
+
+ # possible object array of numeric data
+ if orig_dtype == np.object_:
+ numeric_data = numeric_data.convert_objects() # soft convert
+
+ # still an object dtype so we can't plot it
+ if numeric_data.dtype == np.object_:
+ raise TypeError('Series has object dtype and cannot be'
+ ' converted: no numeric data to plot')
try:
is_empty = numeric_data.empty
@@ -895,7 +900,8 @@ def _compute_plot_data(self):
# no empty frames or series allowed
if is_empty:
- raise TypeError('No numeric data to plot')
+ raise TypeError('Empty {0!r}: no numeric data to '
+ 'plot'.format(numeric_data.__class__.__name__))
self.data = numeric_data
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index dd86862a2d551..20e59b6d3342a 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -7,6 +7,7 @@
import string
import sys
import tempfile
+import warnings
from contextlib import contextmanager # contextlib is available since 2.5
@@ -39,7 +40,7 @@
def rands(n):
choices = string.ascii_letters + string.digits
- return ''.join([random.choice(choices) for _ in xrange(n)])
+ return ''.join(random.choice(choices) for _ in xrange(n))
def randu(n):
@@ -746,3 +747,48 @@ def stdin_encoding(encoding=None):
sys.stdin = SimpleMock(sys.stdin, "encoding", encoding)
yield
sys.stdin = _stdin
+
+
+@contextmanager
+def assert_produces_warning(expected_warning=Warning, filter_level="always"):
+ """
+ Context manager for running code that expects to raise (or not raise)
+ warnings. Checks that code raises the expected warning and only the
+ expected warning. Pass ``False`` or ``None`` to check that it does *not*
+ raise a warning. Defaults to ``exception.Warning``, baseclass of all
+ Warnings. (basically a wrapper around ``warnings.catch_warnings``).
+
+ >>> import warnings
+ >>> with assert_produces_warning():
+ ... warnings.warn(UserWarning())
+ ...
+ >>> with assert_produces_warning(False):
+ ... warnings.warn(RuntimeWarning())
+ ...
+ Traceback (most recent call last):
+ ...
+ AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
+ >>> with assert_produces_warning(UserWarning):
+ ... warnings.warn(RuntimeWarning())
+ Traceback (most recent call last):
+ ...
+ AssertionError: Did not see expected warning of class 'UserWarning'.
+
+ ..warn:: This is *not* thread-safe.
+ """
+ with warnings.catch_warnings(record=True) as w:
+ saw_warning = False
+ warnings.simplefilter(filter_level)
+ yield w
+ extra_warnings = []
+ for actual_warning in w:
+ if (expected_warning and issubclass(actual_warning.category,
+ expected_warning)):
+ saw_warning = True
+ else:
+ extra_warnings.append(actual_warning.category.__name__)
+ if expected_warning:
+ assert saw_warning, ("Did not see expected warning of class %r."
+ % expected_warning.__name__)
+ assert not extra_warnings, ("Caused unexpected warning(s): %r."
+ % extra_warnings)
| https://api.github.com/repos/pandas-dev/pandas/pulls/3912 | 2013-06-15T09:58:54Z | 2013-06-16T16:16:06Z | 2013-06-16T16:16:06Z | 2014-06-14T16:36:03Z | |
BUG/API: remove infer_types from replace and fix compiled regex bug | diff --git a/RELEASE.rst b/RELEASE.rst
index 285bbb2095488..ce0823d72296b 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -127,6 +127,10 @@ pandas 0.11.1
- ``DataFrame.interpolate()`` is now deprecated. Please use
``DataFrame.fillna()`` and ``DataFrame.replace()`` instead (GH3582_,
GH3675_, GH3676_).
+ - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
+ deprecated
+ - ``DataFrame.replace`` 's ``infer_types`` parameter is removed and now
+ performs conversion by default. (GH3907_)
- Deprecated display.height, display.width is now only a formatting option
does not control triggering of summary, similar to < 0.11.0.
- Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column
@@ -141,6 +145,8 @@ pandas 0.11.1
``to_pickle`` instance method, ``save`` and ``load`` will give deprecation warning.
- the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
deprecated
+ - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
+ deprecated
- Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_)
- ``as_matrix`` with mixed signed and unsigned dtypes will result in 2 x the lcd of the unsigned
as an int, maxing with ``int64``, to avoid precision issues (GH3733_)
@@ -236,6 +242,8 @@ pandas 0.11.1
- Fix incorrect arguments passed to concat that are not list-like (e.g. concat(df1,df2)) (GH3481_)
- Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes) in ``read_csv`` (GH3795_)
- Fix index name not propogating when using ``loc/ix`` (GH3880_)
+ - Fixed a bug where ``DataFrame.replace`` with a compiled regular expression
+ in the ``to_replace`` argument wasn't working (GH3907_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -331,6 +339,7 @@ pandas 0.11.1
.. _GH3873: https://github.com/pydata/pandas/issues/3873
.. _GH3877: https://github.com/pydata/pandas/issues/3877
.. _GH3880: https://github.com/pydata/pandas/issues/3880
+.. _GH3907: https://github.com/pydata/pandas/issues/3907
pandas 0.11.0
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index dfc36258a680f..dd87c5ea827c3 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -98,6 +98,9 @@ API changes
- the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
deprecated
+ - ``DataFrame.replace`` 's ``infer_types`` parameter is removed and now
+ performs conversion by default. (GH3907_)
+
- Add the keyword ``allow_duplicates`` to ``DataFrame.insert`` to allow a duplicate column
to be inserted if ``True``, default is ``False`` (same as prior to 0.11.1) (GH3679_)
- Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_)
@@ -356,6 +359,8 @@ Bug Fixes
- ``DataFrame.from_records`` did not accept empty recarrays (GH3682_)
- ``read_html`` now correctly skips tests (GH3741_)
+ - Fixed a bug where ``DataFrame.replace`` with a compiled regular expression
+ in the ``to_replace`` argument wasn't working (GH3907_)
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
@@ -410,3 +415,4 @@ on GitHub for a complete list.
.. _GH3877: https://github.com/pydata/pandas/issues/3877
.. _GH3659: https://github.com/pydata/pandas/issues/3659
.. _GH3679: https://github.com/pydata/pandas/issues/3679
+.. _GH3907: https://github.com/pydata/pandas/issues/3907
diff --git a/pandas/core/common.py b/pandas/core/common.py
index d0dcb0b9770b8..a31c92caf4343 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -3,6 +3,7 @@
"""
import itertools
+import re
from datetime import datetime
from numpy.lib.format import read_array, write_array
@@ -1585,8 +1586,21 @@ def is_complex_dtype(arr_or_dtype):
return issubclass(tipo, np.complexfloating)
+def is_re(obj):
+ return isinstance(obj, re._pattern_type)
+
+
+def is_re_compilable(obj):
+ try:
+ re.compile(obj)
+ except TypeError:
+ return False
+ else:
+ return True
+
+
def is_list_like(arg):
- return hasattr(arg, '__iter__') and not isinstance(arg, basestring) or hasattr(arg,'len')
+ return hasattr(arg, '__iter__') and not isinstance(arg, basestring)
def _is_sequence(x):
try:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f9f8a424f8d96..5e3d3e95d8e56 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -33,8 +33,7 @@
_maybe_convert_indices)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
- create_block_manager_from_blocks,
- _re_compilable)
+ create_block_manager_from_blocks)
from pandas.core.series import Series, _radd_compat
import pandas.core.expressions as expressions
from pandas.compat.scipy import scoreatpercentile as _quantile
@@ -3483,7 +3482,7 @@ def bfill(self, axis=0, inplace=False, limit=None):
limit=limit)
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
- regex=False, infer_types=False, method=None, axis=None):
+ regex=False, method=None, axis=None):
"""
Replace values given in 'to_replace' with 'value'.
@@ -3545,8 +3544,6 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
string. Otherwise, `to_replace` must be ``None`` because this
parameter will be interpreted as a regular expression or a list,
dict, or array of regular expressions.
- infer_types : bool, default True
- If ``True`` attempt to convert object blocks to a better dtype.
See also
--------
@@ -3582,7 +3579,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
and play with this method to gain intuition about how it works.
"""
- if not isinstance(regex, bool) and to_replace is not None:
+ if not com.is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is "
"not a bool")
if method is not None:
@@ -3628,8 +3625,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
to_replace, value = keys, values
return self.replace(to_replace, value, inplace=inplace,
- limit=limit, regex=regex,
- infer_types=infer_types)
+ limit=limit, regex=regex)
else:
if not len(self.columns):
return self
@@ -3673,14 +3669,14 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
new_data = self._data.replace(to_replace, value,
inplace=inplace, regex=regex)
elif to_replace is None:
- if not (_re_compilable(regex) or
+ if not (com.is_re_compilable(regex) or
isinstance(regex, (list, dict, np.ndarray, Series))):
raise TypeError("'regex' must be a string or a compiled "
"regular expression or a list or dict of "
"strings or regular expressions, you "
"passed a {0}".format(type(regex)))
return self.replace(regex, value, inplace=inplace, limit=limit,
- regex=True, infer_types=infer_types)
+ regex=True)
else:
# dest iterable dict-like
@@ -3701,8 +3697,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
raise TypeError('Invalid "to_replace" type: '
'{0}'.format(type(to_replace))) # pragma: no cover
- if infer_types:
- new_data = new_data.convert()
+ new_data = new_data.convert(copy=not inplace, convert_numeric=False)
if inplace:
self._data = new_data
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 49d92afc46848..01e976e397111 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1,13 +1,14 @@
import itertools
import re
from datetime import datetime
-import collections
from numpy import nan
import numpy as np
-from pandas.core.common import _possibly_downcast_to_dtype, isnull, _NS_DTYPE, _TD_DTYPE
-from pandas.core.index import Index, MultiIndex, _ensure_index, _handle_legacy_indexes
+from pandas.core.common import (_possibly_downcast_to_dtype, isnull, _NS_DTYPE,
+ _TD_DTYPE)
+from pandas.core.index import (Index, MultiIndex, _ensure_index,
+ _handle_legacy_indexes)
from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices
import pandas.core.common as com
import pandas.lib as lib
@@ -18,10 +19,6 @@
from pandas.util import py3compat
-def _re_compilable(ex):
- return isinstance(ex, (basestring, re._pattern_type))
-
-
class Block(object):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
@@ -744,14 +741,16 @@ def should_store(self, value):
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
blk = [self]
- to_rep_is_list = (isinstance(to_replace, collections.Iterable) and not
- isinstance(to_replace, basestring))
- value_is_list = (isinstance(value, collections.Iterable) and not
- isinstance(to_replace, basestring))
+ to_rep_is_list = com.is_list_like(to_replace)
+ value_is_list = com.is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
- if not either_list and not regex:
+ if not either_list and com.is_re(to_replace):
+ blk[0], = blk[0]._replace_single(to_replace, value,
+ inplace=inplace, filter=filter,
+ regex=True)
+ elif not (either_list or regex):
blk = super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex)
@@ -773,15 +772,18 @@ def replace(self, to_replace, value, inplace=False, filter=None,
def _replace_single(self, to_replace, value, inplace=False, filter=None,
regex=False):
# to_replace is regex compilable
- to_rep_re = _re_compilable(to_replace)
+ to_rep_re = com.is_re_compilable(to_replace)
# regex is regex compilable
- regex_re = _re_compilable(regex)
+ regex_re = com.is_re_compilable(regex)
+ # only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replace and regex can be '
'regex compilable')
+ # if regex was passed as something that can be a regex (rather than a
+ # boolean)
if regex_re:
to_replace = regex
@@ -1668,7 +1670,6 @@ def get(self, item):
mgr._consolidate_inplace()
return mgr
-
def iget(self, i):
item = self.items[i]
if self.items.is_unique:
@@ -1970,7 +1971,6 @@ def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=np.nan):
def _reindex_indexer_items(self, new_items, indexer, fill_value):
# TODO: less efficient than I'd like
- is_unique = self.items.is_unique
item_order = com.take_1d(self.items.values, indexer)
# keep track of what items aren't found anywhere
@@ -2141,7 +2141,6 @@ def rename_axis(self, mapper, axis=1):
def rename_items(self, mapper, copydata=True):
new_items = Index([mapper(x) for x in self.items])
- is_unique = new_items.is_unique
new_blocks = []
for block in self.blocks:
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index d42d950bd2e7b..db01545fb3c9d 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -1,5 +1,6 @@
from datetime import datetime
import sys
+import re
import nose
import unittest
@@ -244,6 +245,18 @@ def test_groupby():
assert v == expected[k]
+def test_is_list_like():
+ passes = ([], [1], (1,), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),
+ Series([]), Series(['a']).str)
+ fails = (1, '2', object())
+
+ for p in passes:
+ assert com.is_list_like(p)
+
+ for f in fails:
+ assert not com.is_list_like(f)
+
+
def test_ensure_int32():
values = np.arange(10, dtype=np.int32)
result = com._ensure_int32(values)
@@ -288,6 +301,30 @@ def test_ensure_platform_int():
# expected = u"\u05d0".encode('utf-8')
# assert (result == expected)
+
+def test_is_re():
+ passes = re.compile('ad'),
+ fails = 'x', 2, 3, object()
+
+ for p in passes:
+ assert com.is_re(p)
+
+ for f in fails:
+ assert not com.is_re(f)
+
+
+def test_is_recompilable():
+ passes = (r'a', u'x', r'asdf', re.compile('adsf'), ur'\u2233\s*',
+ re.compile(r''))
+ fails = 1, [], object()
+
+ for p in passes:
+ assert com.is_re_compilable(p)
+
+ for f in fails:
+ assert not com.is_re_compilable(f)
+
+
class TestTake(unittest.TestCase):
_multiprocess_can_split_ = True
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 5b4d582e5e42e..8342d218e76bb 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -6696,7 +6696,7 @@ def test_regex_replace_list_to_scalar(self):
res3 = df.copy()
res2.replace([r'\s*\.\s*', 'a|b'], nan, regex=True, inplace=True)
res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=nan, inplace=True)
- expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4, object),
+ expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),
'c': [nan, nan, nan, 'd']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
@@ -6772,6 +6772,30 @@ def test_replace(self):
df = DataFrame(index=['a', 'b'])
assert_frame_equal(df, df.replace(5, 7))
+ def test_replace_list(self):
+ obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
+ dfobj = DataFrame(obj)
+
+ ## lists of regexes and values
+ # list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]
+ to_replace_res = [r'.', r'e']
+ values = [nan, 'crap']
+ res = dfobj.replace(to_replace_res, values)
+ expec = DataFrame({'a': ['a', 'b', nan, nan],
+ 'b': ['crap', 'f', 'g', 'h'], 'c': ['h', 'crap',
+ 'l', 'o']})
+ assert_frame_equal(res, expec)
+
+ # list of [v1, v2, ..., vN] -> [v1, v2, .., vN]
+ to_replace_res = [r'.', r'f']
+ values = [r'..', r'crap']
+ res = dfobj.replace(to_replace_res, values)
+ expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e', 'crap', 'g',
+ 'h'],
+ 'c': ['h', 'e', 'l', 'o']})
+
+ assert_frame_equal(res, expec)
+
def test_replace_series_dict(self):
# from GH 3064
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
@@ -6792,10 +6816,24 @@ def test_replace_series_dict(self):
result = df.replace(s, df.mean())
assert_frame_equal(result, expected)
+ def test_replace_convert(self):
+ # gh 3907
+ df = DataFrame([['foo', 'bar', 'bah'], ['bar', 'foo', 'bah']])
+ m = {'foo': 1, 'bar': 2, 'bah': 3}
+ rep = df.replace(m)
+ expec = Series([np.int_, np.int_, np.int_])
+ res = rep.dtypes
+ assert_series_equal(expec, res)
+
def test_replace_mixed(self):
self.mixed_frame['foo'][5:20] = nan
self.mixed_frame['A'][-10:] = nan
+ result = self.mixed_frame.replace(np.nan, -18)
+ expected = self.mixed_frame.fillna(value=-18)
+ assert_frame_equal(result, expected)
+ assert_frame_equal(result.replace(-18, nan), self.mixed_frame)
+
result = self.mixed_frame.replace(np.nan, -1e8)
expected = self.mixed_frame.fillna(value=-1e8)
assert_frame_equal(result, expected)
| closes #3907.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3909 | 2013-06-14T22:26:30Z | 2013-06-15T12:34:13Z | 2013-06-15T12:34:13Z | 2014-07-04T21:31:30Z |
TST: convert knowntestfailures to skip tests | diff --git a/pandas/io/tests/test_pickle.py b/pandas/io/tests/test_pickle.py
index a7f0e3d3e37b1..5c79c57c1e020 100644
--- a/pandas/io/tests/test_pickle.py
+++ b/pandas/io/tests/test_pickle.py
@@ -15,7 +15,6 @@
from pandas import Index
from pandas.sparse.tests import test_sparse
from pandas.util import py3compat
-from pandas.util.decorators import knownfailureif
from pandas.util.misc import is_little_endian
class TestPickle(unittest.TestCase):
@@ -58,16 +57,18 @@ def compare(self, vf):
comparator = getattr(tm,"assert_%s_equal" % typ)
comparator(result,expected)
- @knownfailureif(not is_little_endian(), "known failure of test_read_pickles_0_10_1 on non-little endian")
def test_read_pickles_0_10_1(self):
+ if not is_little_endian():
+ raise nose.SkipTest("known failure of test_read_pickles_0_10_1 on non-little endian")
pth = tm.get_data_path('legacy_pickle/0.10.1')
for f in os.listdir(pth):
vf = os.path.join(pth,f)
self.compare(vf)
- @knownfailureif(not is_little_endian(), "known failure of test_read_pickles_0_11_0 on non-little endian")
def test_read_pickles_0_11_0(self):
+ if not is_little_endian():
+ raise nose.SkipTest("known failure of test_read_pickles_0_11_0 on non-little endian")
pth = tm.get_data_path('legacy_pickle/0.11.0')
for f in os.listdir(pth):
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index 794d303a68d79..4584976c41383 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -14,7 +14,6 @@
from pandas.io.stata import read_stata, StataReader, StataWriter
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
-from pandas.util.decorators import knownfailureif
from pandas.util.misc import is_little_endian
class StataTests(unittest.TestCase):
@@ -129,8 +128,10 @@ def test_read_dta4(self):
tm.assert_frame_equal(parsed, expected)
- @knownfailureif(not is_little_endian(), "known failure of test_write_dta5 on non-little endian")
def test_write_dta5(self):
+ if not is_little_endian():
+ raise nose.SkipTest("known failure of test_write_dta5 on non-little endian")
+
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss', 'int_miss', 'long_miss'])
original.index.name = 'index'
@@ -140,8 +141,10 @@ def test_write_dta5(self):
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), original)
- @knownfailureif(not is_little_endian(), "known failure of test_write_dta6 on non-little endian")
def test_write_dta6(self):
+ if not is_little_endian():
+ raise nose.SkipTest("known failure of test_write_dta6 on non-little endian")
+
original = self.read_csv(self.csv3)
original.index.name = 'index'
| in test_pickle, test_stats
closes #3894, #3896
| https://api.github.com/repos/pandas-dev/pandas/pulls/3906 | 2013-06-14T18:41:53Z | 2013-06-14T19:07:45Z | 2013-06-14T19:07:45Z | 2014-06-18T17:41:16Z |
BLD: add setup_requires in setup.py so pandas can be used with buildout | diff --git a/setup.py b/setup.py
index 030584ba509d3..3f6c945f796ea 100755
--- a/setup.py
+++ b/setup.py
@@ -34,9 +34,9 @@
_have_setuptools = False
setuptools_kwargs = {}
+min_numpy_ver = '1.6'
if sys.version_info[0] >= 3:
- min_numpy_ver = 1.6
if sys.version_info[1] >= 3: # 3.3 needs numpy 1.7+
min_numpy_ver = "1.7.0b2"
@@ -45,6 +45,7 @@
'install_requires': ['python-dateutil >= 2',
'pytz',
'numpy >= %s' % min_numpy_ver],
+ 'setup_requires': ['numpy >= %s' % min_numpy_ver],
'use_2to3_exclude_fixers': ['lib2to3.fixes.fix_next',
],
}
@@ -53,10 +54,12 @@
"\n$ pip install distribute")
else:
+ min_numpy_ver = '1.6.1'
setuptools_kwargs = {
'install_requires': ['python-dateutil',
'pytz',
- 'numpy >= 1.6.1'],
+ 'numpy >= %s' % min_numpy_ver],
+ 'setup_requires': ['numpy >= %s' % min_numpy_ver],
'zip_safe': False,
}
| Numpy is a setup-time dependency due to some .h files, but is not declared as such. This causes tools like buildout ( http://buildout.org/ ) to fail.
Adding the `setup_requires` kwarg to the setuptools config shoud fix this: buildout will correctly assemble any declared setup-time dependencies, before executing the pandas build.
closes #3861
| https://api.github.com/repos/pandas-dev/pandas/pulls/3903 | 2013-06-14T10:30:26Z | 2013-06-18T23:29:05Z | 2013-06-18T23:29:04Z | 2014-07-16T08:14:08Z |
ENH: (GH3863) Timestamp.min and Timestamp.max return a valid Timestamp | diff --git a/RELEASE.rst b/RELEASE.rst
index 4f82f7b458737..977491b554cd8 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -69,6 +69,8 @@ pandas 0.11.1
- support python3 (via ``PyTables 3.0.0``) (GH3750_)
- Add modulo operator to Series, DataFrame
- Add ``date`` method to DatetimeIndex
+ - Timestamp.min and Timestamp.max now represent valid Timestamp instances instead
+ of the default datetime.min and datetime.max (respectively).
- Simplified the API and added a describe method to Categorical
- ``melt`` now accepts the optional parameters ``var_name`` and ``value_name``
to specify custom column names of the returned DataFrame (GH3649_),
diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst
index 45369cb7ddb08..0b736d8ddbe11 100644
--- a/doc/source/gotchas.rst
+++ b/doc/source/gotchas.rst
@@ -271,9 +271,10 @@ can be represented using a 64-bit integer is limited to approximately 584 years:
.. ipython:: python
- begin = Timestamp(-9223285636854775809L)
+ begin = Timestamp.min
begin
- end = Timestamp(np.iinfo(np.int64).max)
+
+ end = Timestamp.max
end
If you need to represent time series data outside the nanosecond timespan, use
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index dfc36258a680f..d5357da16d2bb 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -289,8 +289,12 @@ Enhancements
dff.groupby('B').filter(lambda x: len(x) > 2, dropna=False)
- Series and DataFrame hist methods now take a ``figsize`` argument (GH3834_)
+
- DatetimeIndexes no longer try to convert mixed-integer indexes during join
operations (GH3877_)
+
+ - Timestamp.min and Timestamp.max now represent valid Timestamp instances instead
+ of the default datetime.min and datetime.max (respectively).
Bug Fixes
diff --git a/pandas/tests/test_tseries.py b/pandas/tests/test_tseries.py
index eaeb3325685ec..54c00e798f08a 100644
--- a/pandas/tests/test_tseries.py
+++ b/pandas/tests/test_tseries.py
@@ -2,7 +2,7 @@
from numpy import nan
import numpy as np
-from pandas import Index, isnull
+from pandas import Index, isnull, Timestamp
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as common
import pandas.lib as lib
@@ -683,6 +683,22 @@ def test_int_index(self):
expected = arr.sum(1)
assert_almost_equal(result, expected)
+
+class TestTsUtil(unittest.TestCase):
+ def test_min_valid(self):
+ # Ensure that Timestamp.min is a valid Timestamp
+ Timestamp(Timestamp.min)
+
+ def test_max_valid(self):
+ # Ensure that Timestamp.max is a valid Timestamp
+ Timestamp(Timestamp.max)
+
+ def test_to_datetime_bijective(self):
+ # Ensure that converting to datetime and back only loses precision
+ # by going from nanoseconds to microseconds.
+ self.assertEqual(Timestamp(Timestamp.max.to_pydatetime()).value/1000, Timestamp.max.value/1000)
+ self.assertEqual(Timestamp(Timestamp.min.to_pydatetime()).value/1000, Timestamp.min.value/1000)
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index ec11de7392680..3a0f7d9264174 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -388,6 +388,15 @@ cpdef object get_value_box(ndarray arr, object loc):
return util.get_value_1d(arr, i)
+# Add the min and max fields at the class level
+# These are defined as magic numbers due to strange
+# wraparound behavior when using the true int64 lower boundary
+cdef int64_t _NS_LOWER_BOUND = -9223285636854775000LL
+cdef int64_t _NS_UPPER_BOUND = 9223372036854775807LL
+Timestamp.min = Timestamp(_NS_LOWER_BOUND)
+Timestamp.max = Timestamp(_NS_UPPER_BOUND)
+
+
#----------------------------------------------------------------------
# Frequency inference
@@ -769,8 +778,6 @@ cdef inline object _get_zone(object tz):
except AttributeError:
return tz
-# cdef int64_t _NS_LOWER_BOUND = -9223285636854775809LL
-# cdef int64_t _NS_UPPER_BOUND = -9223372036854775807LL
cdef inline _check_dts_bounds(int64_t value, pandas_datetimestruct *dts):
cdef pandas_datetimestruct dts2
@@ -2868,4 +2875,4 @@ def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
return 1 + days_to_week + day_of_week
# def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"):
-# return _strptime(data_string, format)[0]
+# return _strptime(data_string, format)[0]
\ No newline at end of file
| closes https://github.com/pydata/pandas/issues/3863 by adding valid min and max fields to the Timestamp class.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3902 | 2013-06-14T10:17:04Z | 2013-07-02T14:39:10Z | 2013-07-02T14:39:10Z | 2014-06-14T06:16:44Z |
BUG: fix unicode -> str cast in tslib | diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index ec11de7392680..9b611032455ae 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -465,7 +465,8 @@ cdef class _Timestamp(datetime):
elif op == 3:
return True
else:
- raise TypeError('Cannot compare Timestamp with %s' % str(other))
+ raise TypeError('Cannot compare Timestamp with '
+ '{0!r}'.format(other.__class__.__name__))
self._assert_tzawareness_compat(other)
| This should use format since calling str on a unicode string is a _bad_ idea
because it may or may not repr correctly.
closes #3875.
another error is created from fixing this issue.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3900 | 2013-06-14T00:11:29Z | 2013-06-15T00:14:09Z | 2013-06-15T00:14:09Z | 2014-07-16T08:14:07Z |
TST: test fixes for various builds (debian) | diff --git a/pandas/io/json.py b/pandas/io/json.py
index fcecb31bb77a7..ce95c3394ce2c 100644
--- a/pandas/io/json.py
+++ b/pandas/io/json.py
@@ -268,6 +268,15 @@ def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True):
except:
pass
+ if data.dtype == 'float':
+
+ # coerce floats to 64
+ try:
+ data = data.astype('float64')
+ result = True
+ except:
+ pass
+
# do't coerce 0-len data
if len(data) and (data.dtype == 'float' or data.dtype == 'object'):
@@ -280,6 +289,16 @@ def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True):
except:
pass
+ # coerce ints to 64
+ if data.dtype == 'int':
+
+ # coerce floats to 64
+ try:
+ data = data.astype('int64')
+ result = True
+ except:
+ pass
+
return data, result
def _try_convert_to_date(self, data):
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index 39e1042d125a2..baa4f6b64ec0e 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -808,11 +808,7 @@ def test_to_excel_styleconverter(self):
# self.assertTrue(ws.cell(maddr).merged)
# os.remove(filename)
def test_excel_010_hemstring(self):
- try:
- import xlwt
- import openpyxl
- except ImportError:
- raise nose.SkipTest
+ _skip_if_no_excelsuite()
from pandas.util.testing import makeCustomDataframe as mkdf
# ensure limited functionality in 0.10
diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py
index bdd700bdbcec3..fe717f56e6bea 100644
--- a/pandas/io/tests/test_json/test_pandas.py
+++ b/pandas/io/tests/test_json/test_pandas.py
@@ -26,7 +26,7 @@
_frame = DataFrame(_seriesd)
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
-_intframe = DataFrame(dict((k, v.astype(int))
+_intframe = DataFrame(dict((k, v.astype(np.int64))
for k, v in _seriesd.iteritems()))
_tsframe = DataFrame(_tsd)
@@ -71,6 +71,9 @@ def _check_orient(df, orient, dtype=None, numpy=False, convert_axes=True, check_
unser = unser.sort()
+ if dtype is False:
+ check_dtype=False
+
if not convert_axes and df.index.dtype.type == np.datetime64:
unser.index = DatetimeIndex(unser.index.values.astype('i8'))
if orient == "records":
@@ -288,7 +291,7 @@ def test_series_to_json_except(self):
def test_typ(self):
- s = Series(range(6), index=['a','b','c','d','e','f'])
+ s = Series(range(6), index=['a','b','c','d','e','f'], dtype='int64')
result = read_json(s.to_json(),typ=None)
assert_series_equal(result,s)
diff --git a/pandas/io/tests/test_pickle.py b/pandas/io/tests/test_pickle.py
index d49597860cd16..a7f0e3d3e37b1 100644
--- a/pandas/io/tests/test_pickle.py
+++ b/pandas/io/tests/test_pickle.py
@@ -15,6 +15,8 @@
from pandas import Index
from pandas.sparse.tests import test_sparse
from pandas.util import py3compat
+from pandas.util.decorators import knownfailureif
+from pandas.util.misc import is_little_endian
class TestPickle(unittest.TestCase):
_multiprocess_can_split_ = True
@@ -56,6 +58,7 @@ def compare(self, vf):
comparator = getattr(tm,"assert_%s_equal" % typ)
comparator(result,expected)
+ @knownfailureif(not is_little_endian(), "known failure of test_read_pickles_0_10_1 on non-little endian")
def test_read_pickles_0_10_1(self):
pth = tm.get_data_path('legacy_pickle/0.10.1')
@@ -63,6 +66,7 @@ def test_read_pickles_0_10_1(self):
vf = os.path.join(pth,f)
self.compare(vf)
+ @knownfailureif(not is_little_endian(), "known failure of test_read_pickles_0_11_0 on non-little endian")
def test_read_pickles_0_11_0(self):
pth = tm.get_data_path('legacy_pickle/0.11.0')
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 8b3d4a475d952..3266a906dcfae 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -477,6 +477,9 @@ def test_append(self):
def test_encoding(self):
+ if sys.byteorder != 'little':
+ raise nose.SkipTest('system byteorder is not little, skipping test_encoding!')
+
with ensure_clean(self.path) as store:
df = DataFrame(dict(A='foo',B='bar'),index=range(5))
df.loc[2,'A'] = np.nan
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index d512b0267ed13..794d303a68d79 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -3,7 +3,7 @@
from datetime import datetime
import os
import unittest
-
+import sys
import warnings
import nose
@@ -14,6 +14,8 @@
from pandas.io.stata import read_stata, StataReader, StataWriter
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
+from pandas.util.decorators import knownfailureif
+from pandas.util.misc import is_little_endian
class StataTests(unittest.TestCase):
@@ -127,6 +129,7 @@ def test_read_dta4(self):
tm.assert_frame_equal(parsed, expected)
+ @knownfailureif(not is_little_endian(), "known failure of test_write_dta5 on non-little endian")
def test_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss', 'int_miss', 'long_miss'])
@@ -137,6 +140,7 @@ def test_write_dta5(self):
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), original)
+ @knownfailureif(not is_little_endian(), "known failure of test_write_dta6 on non-little endian")
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 380604b0de32e..4e57977a787f2 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1381,7 +1381,11 @@ def test_to_excel(self):
path = '__tmp__.' + ext
with ensure_clean(path) as path:
self.panel.to_excel(path)
- reader = ExcelFile(path)
+ try:
+ reader = ExcelFile(path)
+ except ImportError:
+ raise nose.SkipTest
+
for item, df in self.panel.iterkv():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
diff --git a/pandas/util/misc.py b/pandas/util/misc.py
index 8372ba56d00cd..15492cde5a9f7 100644
--- a/pandas/util/misc.py
+++ b/pandas/util/misc.py
@@ -1,3 +1,10 @@
+""" various miscellaneous utilities """
+
+def is_little_endian():
+ """ am I little endian """
+ import sys
+ return sys.byteorder == 'little'
+
def exclusive(*args):
count = sum([arg is not None for arg in args])
return count == 1
| TST: json tests to int64 to avoid dtype issues, closes #3895
TST: skip tests if xlrd has lower than needed version, closes #3897
TST: skip pickle tests on not-little endianess , closes #3894
TST: skip test_encoding on non-little endian in test_pytables , closes #3892
TST: skip some stata tests on non-little endian, closes #3896
| https://api.github.com/repos/pandas-dev/pandas/pulls/3898 | 2013-06-13T22:58:30Z | 2013-06-14T13:13:16Z | 2013-06-14T13:13:15Z | 2014-07-01T15:50:54Z |
Skip tests on network error | diff --git a/pandas/io/tests/test_google.py b/pandas/io/tests/test_google.py
index 7f4ca13c27e58..8c16c60ac8b87 100644
--- a/pandas/io/tests/test_google.py
+++ b/pandas/io/tests/test_google.py
@@ -6,6 +6,7 @@
import pandas.io.data as web
from pandas.util.testing import (network, assert_series_equal)
from numpy.testing.decorators import slow
+import numpy as np
import urllib2
@@ -24,7 +25,23 @@ def test_google(self):
self.assertEquals(
web.DataReader("F", 'google', start, end)['Close'][-1],
13.68)
+ except urllib2.URLError:
+ try:
+ urllib2.urlopen('http://www.google.com')
+ except urllib2.URLError:
+ raise nose.SkipTest
+ else:
+ raise
+
+ @network
+ def test_google_non_existent(self):
+ # asserts that google is minimally working and that it throws
+ # an excecption when DataReader can't get a 200 response from
+ # google
+ start = datetime(2010, 1, 1)
+ end = datetime(2013, 01, 27)
+ try:
self.assertRaises(
Exception,
lambda: web.DataReader("NON EXISTENT TICKER", 'google',
@@ -44,38 +61,63 @@ def test_get_quote(self):
lambda: web.get_quote_google(pd.Series(['GOOG', 'AAPL', 'GOOG'])))
@network
- def test_get_data(self):
- import numpy as np
- df = web.get_data_google('GOOG')
- print(df.Volume.ix['OCT-08-2010'])
- assert df.Volume.ix['OCT-08-2010'] == 2863473
-
- sl = ['AAPL', 'AMZN', 'GOOG']
- pan = web.get_data_google(sl, '2012')
- ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG]
- assert ts[0].dayofyear == 96
-
- pan = web.get_data_google(['GE', 'MSFT', 'INTC'], 'JAN-01-12', 'JAN-31-12')
- expected = [19.02, 28.23, 25.39]
- result = pan.Close.ix['01-18-12'][['GE', 'MSFT', 'INTC']].tolist()
- assert result == expected
-
- # sanity checking
- t= np.array(result)
- assert np.issubdtype(t.dtype, np.floating)
- assert t.shape == (3,)
-
- expected = [[ 18.99, 28.4 , 25.18],
- [ 18.58, 28.31, 25.13],
- [ 19.03, 28.16, 25.52],
- [ 18.81, 28.82, 25.87]]
- result = pan.Open.ix['Jan-15-12':'Jan-20-12'][['GE', 'MSFT', 'INTC']].values
- assert (result == expected).all()
-
- # sanity checking
- t= np.array(pan)
- assert np.issubdtype(t.dtype, np.floating)
+ def test_get_goog_volume(self):
+ try:
+ df = web.get_data_google('GOOG')
+ assert df.Volume.ix['OCT-08-2010'] == 2863473
+ except IOError:
+ try:
+ urllib2.urlopen('http://www.google.com')
+ except IOError:
+ raise nose.SkipTest
+ else:
+ raise
+
+ @network
+ def test_get_multi1(self):
+ try:
+ sl = ['AAPL', 'AMZN', 'GOOG']
+ pan = web.get_data_google(sl, '2012')
+ ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG]
+ assert ts[0].dayofyear == 96
+ except IOError:
+ try:
+ urllib2.urlopen('http://www.google.com')
+ except IOError:
+ raise nose.SkipTest
+ else:
+ raise
+ @network
+ def test_get_multi2(self):
+ try:
+ pan = web.get_data_google(['GE', 'MSFT', 'INTC'], 'JAN-01-12', 'JAN-31-12')
+ expected = [19.02, 28.23, 25.39]
+ result = pan.Close.ix['01-18-12'][['GE', 'MSFT', 'INTC']].tolist()
+ assert result == expected
+
+ # sanity checking
+ t= np.array(result)
+ assert np.issubdtype(t.dtype, np.floating)
+ assert t.shape == (3,)
+
+ expected = [[ 18.99, 28.4 , 25.18],
+ [ 18.58, 28.31, 25.13],
+ [ 19.03, 28.16, 25.52],
+ [ 18.81, 28.82, 25.87]]
+ result = pan.Open.ix['Jan-15-12':'Jan-20-12'][['GE', 'MSFT', 'INTC']].values
+ assert (result == expected).all()
+
+ # sanity checking
+ t= np.array(pan)
+ assert np.issubdtype(t.dtype, np.floating)
+ except IOError:
+ try:
+ urllib2.urlopen('http://www.google.com')
+ except IOError:
+ raise nose.SkipTest
+ else:
+ raise
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
| https://api.github.com/repos/pandas-dev/pandas/pulls/3893 | 2013-06-13T22:02:03Z | 2013-06-19T11:24:56Z | 2013-06-19T11:24:56Z | 2016-05-10T01:09:03Z | |
BLD: install older versions of numexpr/pytables on fulldeps/2 build | diff --git a/ci/install.sh b/ci/install.sh
index c9b76b88721e9..294db286a1001 100755
--- a/ci/install.sh
+++ b/ci/install.sh
@@ -67,14 +67,17 @@ if ( ! $VENV_FILE_AVAILABLE ); then
if [ x"$FULL_DEPS" == x"true" ]; then
echo "Installing FULL_DEPS"
pip install $PIP_ARGS cython
- pip install $PIP_ARGS numexpr
if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then
pip install $PIP_ARGS xlwt
pip install $PIP_ARGS bottleneck
+ pip install $PIP_ARGS numexpr==2.0.1
+ pip install $PIP_ARGS tables==2.3.1
+ else
+ pip install $PIP_ARGS numexpr
+ pip install $PIP_ARGS tables
fi
- pip install $PIP_ARGS tables
pip install $PIP_ARGS matplotlib
pip install $PIP_ARGS openpyxl
pip install $PIP_ARGS xlrd>=0.9.0
| https://api.github.com/repos/pandas-dev/pandas/pulls/3891 | 2013-06-13T20:37:33Z | 2013-06-13T21:23:30Z | 2013-06-13T21:23:30Z | 2014-07-16T08:14:03Z | |
API: (GH3888) more consistency in the to_datetime return types (given string/array of string inputs) | diff --git a/RELEASE.rst b/RELEASE.rst
index 8e4bdd3cba268..500ba2df1ed47 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -159,6 +159,7 @@ pandas 0.11.1
- ``read_html`` now defaults to ``None`` when reading, and falls back on
``bs4`` + ``html5lib`` when lxml fails to parse. a list of parsers to try
until success is also valid
+ - more consistency in the to_datetime return types (give string/array of string inputs) (GH3888_)
**Bug Fixes**
@@ -355,6 +356,8 @@ pandas 0.11.1
.. _GH3911: https://github.com/pydata/pandas/issues/3911
.. _GH3912: https://github.com/pydata/pandas/issues/3912
.. _GH3764: https://github.com/pydata/pandas/issues/3764
+.. _GH3888: https://github.com/pydata/pandas/issues/3888
+
pandas 0.11.0
=============
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 16b3176521e28..2bbb0da9af658 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1159,9 +1159,13 @@ def truncate(self, before=None, after=None, copy=True):
-------
truncated : type of caller
"""
- from pandas.tseries.tools import to_datetime
- before = to_datetime(before)
- after = to_datetime(after)
+
+ # if we have a date index, convert to dates, otherwise
+ # treat like a slice
+ if self.index.is_all_dates:
+ from pandas.tseries.tools import to_datetime
+ before = to_datetime(before)
+ after = to_datetime(after)
if before is not None and after is not None:
if before > after:
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index bdc603dfdea31..51097cd157b99 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -825,13 +825,36 @@ def test_nat_scalar_field_access(self):
self.assertEquals(NaT.weekday(), -1)
- def test_to_datetime_empty_string(self):
+ def test_to_datetime_types(self):
+
+ # empty string
result = to_datetime('')
- self.assert_(result == '')
+ self.assert_(result is NaT)
result = to_datetime(['', ''])
self.assert_(isnull(result).all())
+ # ints
+ result = Timestamp(0)
+ expected = to_datetime(0)
+ self.assert_(result == expected)
+
+ # GH 3888 (strings)
+ expected = to_datetime(['2012'])[0]
+ result = to_datetime('2012')
+ self.assert_(result == expected)
+
+ ### array = ['2012','20120101','20120101 12:01:01']
+ array = ['20120101','20120101 12:01:01']
+ expected = list(to_datetime(array))
+ result = map(Timestamp,array)
+ tm.assert_almost_equal(result,expected)
+
+ ### currently fails ###
+ ### result = Timestamp('2012')
+ ### expected = to_datetime('2012')
+ ### self.assert_(result == expected)
+
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 90bc0beb8eb84..c80d2ef5d4e1c 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -99,16 +99,7 @@ def _convert_f(arg):
except (ValueError, TypeError):
raise e
- if arg is None:
- return arg
- elif isinstance(arg, datetime):
- return arg
- elif isinstance(arg, Series):
- values = arg.values
- if not com.is_datetime64_dtype(values):
- values = _convert_f(values)
- return Series(values, index=arg.index, name=arg.name)
- elif isinstance(arg, (np.ndarray, list)):
+ def _convert_listlike(arg):
if isinstance(arg, list):
arg = np.array(arg, dtype='O')
@@ -122,24 +113,23 @@ def _convert_f(arg):
return DatetimeIndex._simple_new(values, None, tz=tz)
except (ValueError, TypeError):
raise e
- return arg
+ return arg
- try:
- return _convert_f(arg)
- except ValueError:
- raise
- return arg
+ return _convert_f(arg)
- try:
- if not arg:
- return arg
- default = datetime(1, 1, 1)
- return parse(arg, dayfirst=dayfirst, default=default)
- except Exception:
- if errors == 'raise':
- raise
+ if arg is None:
return arg
+ elif isinstance(arg, datetime):
+ return arg
+ elif isinstance(arg, Series):
+ values = arg.values
+ if not com.is_datetime64_dtype(values):
+ values = _convert_f(values)
+ return Series(values, index=arg.index, name=arg.name)
+ elif isinstance(arg, (np.ndarray, list)):
+ return _convert_listlike(arg)
+ return _convert_listlike(np.array([ arg ], dtype='O'))[0]
class DateParseError(ValueError):
pass
| closes #3888
resolves the following inconsistencies in the Timestamp/to_datetime interface
Things that do the same thing will now _do the same thing_!
```
In [1]: to_datetime = pd.to_datetime
In [2]: to_datetime('')
Out[2]: NaT
In [3]: to_datetime(['', ''])
Out[3]:
<class 'pandas.tseries.index.DatetimeIndex'>
[NaT, NaT]
Length: 2, Freq: None, Timezone: None
In [4]: Timestamp(0)
Out[4]: Timestamp('1970-01-01 00:00:00', tz=None)
In [5]: to_datetime(0)
Out[5]: Timestamp('1970-01-01 00:00:00', tz=None)
In [6]: to_datetime(['2012'])[0]
Out[6]: Timestamp('2012-01-01 00:00:00', tz=None)
In [8]: to_datetime('2012')
Out[8]: Timestamp('2012-01-01 00:00:00', tz=None)
In [9]: array = ['20120101','20120101 12:01:01']
In [11]: list(to_datetime(array))
Out[11]:
[Timestamp('2012-01-01 00:00:00', tz=None),
Timestamp('2012-01-01 12:01:01', tz=None)]
In [13]: map(Timestamp,array)
Out[13]:
[Timestamp('2012-01-01 00:00:00', tz=None),
Timestamp('2012-01-01 12:01:01', tz=None)]
```
Note that the following is still inconsisten and will be fixed in a future PR
```
In [14]: Timestamp('2012')
Out[14]: Timestamp('2012-06-18 00:00:00', tz=None)
In [15]: to_datetime('2012')
Out[15]: Timestamp('2012-01-01 00:00:00', tz=None)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3890 | 2013-06-13T19:45:50Z | 2013-06-19T00:56:27Z | 2013-06-19T00:56:27Z | 2014-06-17T13:26:30Z |
BUG: (GH3880) index names are now propogated with loc/ix | diff --git a/RELEASE.rst b/RELEASE.rst
index 03cfc4f6bcafc..839c472da1610 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -177,6 +177,8 @@ pandas 0.11.1
- Non-unique indexing with a slice via ``loc`` and friends fixed (GH3659_)
- Allow insert/delete to non-unique columns (GH3679_)
- Extend ``reindex`` to correctly deal with non-unique indices (GH3679_)
+ - ``DataFrame.itertuples()`` now works with frames with duplicate column
+ names (GH3873_)
- Fixed bug in groupby with empty series referencing a variable before assignment. (GH3510_)
- Fixed bug in mixed-frame assignment with aligned series (GH3492_)
- Fixed bug in selecting month/quarter/year from a series would not select the time element
@@ -228,8 +230,7 @@ pandas 0.11.1
- PandasObjects raise TypeError when trying to hash (GH3882_)
- Fix incorrect arguments passed to concat that are not list-like (e.g. concat(df1,df2)) (GH3481_)
- Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes) in ``read_csv`` (GH3795_)
- - ``DataFrame.itertuples()`` now works with frames with duplicate column
- names (GH3873_)
+ - Fix index name not propogating when using ``loc/ix`` (GH3880_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -323,6 +324,7 @@ pandas 0.11.1
.. _GH3834: https://github.com/pydata/pandas/issues/3834
.. _GH3873: https://github.com/pydata/pandas/issues/3873
.. _GH3877: https://github.com/pydata/pandas/issues/3877
+.. _GH3880: https://github.com/pydata/pandas/issues/3880
pandas 0.11.0
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 1a43e9e6a49e0..dfc36258a680f 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -348,11 +348,14 @@ Bug Fixes
- Duplicate indexes with and empty DataFrame.from_records will return a correct frame (GH3562_)
- Concat to produce a non-unique columns when duplicates are across dtypes is fixed (GH3602_)
- Allow insert/delete to non-unique columns (GH3679_)
+ - Non-unique indexing with a slice via ``loc`` and friends fixed (GH3659_)
+ - Allow insert/delete to non-unique columns (GH3679_)
+ - Extend ``reindex`` to correctly deal with non-unique indices (GH3679_)
+ - ``DataFrame.itertuples()`` now works with frames with duplicate column
+ names (GH3873_)
- ``DataFrame.from_records`` did not accept empty recarrays (GH3682_)
- ``read_html`` now correctly skips tests (GH3741_)
- - ``DataFrame.itertuples()`` now works with frames with duplicate column
- names (GH3873_)
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
@@ -405,3 +408,5 @@ on GitHub for a complete list.
.. _GH3834: https://github.com/pydata/pandas/issues/3834
.. _GH3873: https://github.com/pydata/pandas/issues/3873
.. _GH3877: https://github.com/pydata/pandas/issues/3877
+.. _GH3659: https://github.com/pydata/pandas/issues/3659
+.. _GH3679: https://github.com/pydata/pandas/issues/3679
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b6e29204fc0d8..f9f8a424f8d96 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2712,14 +2712,14 @@ def _reindex_multi(self, new_index, new_columns, copy, fill_value):
def _reindex_index(self, new_index, method, copy, level, fill_value=NA,
limit=None):
new_index, indexer = self.index.reindex(new_index, method, level,
- limit=limit)
+ limit=limit, copy_if_needed=True)
return self._reindex_with_indexers(new_index, indexer, None, None,
copy, fill_value)
def _reindex_columns(self, new_columns, copy, level, fill_value=NA,
limit=None):
new_columns, indexer = self.columns.reindex(new_columns, level=level,
- limit=limit)
+ limit=limit, copy_if_needed=True)
return self._reindex_with_indexers(None, None, new_columns, indexer,
copy, fill_value)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 51ebd58c33343..a5880b9f18670 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -920,7 +920,7 @@ def _get_method(self, method):
}
return aliases.get(method, method)
- def reindex(self, target, method=None, level=None, limit=None):
+ def reindex(self, target, method=None, level=None, limit=None, copy_if_needed=False):
"""
For Index, simply returns the new index and the results of
get_indexer. Provided here to enable an interface that is amenable for
@@ -939,6 +939,12 @@ def reindex(self, target, method=None, level=None, limit=None):
else:
if self.equals(target):
indexer = None
+
+ # to avoid aliasing an existing index
+ if copy_if_needed and target.name != self.name and self.name is not None:
+ if target.name is None:
+ target = self.copy()
+
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
@@ -2196,7 +2202,7 @@ def get_indexer(self, target, method=None, limit=None):
return com._ensure_platform_int(indexer)
- def reindex(self, target, method=None, level=None, limit=None):
+ def reindex(self, target, method=None, level=None, limit=None, copy_if_needed=False):
"""
Performs any necessary conversion on the input index and calls
get_indexer. This method is here so MultiIndex and an Index of
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index af1543dad0314..49d92afc46848 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1948,7 +1948,7 @@ def reindex_axis(self, new_axis, method=None, axis=0, copy=True):
'axis == 0')
return self.reindex_items(new_axis)
- new_axis, indexer = cur_axis.reindex(new_axis, method)
+ new_axis, indexer = cur_axis.reindex(new_axis, method, copy_if_needed=True)
return self.reindex_indexer(new_axis, indexer, axis=axis)
def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=np.nan):
@@ -2014,7 +2014,7 @@ def reindex_items(self, new_items, copy=True, fill_value=np.nan):
return data.reindex_items(new_items)
# TODO: this part could be faster (!)
- new_items, indexer = self.items.reindex(new_items)
+ new_items, indexer = self.items.reindex(new_items, copy_if_needed=True)
new_axes = [new_items] + self.axes[1:]
# could have so me pathological (MultiIndex) issues here
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 2b2d59306da6e..5b4d582e5e42e 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -7207,6 +7207,7 @@ def test_reindex_name_remains(self):
s = Series(random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
+
df = df.reindex(i)
self.assert_(df.index.name == 'iname')
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 295eaede443b1..0719d9c9a87db 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1024,6 +1024,19 @@ def test_non_unique_loc(self):
expected = DataFrame({'A' : [2,4,5], 'B' : [4,6,7]}, index = [1,1,2])
assert_frame_equal(result,expected)
+ def test_loc_name(self):
+ # GH 3880
+ df = DataFrame([[1, 1], [1, 1]])
+ df.index.name = 'index_name'
+ result = df.iloc[[0, 1]].index.name
+ self.assert_(result == 'index_name')
+
+ result = df.ix[[0, 1]].index.name
+ self.assert_(result == 'index_name')
+
+ result = df.loc[[0, 1]].index.name
+ self.assert_(result == 'index_name')
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
| closes #3880
| https://api.github.com/repos/pandas-dev/pandas/pulls/3887 | 2013-06-13T18:57:18Z | 2013-06-13T19:46:37Z | 2013-06-13T19:46:37Z | 2014-07-03T09:04:12Z |
FIX hash of DataFrame raises Typerror | diff --git a/RELEASE.rst b/RELEASE.rst
index 307986ab81681..072f40d927108 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -220,6 +220,7 @@ pandas 0.11.1
- Groupby transform with item-by-item not upcasting correctly (GH3740_)
- Incorrectly read a HDFStore multi-index Frame witha column specification (GH3748_)
- ``read_html`` now correctly skips tests (GH3741_)
+ - PandasObjects raise TypeError when trying to hash (GH3882_)
- Fix incorrect arguments passed to concat that are not list-like (e.g. concat(df1,df2)) (GH3481_)
- Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes) in ``read_csv`` (GH3795_)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 0d2612d7aed7a..3a3ce49d50c5a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -31,6 +31,11 @@ def save(self, path):
def load(cls, path):
return com.load(path)
+ def __hash__(self):
+ raise TypeError('{0!r} objects are mutable, thus they cannot be'
+ ' hashed'.format(self.__class__.__name__))
+
+
#----------------------------------------------------------------------
# Axis name business
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 3a7a7d0f49b66..2621c64afc205 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -527,9 +527,6 @@ def _constructor(self):
def _can_hold_na(self):
return not is_integer_dtype(self.dtype)
- def __hash__(self):
- raise TypeError('unhashable type')
-
_index = None
index = lib.SeriesIndex()
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 2c6d3b221c6ff..1e8fa91548145 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -3109,6 +3109,11 @@ def test_constructor_for_list_with_dtypes(self):
expected.sort()
assert_series_equal(result, expected)
+ def test_not_hashable(self):
+ df = pd.DataFrame([1])
+ self.assertRaises(TypeError, hash, df)
+ self.assertRaises(TypeError, hash, self.empty)
+
def test_timedeltas(self):
df = DataFrame(dict(A = Series(date_range('2012-1-1', periods=3, freq='D')),
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 58b7ac272401f..380604b0de32e 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -46,6 +46,12 @@ def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
+ def not_hashable(self):
+ c_empty = Panel()
+ c = Panel(pd.Panel([[[1]]]))
+ self.assertRaises(TypeError, hash, c_empty)
+ self.assertRaises(TypeError, hash, c)
+
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py
index a2e08bc744ab0..9c3a66c32c501 100644
--- a/pandas/tests/test_panel4d.py
+++ b/pandas/tests/test_panel4d.py
@@ -785,6 +785,11 @@ def test_reindex(self):
major=self.panel4d.major_axis, copy=False)
self.assert_(result is self.panel4d)
+ def test_not_hashable(self):
+ p4D_empty = Panel4D()
+ self.assertRaises(TypeError, hash, p4D_empty)
+ self.assertRaises(TypeError, hash, self.panel4d)
+
def test_reindex_like(self):
# reindex_like
smaller = self.panel4d.reindex(labels=self.panel4d.labels[:-1],
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 88990bdde98b8..d04da38f0e526 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -579,6 +579,12 @@ def test_setindex(self):
def test_array_finalize(self):
pass
+ def test_not_hashable(self):
+ s_empty = Series()
+ s = Series([1])
+ self.assertRaises(TypeError, hash, s_empty)
+ self.assertRaises(TypeError, hash, s)
+
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
| fixes #3882
raise TypeError if trying to hash a DataFrame (or Panel etc.).
| https://api.github.com/repos/pandas-dev/pandas/pulls/3884 | 2013-06-13T13:29:45Z | 2013-06-13T18:49:52Z | 2013-06-13T18:49:52Z | 2014-06-21T16:50:42Z |
BUG: allow itertuples to work with frames with duplicate column names | diff --git a/RELEASE.rst b/RELEASE.rst
index 8256b13b4e553..0fcd9bd3731fe 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -223,6 +223,8 @@ pandas 0.11.1
- ``read_html`` now correctly skips tests (GH3741_)
- Fix incorrect arguments passed to concat that are not list-like (e.g. concat(df1,df2)) (GH3481_)
- Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes) in ``read_csv`` (GH3795_)
+ - ``DataFrame.itertuples()`` now works with frames with duplicate column
+ names (GH3873_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -314,6 +316,7 @@ pandas 0.11.1
.. _GH3795: https://github.com/pydata/pandas/issues/3795
.. _GH3814: https://github.com/pydata/pandas/issues/3814
.. _GH3834: https://github.com/pydata/pandas/issues/3834
+.. _GH3873: https://github.com/pydata/pandas/issues/3873
pandas 0.11.0
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 34ba9f0859641..564939c596ced 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -349,6 +349,8 @@ Bug Fixes
- ``DataFrame.from_records`` did not accept empty recarrays (GH3682_)
- ``read_html`` now correctly skips tests (GH3741_)
+ - ``DataFrame.itertuples()`` now works with frames with duplicate column
+ names (GH3873_)
See the `full release notes
<https://github.com/pydata/pandas/blob/master/RELEASE.rst>`__ or issue tracker
@@ -399,3 +401,4 @@ on GitHub for a complete list.
.. _GH3726: https://github.com/pydata/pandas/issues/3726
.. _GH3425: https://github.com/pydata/pandas/issues/3425
.. _GH3834: https://github.com/pydata/pandas/issues/3834
+.. _GH3873: https://github.com/pydata/pandas/issues/3873
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 9c0a2843370f4..b6e29204fc0d8 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -818,7 +818,9 @@ def itertuples(self, index=True):
arrays = []
if index:
arrays.append(self.index)
- arrays.extend(self[k] for k in self.columns)
+
+ # use integer indexing because of possible duplicate column names
+ arrays.extend(self.iloc[:, k] for k in xrange(len(self.columns)))
return izip(*arrays)
iterkv = iteritems
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 2c6d3b221c6ff..9b2f078e3b95a 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -3951,6 +3951,10 @@ def test_itertuples(self):
for tup in df.itertuples(index=False):
self.assert_(isinstance(tup[1], np.integer))
+ df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
+ dfaa = df[['a', 'a']]
+ self.assertEqual(list(dfaa.itertuples()), [(0, 1, 1), (1, 2, 2), (2, 3, 3)])
+
def test_len(self):
self.assertEqual(len(self.frame), len(self.frame.index))
| closes #3873.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3879 | 2013-06-13T05:48:29Z | 2013-06-13T12:22:47Z | 2013-06-13T12:22:47Z | 2014-06-26T11:26:17Z |
ENH: do not convert mixed-integer type indexes to datetimeindex | diff --git a/RELEASE.rst b/RELEASE.rst
index 0fcd9bd3731fe..161047c478d88 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -80,6 +80,8 @@ pandas 0.11.1
- Added Faq section on repr display options, to help users customize their setup.
- ``where`` operations that result in block splitting are much faster (GH3733_)
- Series and DataFrame hist methods now take a ``figsize`` argument (GH3834_)
+ - DatetimeIndexes no longer try to convert mixed-integer indexes during join
+ operations (GH3877_)
**API Changes**
@@ -317,6 +319,7 @@ pandas 0.11.1
.. _GH3814: https://github.com/pydata/pandas/issues/3814
.. _GH3834: https://github.com/pydata/pandas/issues/3834
.. _GH3873: https://github.com/pydata/pandas/issues/3873
+.. _GH3877: https://github.com/pydata/pandas/issues/3877
pandas 0.11.0
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 564939c596ced..1a43e9e6a49e0 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -289,6 +289,8 @@ Enhancements
dff.groupby('B').filter(lambda x: len(x) > 2, dropna=False)
- Series and DataFrame hist methods now take a ``figsize`` argument (GH3834_)
+ - DatetimeIndexes no longer try to convert mixed-integer indexes during join
+ operations (GH3877_)
Bug Fixes
@@ -402,3 +404,4 @@ on GitHub for a complete list.
.. _GH3425: https://github.com/pydata/pandas/issues/3425
.. _GH3834: https://github.com/pydata/pandas/issues/3834
.. _GH3873: https://github.com/pydata/pandas/issues/3873
+.. _GH3877: https://github.com/pydata/pandas/issues/3877
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index a918e9eb18e8b..51e657d1723b2 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -910,7 +910,8 @@ def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
- if not isinstance(other, DatetimeIndex) and len(other) > 0:
+ if (not isinstance(other, DatetimeIndex) and len(other) > 0 and
+ other.inferred_type != 'mixed-integer'):
try:
other = DatetimeIndex(other)
except TypeError:
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index beee5caa871c5..f5415a195db77 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -18,7 +18,6 @@
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.frequencies as fmod
-from pandas.tseries.index import TimeSeriesError
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
@@ -1853,6 +1852,14 @@ def test_date(self):
expected = [t.date() for t in rng]
self.assert_((result == expected).all())
+ def test_does_not_convert_mixed_integer(self):
+ df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
+ randn(), r_idx_type='i', c_idx_type='dt')
+ cols = df.columns.join(df.index, how='outer')
+ joined = cols.join(df.columns)
+ self.assertEqual(cols.dtype, np.dtype('O'))
+ self.assertEqual(cols.dtype, joined.dtype)
+ self.assert_(np.array_equal(cols.values, joined.values))
class TestLegacySupport(unittest.TestCase):
_multiprocess_can_split_ = True
| closes #3877.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3878 | 2013-06-13T04:35:16Z | 2013-06-13T17:44:38Z | 2013-06-13T17:44:37Z | 2014-07-03T18:56:41Z |
ENH: JSON | diff --git a/doc/source/io.rst b/doc/source/io.rst
index e64cbc4bc8101..c182d456315ec 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -954,13 +954,21 @@ with optional parameters:
- path_or_buf : the pathname or buffer to write the output
This can be ``None`` in which case a JSON string is returned
-- orient : The format of the JSON string, default is ``index`` for ``Series``, ``columns`` for ``DataFrame``
+- orient :
- * split : dict like {index -> [index], columns -> [columns], data -> [values]}
- * records : list like [{column -> value}, ... , {column -> value}]
- * index : dict like {index -> {column -> value}}
- * columns : dict like {column -> {index -> value}}
- * values : just the values array
+ Series :
+ default is 'index', allowed values are: {'split','records','index'}
+
+ DataFrame :
+ default is 'columns', allowed values are: {'split','records','index','columns','values'}
+
+ The format of the JSON string
+
+ * split : dict like {index -> [index], columns -> [columns], data -> [values]}
+ * records : list like [{column -> value}, ... , {column -> value}]
+ * index : dict like {index -> {column -> value}}
+ * columns : dict like {column -> {index -> value}}
+ * values : just the values array
- date_format : type of date conversion (epoch = epoch milliseconds, iso = ISO8601), default is epoch
- double_precision : The number of decimal places to use when encoding floating point values, default 10.
@@ -989,6 +997,8 @@ Writing to a file, with a date index and a date column
dfj2 = dfj.copy()
dfj2['date'] = Timestamp('20130101')
+ dfj2['ints'] = range(5)
+ dfj2['bools'] = True
dfj2.index = date_range('20130101',periods=5)
dfj2.to_json('test.json')
open('test.json').read()
@@ -1005,31 +1015,86 @@ is ``None``. To explicity force ``Series`` parsing, pass ``typ=series``
is expected. For instance, a local file could be
file ://localhost/path/to/table.json
- typ : type of object to recover (series or frame), default 'frame'
-- orient : The format of the JSON string, one of the following
+- orient :
+
+ Series :
+ default is 'index', allowed values are: {'split','records','index'}
+
+ DataFrame :
+ default is 'columns', allowed values are: {'split','records','index','columns','values'}
+
+ The format of the JSON string
- * split : dict like {index -> [index], name -> name, data -> [values]}
- * records : list like [value, ... , value]
- * index : dict like {index -> value}
+ * split : dict like {index -> [index], columns -> [columns], data -> [values]}
+ * records : list like [{column -> value}, ... , {column -> value}]
+ * index : dict like {index -> {column -> value}}
+ * columns : dict like {column -> {index -> value}}
+ * values : just the values array
-- dtype : dtype of the resulting object
-- numpy : direct decoding to numpy arrays. default True but falls back to standard decoding if a problem occurs.
-- parse_dates : a list of columns to parse for dates; If True, then try to parse datelike columns, default is False
+- dtype : if True, infer dtypes, if a dict of column to dtype, then use those, if False, then don't infer dtypes at all, default is True, apply only to the data
+- convert_axes : boolean, try to convert the axes to the proper dtypes, default is True
+- convert_dates : a list of columns to parse for dates; If True, then try to parse datelike columns, default is True
- keep_default_dates : boolean, default True. If parsing dates, then parse the default datelike columns
+- numpy: direct decoding to numpy arrays. default is False;
+ Note that the JSON ordering **MUST** be the same for each term if ``numpy=True``
The parser will raise one of ``ValueError/TypeError/AssertionError`` if the JSON is
not parsable.
+The default of ``convert_axes=True``, ``dtype=True``, and ``convert_dates=True`` will try to parse the axes, and all of the data
+into appropriate types, including dates. If you need to override specific dtypes, pass a dict to ``dtype``. ``convert_axes`` should only
+be set to ``False`` if you need to preserve string-like numbers (e.g. '1', '2') in an axes.
+
+.. warning::
+
+ When reading JSON data, automatic coercing into dtypes has some quirks:
+
+ * an index can be in a different order, that is the returned order is not guaranteed to be the same as before serialization
+ * a column that was ``float`` data can safely be converted to ``integer``, e.g. a column of ``1.``
+ * bool columns will be converted to ``integer`` on reconstruction
+
+ Thus there are times where you may want to specify specific dtypes via the ``dtype`` keyword argument.
+
Reading from a JSON string
.. ipython:: python
pd.read_json(json)
-Reading from a file, parsing dates
+Reading from a file
+
+.. ipython:: python
+
+ pd.read_json('test.json')
+
+Don't convert any data (but still convert axes and dates)
+
+.. ipython:: python
+
+ pd.read_json('test.json',dtype=object).dtypes
+
+Specify how I want to convert data
+
+.. ipython:: python
+
+ pd.read_json('test.json',dtype={'A' : 'float32', 'bools' : 'int8'}).dtypes
+
+I like my string indicies
.. ipython:: python
- pd.read_json('test.json',parse_dates=True)
+ si = DataFrame(np.zeros((4, 4)),
+ columns=range(4),
+ index=[str(i) for i in range(4)])
+ si
+ si.index
+ si.columns
+ json = si.to_json()
+
+ sij = pd.read_json(json,convert_axes=False)
+ sij
+ sij.index
+ sij.columns
.. ipython:: python
:suppress:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 0d2612d7aed7a..55347aef078ef 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -507,8 +507,15 @@ def to_json(self, path_or_buf=None, orient=None, date_format='epoch',
----------
path_or_buf : the path or buffer to write the result string
if this is None, return a StringIO of the converted string
- orient : {'split', 'records', 'index', 'columns', 'values'},
- default is 'index' for Series, 'columns' for DataFrame
+ orient :
+
+ Series :
+ default is 'index'
+ allowed values are: {'split','records','index'}
+
+ DataFrame :
+ default is 'columns'
+ allowed values are: {'split','records','index','columns','values'}
The format of the JSON string
split : dict like
@@ -517,6 +524,7 @@ def to_json(self, path_or_buf=None, orient=None, date_format='epoch',
index : dict like {index -> {column -> value}}
columns : dict like {column -> {index -> value}}
values : just the values array
+
date_format : type of date conversion (epoch = epoch milliseconds, iso = ISO8601),
default is epoch
double_precision : The number of decimal places to use when encoding
diff --git a/pandas/io/json.py b/pandas/io/json.py
index 17b33931bee5a..fcecb31bb77a7 100644
--- a/pandas/io/json.py
+++ b/pandas/io/json.py
@@ -11,6 +11,7 @@
import numpy as np
from pandas.tslib import iNaT
+import pandas.lib as lib
### interface to/from ###
@@ -86,6 +87,11 @@ def _format_dates(self):
self.copy_if_needed()
self.obj = self._format_to_date(self.obj)
+ def _format_bools(self):
+ if self._needs_to_bool(self.obj):
+ self.copy_if_needed()
+ self.obj = self._format_to_bool(self.obj)
+
class FrameWriter(Writer):
_default_orient = 'columns'
@@ -112,8 +118,8 @@ def _format_dates(self):
for c in dtypes.index:
self.obj[c] = self._format_to_date(self.obj[c])
-def read_json(path_or_buf=None, orient=None, typ='frame', dtype=None, numpy=True,
- parse_dates=False, keep_default_dates=True):
+def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
+ convert_axes=True, convert_dates=True, keep_default_dates=True, numpy=False):
"""
Convert JSON string to pandas object
@@ -123,20 +129,33 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=None, numpy=True
a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host
is expected. For instance, a local file could be
file ://localhost/path/to/table.json
- orient : {'split', 'records', 'index'}, default 'index'
+ orient :
+ Series :
+ default is 'index'
+ allowed values are: {'split','records','index'}
+
+ DataFrame :
+ default is 'columns'
+ allowed values are: {'split','records','index','columns','values'}
+
The format of the JSON string
- split : dict like
- {index -> [index], name -> name, data -> [values]}
- records : list like [value, ... , value]
- index : dict like {index -> value}
+ split : dict like {index -> [index], columns -> [columns], data -> [values]}
+ records : list like [{column -> value}, ... , {column -> value}]
+ index : dict like {index -> {column -> value}}
+ columns : dict like {column -> {index -> value}}
+ values : just the values array
+
typ : type of object to recover (series or frame), default 'frame'
- dtype : dtype of the resulting object
- numpy: direct decoding to numpy arrays. default True but falls back
- to standard decoding if a problem occurs.
- parse_dates : a list of columns to parse for dates; If True, then try to parse datelike columns
- default is False
+ dtype : if True, infer dtypes, if a dict of column to dtype, then use those,
+ if False, then don't infer dtypes at all, default is True,
+ apply only to the data
+ convert_axes : boolean, try to convert the axes to the proper dtypes, default is True
+ convert_dates : a list of columns to parse for dates; If True, then try to parse datelike columns
+ default is True
keep_default_dates : boolean, default True. If parsing dates,
then parse the default datelike columns
+ numpy: direct decoding to numpy arrays. default is False.Note that the JSON ordering MUST be the same
+ for each term if numpy=True.
Returns
-------
@@ -157,16 +176,18 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=None, numpy=True
obj = None
if typ == 'frame':
- obj = FrameParser(json, orient, dtype, numpy, parse_dates, keep_default_dates).parse()
+ obj = FrameParser(json, orient, dtype, convert_axes, convert_dates, keep_default_dates, numpy).parse()
if typ == 'series' or obj is None:
- obj = SeriesParser(json, orient, dtype, numpy, parse_dates, keep_default_dates).parse()
+ if not isinstance(dtype,bool):
+ dtype = dict(data = dtype)
+ obj = SeriesParser(json, orient, dtype, convert_axes, convert_dates, keep_default_dates, numpy).parse()
return obj
class Parser(object):
- def __init__(self, json, orient, dtype, numpy, parse_dates=False, keep_default_dates=False):
+ def __init__(self, json, orient, dtype=True, convert_axes=True, convert_dates=True, keep_default_dates=False, numpy=False):
self.json = json
if orient is None:
@@ -175,27 +196,100 @@ def __init__(self, json, orient, dtype, numpy, parse_dates=False, keep_default_d
self.orient = orient
self.dtype = dtype
- if dtype is not None and orient == "split":
+ if orient == "split":
numpy = False
self.numpy = numpy
- self.parse_dates = parse_dates
+ self.convert_axes = convert_axes
+ self.convert_dates = convert_dates
self.keep_default_dates = keep_default_dates
self.obj = None
def parse(self):
- self._parse()
- if self.obj is not None:
+
+ # try numpy
+ numpy = self.numpy
+ if numpy:
+ self._parse_numpy()
+
+ else:
+ self._parse_no_numpy()
+
+ if self.obj is None: return None
+ if self.convert_axes:
self._convert_axes()
- if self.parse_dates:
- self._try_parse_dates()
+ self._try_convert_types()
return self.obj
+ def _convert_axes(self):
+ """ try to convert axes """
+ for axis in self.obj._AXIS_NUMBERS.keys():
+ new_axis, result = self._try_convert_data(axis, self.obj._get_axis(axis), use_dtypes=False, convert_dates=True)
+ if result:
+ setattr(self.obj,axis,new_axis)
+
+ def _try_convert_types(self):
+ raise NotImplementedError
+
+ def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True):
+ """ try to parse a ndarray like into a column by inferring dtype """
+
+ # don't try to coerce, unless a force conversion
+ if use_dtypes:
+ if self.dtype is False:
+ return data, False
+ elif self.dtype is True:
+ pass
+
+ else:
+
+ # dtype to force
+ dtype = self.dtype.get(name) if isinstance(self.dtype,dict) else self.dtype
+ if dtype is not None:
+ try:
+ dtype = np.dtype(dtype)
+ return data.astype(dtype), True
+ except:
+ return data, False
+
+ if convert_dates:
+ new_data, result = self._try_convert_to_date(data)
+ if result:
+ return new_data, True
+
+ result = False
+
+ if data.dtype == 'object':
+
+ # try float
+ try:
+ data = data.astype('float64')
+ result = True
+ except:
+ pass
- def _try_parse_to_date(self, data):
+ # do't coerce 0-len data
+ if len(data) and (data.dtype == 'float' or data.dtype == 'object'):
+
+ # coerce ints if we can
+ try:
+ new_data = data.astype('int64')
+ if (new_data == data).all():
+ data = new_data
+ result = True
+ except:
+ pass
+
+ return data, result
+
+ def _try_convert_to_date(self, data):
""" try to parse a ndarray like into a date column
try to coerce object in epoch/iso formats and
- integer/float in epcoh formats """
+ integer/float in epcoh formats, return a boolean if parsing
+ was successful """
+
+ # no conversion on empty
+ if not len(data): return data, False
new_data = data
if new_data.dtype == 'object':
@@ -208,7 +302,7 @@ def _try_parse_to_date(self, data):
# ignore numbers that are out of range
if issubclass(new_data.dtype.type,np.number):
if not ((new_data == iNaT) | (new_data > 31536000000000000L)).all():
- return data
+ return data, False
try:
new_data = to_datetime(new_data)
@@ -218,122 +312,102 @@ def _try_parse_to_date(self, data):
except:
# return old, noting more we can do
- new_data = data
+ return data, False
- return new_data
+ return new_data, True
- def _try_parse_dates(self):
+ def _try_convert_dates(self):
raise NotImplementedError
class SeriesParser(Parser):
_default_orient = 'index'
- def _parse(self):
-
+ def _parse_no_numpy(self):
+
json = self.json
- dtype = self.dtype
orient = self.orient
- numpy = self.numpy
-
- if numpy:
- try:
- if orient == "split":
- decoded = loads(json, dtype=dtype, numpy=True)
- decoded = dict((str(k), v) for k, v in decoded.iteritems())
- self.obj = Series(**decoded)
- elif orient == "columns" or orient == "index":
- self.obj = Series(*loads(json, dtype=dtype, numpy=True,
- labelled=True))
- else:
- self.obj = Series(loads(json, dtype=dtype, numpy=True))
- except ValueError:
- numpy = False
-
- if not numpy:
- if orient == "split":
- decoded = dict((str(k), v)
- for k, v in loads(json).iteritems())
- self.obj = Series(dtype=dtype, **decoded)
- else:
- self.obj = Series(loads(json), dtype=dtype)
+ if orient == "split":
+ decoded = dict((str(k), v)
+ for k, v in loads(json).iteritems())
+ self.obj = Series(dtype=None, **decoded)
+ else:
+ self.obj = Series(loads(json), dtype=None)
- def _convert_axes(self):
- """ try to axes if they are datelike """
- try:
- self.obj.index = self._try_parse_to_date(self.obj.index)
- except:
- pass
+ def _parse_numpy(self):
- def _try_parse_dates(self):
- if self.obj is None: return
+ json = self.json
+ orient = self.orient
+ if orient == "split":
+ decoded = loads(json, dtype=None, numpy=True)
+ decoded = dict((str(k), v) for k, v in decoded.iteritems())
+ self.obj = Series(**decoded)
+ elif orient == "columns" or orient == "index":
+ self.obj = Series(*loads(json, dtype=None, numpy=True,
+ labelled=True))
+ else:
+ self.obj = Series(loads(json, dtype=None, numpy=True))
- if self.parse_dates:
- self.obj = self._try_parse_to_date(self.obj)
+ def _try_convert_types(self):
+ if self.obj is None: return
+ obj, result = self._try_convert_data('data', self.obj, convert_dates=self.convert_dates)
+ if result:
+ self.obj = obj
class FrameParser(Parser):
_default_orient = 'columns'
- def _parse(self):
+ def _parse_numpy(self):
json = self.json
- dtype = self.dtype
orient = self.orient
- numpy = self.numpy
-
- if numpy:
- try:
- if orient == "columns":
- args = loads(json, dtype=dtype, numpy=True, labelled=True)
- if args:
- args = (args[0].T, args[2], args[1])
- self.obj = DataFrame(*args)
- elif orient == "split":
- decoded = loads(json, dtype=dtype, numpy=True)
- decoded = dict((str(k), v) for k, v in decoded.iteritems())
- self.obj = DataFrame(**decoded)
- elif orient == "values":
- self.obj = DataFrame(loads(json, dtype=dtype, numpy=True))
- else:
- self.obj = DataFrame(*loads(json, dtype=dtype, numpy=True,
- labelled=True))
- except ValueError:
- numpy = False
-
- if not numpy:
- if orient == "columns":
- self.obj = DataFrame(loads(json), dtype=dtype)
- elif orient == "split":
- decoded = dict((str(k), v)
- for k, v in loads(json).iteritems())
- self.obj = DataFrame(dtype=dtype, **decoded)
- elif orient == "index":
- self.obj = DataFrame(loads(json), dtype=dtype).T
- else:
- self.obj = DataFrame(loads(json), dtype=dtype)
- def _convert_axes(self):
- """ try to axes if they are datelike """
- if self.orient == 'columns':
- axis = 'index'
- elif self.orient == 'index':
- axis = 'columns'
+ if orient == "columns":
+ args = loads(json, dtype=None, numpy=True, labelled=True)
+ if args:
+ args = (args[0].T, args[2], args[1])
+ self.obj = DataFrame(*args)
+ elif orient == "split":
+ decoded = loads(json, dtype=None, numpy=True)
+ decoded = dict((str(k), v) for k, v in decoded.iteritems())
+ self.obj = DataFrame(**decoded)
+ elif orient == "values":
+ self.obj = DataFrame(loads(json, dtype=None, numpy=True))
else:
- return
+ self.obj = DataFrame(*loads(json, dtype=None, numpy=True, labelled=True))
- try:
- a = getattr(self.obj,axis)
- setattr(self.obj,axis,self._try_parse_to_date(a))
- except:
- pass
+ def _parse_no_numpy(self):
- def _try_parse_dates(self):
+ json = self.json
+ orient = self.orient
+
+ if orient == "columns":
+ self.obj = DataFrame(loads(json), dtype=None)
+ elif orient == "split":
+ decoded = dict((str(k), v)
+ for k, v in loads(json).iteritems())
+ self.obj = DataFrame(dtype=None, **decoded)
+ elif orient == "index":
+ self.obj = DataFrame(loads(json), dtype=None).T
+ else:
+ self.obj = DataFrame(loads(json), dtype=None)
+
+ def _try_convert_types(self):
+ if self.obj is None: return
+ if self.convert_dates:
+ self._try_convert_dates()
+ for col in self.obj.columns:
+ new_data, result = self._try_convert_data(col, self.obj[col], convert_dates=False)
+ if result:
+ self.obj[col] = new_data
+
+ def _try_convert_dates(self):
if self.obj is None: return
# our columns to parse
- parse_dates = self.parse_dates
- if parse_dates is True:
- parse_dates = []
- parse_dates = set(parse_dates)
+ convert_dates = self.convert_dates
+ if convert_dates is True:
+ convert_dates = []
+ convert_dates = set(convert_dates)
def is_ok(col):
""" return if this col is ok to try for a date parse """
@@ -348,6 +422,8 @@ def is_ok(col):
return False
- for col, c in self.obj.iteritems():
- if (self.keep_default_dates and is_ok(col)) or col in parse_dates:
- self.obj[col] = self._try_parse_to_date(c)
+ for col in self.obj.columns:
+ if (self.keep_default_dates and is_ok(col)) or col in convert_dates:
+ new_data, result = self._try_convert_to_date(self.obj[col])
+ if result:
+ self.obj[col] = new_data
diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py
index 4b1294b786df7..bdd700bdbcec3 100644
--- a/pandas/io/tests/test_json/test_pandas.py
+++ b/pandas/io/tests/test_json/test_pandas.py
@@ -56,13 +56,22 @@ def setUp(self):
def test_frame_from_json_to_json(self):
- def _check_orient(df, orient, dtype=None, numpy=True):
+ def _check_orient(df, orient, dtype=None, numpy=False, convert_axes=True, check_dtype=True, raise_ok=None):
df = df.sort()
dfjson = df.to_json(orient=orient)
- unser = read_json(dfjson, orient=orient, dtype=dtype,
- numpy=numpy)
+
+ try:
+ unser = read_json(dfjson, orient=orient, dtype=dtype,
+ numpy=numpy, convert_axes=convert_axes)
+ except (Exception), detail:
+ if raise_ok is not None:
+ if type(detail) == raise_ok:
+ return
+ raise
+
unser = unser.sort()
- if df.index.dtype.type == np.datetime64:
+
+ if not convert_axes and df.index.dtype.type == np.datetime64:
unser.index = DatetimeIndex(unser.index.values.astype('i8'))
if orient == "records":
# index is not captured in this orientation
@@ -78,20 +87,40 @@ def _check_orient(df, orient, dtype=None, numpy=True):
unser = unser.sort()
assert_almost_equal(df.values, unser.values)
else:
- assert_frame_equal(df, unser)
-
- def _check_all_orients(df, dtype=None):
- _check_orient(df, "columns", dtype=dtype)
- _check_orient(df, "records", dtype=dtype)
- _check_orient(df, "split", dtype=dtype)
- _check_orient(df, "index", dtype=dtype)
- _check_orient(df, "values", dtype=dtype)
-
- _check_orient(df, "columns", dtype=dtype, numpy=False)
- _check_orient(df, "records", dtype=dtype, numpy=False)
- _check_orient(df, "split", dtype=dtype, numpy=False)
- _check_orient(df, "index", dtype=dtype, numpy=False)
- _check_orient(df, "values", dtype=dtype, numpy=False)
+ if convert_axes:
+ assert_frame_equal(df, unser, check_dtype=check_dtype)
+ else:
+ assert_frame_equal(df, unser, check_less_precise=False, check_dtype=check_dtype)
+
+ def _check_all_orients(df, dtype=None, convert_axes=True, raise_ok=None):
+
+ # numpy=False
+ if convert_axes:
+ _check_orient(df, "columns", dtype=dtype)
+ _check_orient(df, "records", dtype=dtype)
+ _check_orient(df, "split", dtype=dtype)
+ _check_orient(df, "index", dtype=dtype)
+ _check_orient(df, "values", dtype=dtype)
+
+ _check_orient(df, "columns", dtype=dtype, convert_axes=False)
+ _check_orient(df, "records", dtype=dtype, convert_axes=False)
+ _check_orient(df, "split", dtype=dtype, convert_axes=False)
+ _check_orient(df, "index", dtype=dtype, convert_axes=False)
+ _check_orient(df, "values", dtype=dtype ,convert_axes=False)
+
+ # numpy=True and raise_ok might be not None, so ignore the error
+ if convert_axes:
+ _check_orient(df, "columns", dtype=dtype, numpy=True, raise_ok=raise_ok)
+ _check_orient(df, "records", dtype=dtype, numpy=True, raise_ok=raise_ok)
+ _check_orient(df, "split", dtype=dtype, numpy=True, raise_ok=raise_ok)
+ _check_orient(df, "index", dtype=dtype, numpy=True, raise_ok=raise_ok)
+ _check_orient(df, "values", dtype=dtype, numpy=True, raise_ok=raise_ok)
+
+ _check_orient(df, "columns", dtype=dtype, numpy=True, convert_axes=False, raise_ok=raise_ok)
+ _check_orient(df, "records", dtype=dtype, numpy=True, convert_axes=False, raise_ok=raise_ok)
+ _check_orient(df, "split", dtype=dtype, numpy=True, convert_axes=False, raise_ok=raise_ok)
+ _check_orient(df, "index", dtype=dtype, numpy=True, convert_axes=False, raise_ok=raise_ok)
+ _check_orient(df, "values", dtype=dtype, numpy=True, convert_axes=False, raise_ok=raise_ok)
# basic
_check_all_orients(self.frame)
@@ -99,6 +128,7 @@ def _check_all_orients(df, dtype=None):
self.frame.to_json(orient="columns"))
_check_all_orients(self.intframe, dtype=self.intframe.values.dtype)
+ _check_all_orients(self.intframe, dtype=False)
# big one
# index and columns are strings as all unserialised JSON object keys
@@ -106,13 +136,14 @@ def _check_all_orients(df, dtype=None):
biggie = DataFrame(np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)])
- _check_all_orients(biggie)
+ _check_all_orients(biggie,dtype=False,convert_axes=False)
# dtypes
_check_all_orients(DataFrame(biggie, dtype=np.float64),
- dtype=np.float64)
- _check_all_orients(DataFrame(biggie, dtype=np.int), dtype=np.int)
- _check_all_orients(DataFrame(biggie, dtype='<U3'), dtype='<U3')
+ dtype=np.float64, convert_axes=False)
+ _check_all_orients(DataFrame(biggie, dtype=np.int), dtype=np.int, convert_axes=False)
+ _check_all_orients(DataFrame(biggie, dtype='<U3'), dtype='<U3', convert_axes=False,
+ raise_ok=ValueError)
# empty
_check_all_orients(self.empty_frame)
@@ -129,15 +160,15 @@ def _check_all_orients(df, dtype=None):
'D': [True, False, True, False, True]
}
df = DataFrame(data=data, index=index)
- _check_orient(df, "split")
- _check_orient(df, "records")
- _check_orient(df, "values")
- _check_orient(df, "columns")
+ _check_orient(df, "split", check_dtype=False)
+ _check_orient(df, "records", check_dtype=False)
+ _check_orient(df, "values", check_dtype=False)
+ _check_orient(df, "columns", check_dtype=False)
# index oriented is problematic as it is read back in in a transposed
# state, so the columns are interpreted as having mixed data and
# given object dtypes.
# force everything to have object dtype beforehand
- _check_orient(df.transpose().transpose(), "index")
+ _check_orient(df.transpose().transpose(), "index", dtype=False)
def test_frame_from_json_bad_data(self):
self.assertRaises(ValueError, read_json, StringIO('{"key":b:a:d}'))
@@ -166,25 +197,37 @@ def test_frame_from_json_bad_data(self):
def test_frame_from_json_nones(self):
df = DataFrame([[1, 2], [4, 5, 6]])
unser = read_json(df.to_json())
- self.assert_(np.isnan(unser['2'][0]))
+ self.assert_(np.isnan(unser[2][0]))
df = DataFrame([['1', '2'], ['4', '5', '6']])
unser = read_json(df.to_json())
- self.assert_(unser['2'][0] is None)
+ self.assert_(np.isnan(unser[2][0]))
+ unser = read_json(df.to_json(),dtype=False)
+ self.assert_(unser[2][0] is None)
+ unser = read_json(df.to_json(),convert_axes=False,dtype=False)
+ self.assert_(unser['2']['0'] is None)
unser = read_json(df.to_json(), numpy=False)
- self.assert_(unser['2'][0] is None)
+ self.assert_(np.isnan(unser[2][0]))
+ unser = read_json(df.to_json(), numpy=False, dtype=False)
+ self.assert_(unser[2][0] is None)
+ unser = read_json(df.to_json(), numpy=False, convert_axes=False, dtype=False)
+ self.assert_(unser['2']['0'] is None)
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df[2][0] = np.inf
unser = read_json(df.to_json())
- self.assert_(np.isnan(unser['2'][0]))
+ self.assert_(np.isnan(unser[2][0]))
+ unser = read_json(df.to_json(), dtype=False)
+ self.assert_(np.isnan(unser[2][0]))
df[2][0] = np.NINF
unser = read_json(df.to_json())
- self.assert_(np.isnan(unser['2'][0]))
+ self.assert_(np.isnan(unser[2][0]))
+ unser = read_json(df.to_json(),dtype=False)
+ self.assert_(np.isnan(unser[2][0]))
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
@@ -192,13 +235,13 @@ def test_frame_to_json_except(self):
def test_series_from_json_to_json(self):
- def _check_orient(series, orient, dtype=None, numpy=True):
+ def _check_orient(series, orient, dtype=None, numpy=False):
series = series.sort_index()
unser = read_json(series.to_json(orient=orient), typ='series',
orient=orient, numpy=numpy, dtype=dtype)
unser = unser.sort_index()
- if series.index.dtype.type == np.datetime64:
- unser.index = DatetimeIndex(unser.index.values.astype('i8'))
+ #if series.index.dtype.type == np.datetime64:
+ # unser.index = DatetimeIndex(unser.index.values.astype('i8'))
if orient == "records" or orient == "values":
assert_almost_equal(series.values, unser.values)
else:
@@ -216,11 +259,11 @@ def _check_all_orients(series, dtype=None):
_check_orient(series, "index", dtype=dtype)
_check_orient(series, "values", dtype=dtype)
- _check_orient(series, "columns", dtype=dtype, numpy=False)
- _check_orient(series, "records", dtype=dtype, numpy=False)
- _check_orient(series, "split", dtype=dtype, numpy=False)
- _check_orient(series, "index", dtype=dtype, numpy=False)
- _check_orient(series, "values", dtype=dtype, numpy=False)
+ _check_orient(series, "columns", dtype=dtype, numpy=True)
+ _check_orient(series, "records", dtype=dtype, numpy=True)
+ _check_orient(series, "split", dtype=dtype, numpy=True)
+ _check_orient(series, "index", dtype=dtype, numpy=True)
+ _check_orient(series, "values", dtype=dtype, numpy=True)
# basic
_check_all_orients(self.series)
@@ -230,7 +273,7 @@ def _check_all_orients(series, dtype=None):
objSeries = Series([str(d) for d in self.objSeries],
index=self.objSeries.index,
name=self.objSeries.name)
- _check_all_orients(objSeries)
+ _check_all_orients(objSeries, dtype=False)
_check_all_orients(self.empty_series)
_check_all_orients(self.ts)
@@ -276,25 +319,28 @@ def test_axis_dates(self):
result = read_json(json,typ='series')
assert_series_equal(result,self.ts)
- def test_parse_dates(self):
+ def test_convert_dates(self):
# frame
df = self.tsframe.copy()
df['date'] = Timestamp('20130101')
json = df.to_json()
- result = read_json(json,parse_dates=True)
+ result = read_json(json)
assert_frame_equal(result,df)
df['foo'] = 1.
json = df.to_json()
- result = read_json(json,parse_dates=True)
- assert_frame_equal(result,df)
+ result = read_json(json,convert_dates=False)
+ expected = df.copy()
+ expected['date'] = expected['date'].values.view('i8')
+ expected['foo'] = expected['foo'].astype('int64')
+ assert_frame_equal(result,expected)
# series
ts = Series(Timestamp('20130101'),index=self.ts.index)
json = ts.to_json()
- result = read_json(json,typ='series',parse_dates=True)
+ result = read_json(json,typ='series')
assert_series_equal(result,ts)
def test_date_format(self):
@@ -304,7 +350,7 @@ def test_date_format(self):
df_orig = df.copy()
json = df.to_json(date_format='iso')
- result = read_json(json,parse_dates=True)
+ result = read_json(json)
assert_frame_equal(result,df_orig)
# make sure that we did in fact copy
@@ -312,7 +358,7 @@ def test_date_format(self):
ts = Series(Timestamp('20130101'),index=self.ts.index)
json = ts.to_json(date_format='iso')
- result = read_json(json,typ='series',parse_dates=True)
+ result = read_json(json,typ='series')
assert_series_equal(result,ts)
def test_weird_nested_json(self):
@@ -338,6 +384,38 @@ def test_weird_nested_json(self):
read_json(s)
+ def test_doc_example(self):
+ dfj2 = DataFrame(np.random.randn(5, 2), columns=list('AB'))
+ dfj2['date'] = Timestamp('20130101')
+ dfj2['ints'] = range(5)
+ dfj2['bools'] = True
+ dfj2.index = pd.date_range('20130101',periods=5)
+
+ json = dfj2.to_json()
+ result = read_json(json,dtype={'ints' : np.int64, 'bools' : np.bool_})
+ assert_frame_equal(result,result)
+
+ def test_misc_example(self):
+
+ # parsing unordered input fails
+ result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]',numpy=True)
+ expected = DataFrame([[1,2],[1,2]],columns=['a','b'])
+ #assert_frame_equal(result,expected)
+
+ result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
+ expected = DataFrame([[1,2],[1,2]],columns=['a','b'])
+ assert_frame_equal(result,expected)
+
+ @network
+ @slow
+ def test_round_trip_exception_(self):
+ # GH 3867
+
+ df = pd.read_csv('https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv')
+ s = df.to_json()
+ result = pd.read_json(s)
+ assert_frame_equal(result.reindex(index=df.index,columns=df.columns),df)
+
@network
@slow
def test_url(self):
@@ -345,7 +423,7 @@ def test_url(self):
try:
url = 'https://api.github.com/repos/pydata/pandas/issues?per_page=5'
- result = read_json(url,parse_dates=True)
+ result = read_json(url,convert_dates=True)
for c in ['created_at','closed_at','updated_at']:
self.assert_(result[c].dtype == 'datetime64[ns]')
| revised argument structure for `read_json` to control dtype conversions, which are all on by default:
- `convert_axes` : if you for some reason want to turn off dtype conversion on the axes (only really necessary if you have string-like numbers)
- `dtype` : now accepts a dict of name -> dtype for specific conversions, or True to try to coerce all
- `convert_dates` : default True (in conjunction with `keep_default_dates` determines which columns to attempt date conversion)
DOC updates for all
| https://api.github.com/repos/pandas-dev/pandas/pulls/3876 | 2013-06-13T02:53:57Z | 2013-06-13T19:14:10Z | 2013-06-13T19:14:10Z | 2014-06-19T05:28:21Z |
FIX: change initObjToJSON return type | diff --git a/pandas/src/ujson/python/objToJSON.c b/pandas/src/ujson/python/objToJSON.c
index ce8bdf3721f5e..534d60970dd81 100644
--- a/pandas/src/ujson/python/objToJSON.c
+++ b/pandas/src/ujson/python/objToJSON.c
@@ -100,7 +100,11 @@ enum PANDAS_FORMAT
//#define PRINTMARK() fprintf(stderr, "%s: MARK(%d)\n", __FILE__, __LINE__)
#define PRINTMARK()
+#if (PY_VERSION_HEX >= 0x03000000)
void initObjToJSON(void)
+#else
+int initObjToJSON(void)
+#endif
{
PyObject *mod_frame;
PyDateTime_IMPORT;
| This is necessary because clang complains about the return type. There's a
call to the macro import_array() which injects a return statement into
wherever it's used.
closes #3872.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3874 | 2013-06-13T00:09:07Z | 2013-06-15T12:26:49Z | 2013-06-15T12:26:49Z | 2014-06-19T22:24:06Z |
BLD: remove after_script.sh from travis since it does not exist anymore | diff --git a/.travis.yml b/.travis.yml
index b48f6d834b62d..8e2bb49d9df93 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -55,4 +55,3 @@ script:
after_script:
- ci/print_versions.py
- - ci/after_script.sh
| closes #3857
| https://api.github.com/repos/pandas-dev/pandas/pulls/3868 | 2013-06-12T18:31:16Z | 2013-06-12T19:37:29Z | 2013-06-12T19:37:29Z | 2014-07-16T08:13:41Z |
CLN: avoid Unboundlocal error in tools/merge/_get_concatenated_data (GH3833) | diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 9cdddc47acac1..75e35b403dd78 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -984,11 +984,11 @@ def _prepare_blocks(self):
return blockmaps, reindexed_data
def _get_concatenated_data(self):
- try:
- # need to conform to same other (joined) axes for block join
- blockmaps, rdata = self._prepare_blocks()
- kinds = _get_all_block_kinds(blockmaps)
+ # need to conform to same other (joined) axes for block join
+ blockmaps, rdata = self._prepare_blocks()
+ kinds = _get_all_block_kinds(blockmaps)
+ try:
new_blocks = []
for kind in kinds:
klass_blocks = [mapping.get(kind) for mapping in blockmaps]
| closes #3833
| https://api.github.com/repos/pandas-dev/pandas/pulls/3864 | 2013-06-12T14:22:18Z | 2013-06-12T14:47:58Z | 2013-06-12T14:47:58Z | 2014-06-18T11:00:34Z |
Add colormap= argument to DataFrame plotting methods | diff --git a/doc/source/release.rst b/doc/source/release.rst
index b2eefda10fccc..0fa7b4b2ed5f2 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -52,6 +52,8 @@ pandas 0.12
- A ``filter`` method on grouped Series or DataFrames returns a subset of
the original (:issue:`3680`, :issue:`919`)
- Access to historical Google Finance data in pandas.io.data (:issue:`3814`)
+ - DataFrame plotting methods can sample column colors from a Matplotlib
+ colormap via the ``colormap`` keyword. (:issue:`3860`)
**Improvements to existing features**
diff --git a/doc/source/v0.12.0.txt b/doc/source/v0.12.0.txt
index 643ef7ddbbab4..4b100ed0b5fab 100644
--- a/doc/source/v0.12.0.txt
+++ b/doc/source/v0.12.0.txt
@@ -96,6 +96,12 @@ API changes
and thus you should cast to an appropriate numeric dtype if you need to
plot something.
+ - Add ``colormap`` keyword to DataFrame plotting methods. Accepts either a
+ matplotlib colormap object (ie, matplotlib.cm.jet) or a string name of such
+ an object (ie, 'jet'). The colormap is sampled to select the color for each
+ column. Please see :ref:`visualization.colormaps` for more information.
+ (:issue:`3860`)
+
- ``DataFrame.interpolate()`` is now deprecated. Please use
``DataFrame.fillna()`` and ``DataFrame.replace()`` instead. (:issue:`3582`,
:issue:`3675`, :issue:`3676`)
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index f0790396a5c39..f1a9880047691 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -531,3 +531,65 @@ be colored differently.
@savefig radviz.png width=6in
radviz(data, 'Name')
+
+.. _visualization.colormaps:
+
+Colormaps
+~~~~~~~~~
+
+A potential issue when plotting a large number of columns is that it can be difficult to distinguish some series due to repetition in the default colors. To remedy this, DataFrame plotting supports the use of the ``colormap=`` argument, which accepts either a Matplotlib `colormap <http://matplotlib.org/api/cm_api.html>`__ or a string that is a name of a colormap registered with Matplotlib. A visualization of the default matplotlib colormaps is available `here <http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps>`__.
+
+As matplotlib does not directly support colormaps for line-based plots, the colors are selected based on an even spacing determined by the number of columns in the DataFrame. There is no consideration made for background color, so some colormaps will produce lines that are not easily visible.
+
+To use the jet colormap, we can simply pass ``'jet'`` to ``colormap=``
+
+.. ipython:: python
+
+ df = DataFrame(randn(1000, 10), index=ts.index)
+ df = df.cumsum()
+
+ plt.figure()
+
+ @savefig jet.png width=6in
+ df.plot(colormap='jet')
+
+or we can pass the colormap itself
+
+.. ipython:: python
+
+ from matplotlib import cm
+
+ plt.figure()
+
+ @savefig jet_cm.png width=6in
+ df.plot(colormap=cm.jet)
+
+Colormaps can also be used other plot types, like bar charts:
+
+.. ipython:: python
+
+ dd = DataFrame(randn(10, 10)).applymap(abs)
+ dd = dd.cumsum()
+
+ plt.figure()
+
+ @savefig greens.png width=6in
+ dd.plot(kind='bar', colormap='Greens')
+
+Parallel coordinates charts:
+
+.. ipython:: python
+
+ plt.figure()
+
+ @savefig parallel_gist_rainbow.png width=6in
+ parallel_coordinates(data, 'Name', colormap='gist_rainbow')
+
+Andrews curves charts:
+
+.. ipython:: python
+
+ plt.figure()
+
+ @savefig andrews_curve_winter.png width=6in
+ andrews_curves(data, 'Name', colormap='winter')
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index e57e5a9af2fc0..d094e8b99d9cb 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -103,6 +103,35 @@ def test_bar_colors(self):
self.assert_(xp == rs)
plt.close('all')
+
+ from matplotlib import cm
+
+ # Test str -> colormap functionality
+ ax = df.plot(kind='bar', colormap='jet')
+
+ rects = ax.patches
+
+ rgba_colors = map(cm.jet, np.linspace(0, 1, 5))
+ for i, rect in enumerate(rects[::5]):
+ xp = rgba_colors[i]
+ rs = rect.get_facecolor()
+ self.assert_(xp == rs)
+
+ plt.close('all')
+
+ # Test colormap functionality
+ ax = df.plot(kind='bar', colormap=cm.jet)
+
+ rects = ax.patches
+
+ rgba_colors = map(cm.jet, np.linspace(0, 1, 5))
+ for i, rect in enumerate(rects[::5]):
+ xp = rgba_colors[i]
+ rs = rect.get_facecolor()
+ self.assert_(xp == rs)
+
+ plt.close('all')
+
df.ix[:, [0]].plot(kind='bar', color='DodgerBlue')
@slow
@@ -600,6 +629,7 @@ def test_andrews_curves(self):
def test_parallel_coordinates(self):
from pandas import read_csv
from pandas.tools.plotting import parallel_coordinates
+ from matplotlib import cm
path = os.path.join(curpath(), 'data/iris.csv')
df = read_csv(path)
_check_plot_works(parallel_coordinates, df, 'Name')
@@ -611,6 +641,7 @@ def test_parallel_coordinates(self):
colors=('#556270', '#4ECDC4', '#C7F464'))
_check_plot_works(parallel_coordinates, df, 'Name',
colors=['dodgerblue', 'aquamarine', 'seagreen'])
+ _check_plot_works(parallel_coordinates, df, 'Name', colormap=cm.jet)
df = read_csv(
path, header=None, skiprows=1, names=[1, 2, 4, 8, 'Name'])
@@ -622,9 +653,11 @@ def test_parallel_coordinates(self):
def test_radviz(self):
from pandas import read_csv
from pandas.tools.plotting import radviz
+ from matplotlib import cm
path = os.path.join(curpath(), 'data/iris.csv')
df = read_csv(path)
_check_plot_works(radviz, df, 'Name')
+ _check_plot_works(radviz, df, 'Name', colormap=cm.jet)
@slow
def test_plot_int_columns(self):
@@ -666,6 +699,7 @@ def test_line_colors(self):
import matplotlib.pyplot as plt
import sys
from StringIO import StringIO
+ from matplotlib import cm
custom_colors = 'rgcby'
@@ -691,6 +725,30 @@ def test_line_colors(self):
finally:
sys.stderr = tmp
+ plt.close('all')
+
+ ax = df.plot(colormap='jet')
+
+ rgba_colors = map(cm.jet, np.linspace(0, 1, len(df)))
+
+ lines = ax.get_lines()
+ for i, l in enumerate(lines):
+ xp = rgba_colors[i]
+ rs = l.get_color()
+ self.assert_(xp == rs)
+
+ plt.close('all')
+
+ ax = df.plot(colormap=cm.jet)
+
+ rgba_colors = map(cm.jet, np.linspace(0, 1, len(df)))
+
+ lines = ax.get_lines()
+ for i, l in enumerate(lines):
+ xp = rgba_colors[i]
+ rs = l.get_color()
+ self.assert_(xp == rs)
+
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
plt.close('all')
@@ -862,6 +920,10 @@ def test_option_mpl_style(self):
except ValueError:
pass
+ def test_invalid_colormap(self):
+ df = DataFrame(np.random.randn(500, 2), columns=['A', 'B'])
+
+ self.assertRaises(ValueError, df.plot, colormap='invalid_colormap')
def _check_plot_works(f, *args, **kwargs):
import matplotlib.pyplot as plt
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index a5aaac05d8ad8..8abe9df5ddd56 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -91,6 +91,43 @@
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
+def _get_standard_colors(num_colors=None, colormap=None,
+ color_type='default', color=None):
+ import matplotlib.pyplot as plt
+
+ if color is None and colormap is not None:
+ if isinstance(colormap, basestring):
+ import matplotlib.cm as cm
+ colormap = cm.get_cmap(colormap)
+ colors = map(colormap, np.linspace(0, 1, num=num_colors))
+ elif color is not None:
+ if colormap is not None:
+ warnings.warn("'color' and 'colormap' cannot be used "
+ "simultaneously. Using 'color'")
+ colors = color
+ else:
+ if color_type == 'default':
+ colors = plt.rcParams.get('axes.color_cycle', list('bgrcmyk'))
+ if isinstance(colors, basestring):
+ colors = list(colors)
+ elif color_type == 'random':
+ import random
+ def random_color(column):
+ random.seed(column)
+ return [random.random() for _ in range(3)]
+
+ colors = map(random_color, range(num_colors))
+ else:
+ raise NotImplementedError
+
+ if len(colors) != num_colors:
+ multiple = num_colors//len(colors) - 1
+ mod = num_colors % len(colors)
+
+ colors += multiple * colors
+ colors += colors[:mod]
+
+ return colors
class _Options(dict):
"""
@@ -283,7 +320,7 @@ def _get_marker_compat(marker):
return marker
-def radviz(frame, class_column, ax=None, **kwds):
+def radviz(frame, class_column, ax=None, colormap=None, **kwds):
"""RadViz - a multivariate data visualization algorithm
Parameters:
@@ -291,6 +328,9 @@ def radviz(frame, class_column, ax=None, **kwds):
frame: DataFrame object
class_column: Column name that contains information about class membership
ax: Matplotlib axis object, optional
+ colormap : str or matplotlib colormap object, default None
+ Colormap to select colors from. If string, load colormap with that name
+ from matplotlib.
kwds: Matplotlib scatter method keyword arguments, optional
Returns:
@@ -302,10 +342,6 @@ def radviz(frame, class_column, ax=None, **kwds):
import matplotlib.text as text
import random
- def random_color(column):
- random.seed(column)
- return [random.random() for _ in range(3)]
-
def normalize(series):
a = min(series)
b = max(series)
@@ -322,6 +358,9 @@ def normalize(series):
classes = set(frame[class_column])
to_plot = {}
+ colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
+ color_type='random', color=kwds.get('color'))
+
for class_ in classes:
to_plot[class_] = [[], []]
@@ -338,10 +377,10 @@ def normalize(series):
to_plot[class_name][0].append(y[0])
to_plot[class_name][1].append(y[1])
- for class_ in classes:
+ for i, class_ in enumerate(classes):
line = ax.scatter(to_plot[class_][0],
to_plot[class_][1],
- color=random_color(class_),
+ color=colors[i],
label=com.pprint_thing(class_), **kwds)
ax.legend()
@@ -368,7 +407,8 @@ def normalize(series):
return ax
-def andrews_curves(data, class_column, ax=None, samples=200):
+def andrews_curves(data, class_column, ax=None, samples=200, colormap=None,
+ **kwds):
"""
Parameters:
-----------
@@ -377,6 +417,10 @@ def andrews_curves(data, class_column, ax=None, samples=200):
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
+ colormap : str or matplotlib colormap object, default None
+ Colormap to select colors from. If string, load colormap with that name
+ from matplotlib.
+ kwds : Optional plotting arguments to be passed to matplotlib
Returns:
--------
@@ -401,15 +445,17 @@ def f(x):
return result
return f
- def random_color(column):
- random.seed(column)
- return [random.random() for _ in range(3)]
+
n = len(data)
classes = set(data[class_column])
class_col = data[class_column]
columns = [data[col] for col in data.columns if (col != class_column)]
x = [-pi + 2.0 * pi * (t / float(samples)) for t in range(samples)]
used_legends = set([])
+
+ colors = _get_standard_colors(num_colors=n, colormap=colormap,
+ color_type='random', color=kwds.get('color'))
+
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
@@ -420,9 +466,9 @@ def random_color(column):
if com.pprint_thing(class_col[i]) not in used_legends:
label = com.pprint_thing(class_col[i])
used_legends.add(label)
- ax.plot(x, y, color=random_color(class_col[i]), label=label)
+ ax.plot(x, y, color=colors[i], label=label, **kwds)
else:
- ax.plot(x, y, color=random_color(class_col[i]))
+ ax.plot(x, y, color=colors[i], **kwds)
ax.legend(loc='upper right')
ax.grid()
@@ -492,7 +538,7 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
def parallel_coordinates(data, class_column, cols=None, ax=None, colors=None,
- use_columns=False, xticks=None, **kwds):
+ use_columns=False, xticks=None, colormap=None, **kwds):
"""Parallel coordinates plotting.
Parameters
@@ -511,6 +557,8 @@ def parallel_coordinates(data, class_column, cols=None, ax=None, colors=None,
If true, columns will be used as xticks
xticks: list or tuple, optional
A list of values to use for xticks
+ colormap: str or matplotlib colormap, default None
+ Colormap to use for line colors.
kwds: list, optional
A list of keywords for matplotlib plot method
@@ -530,9 +578,7 @@ def parallel_coordinates(data, class_column, cols=None, ax=None, colors=None,
import matplotlib.pyplot as plt
import random
- def random_color(column):
- random.seed(column)
- return [random.random() for _ in range(3)]
+
n = len(data)
classes = set(data[class_column])
class_col = data[class_column]
@@ -563,13 +609,11 @@ def random_color(column):
if ax is None:
ax = plt.gca()
- # if user has not specified colors to use, choose at random
- if colors is None:
- colors = dict((kls, random_color(kls)) for kls in classes)
- else:
- if len(colors) != len(classes):
- raise ValueError('Number of colors must match number of classes')
- colors = dict((kls, colors[i]) for i, kls in enumerate(classes))
+ color_values = _get_standard_colors(num_colors=len(classes),
+ colormap=colormap, color_type='random',
+ color=colors)
+
+ colors = dict(zip(classes, color_values))
for i in range(n):
row = df.irow(i).values
@@ -714,7 +758,7 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=True,
ax=None, fig=None, title=None, xlim=None, ylim=None,
xticks=None, yticks=None,
sort_columns=False, fontsize=None,
- secondary_y=False, **kwds):
+ secondary_y=False, colormap=None, **kwds):
self.data = data
self.by = by
@@ -756,6 +800,8 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=True,
secondary_y = [secondary_y]
self.secondary_y = secondary_y
+ self.colormap = colormap
+
self.kwds = kwds
self._validate_color_args()
@@ -774,6 +820,11 @@ def _validate_color_args(self):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
+ if ('color' in self.kwds or 'colors' in self.kwds) and \
+ self.colormap is not None:
+ warnings.warn("'color' and 'colormap' cannot be used "
+ "simultaneously. Using 'color'")
+
def _iter_data(self):
from pandas.core.frame import DataFrame
if isinstance(self.data, (Series, np.ndarray)):
@@ -1072,15 +1123,18 @@ def _get_style(self, i, col_name):
return style or None
def _get_colors(self):
- import matplotlib.pyplot as plt
- cycle = plt.rcParams.get('axes.color_cycle', list('bgrcmyk'))
- if isinstance(cycle, basestring):
- cycle = list(cycle)
- colors = self.kwds.get('color', cycle)
- return colors
+ from pandas.core.frame import DataFrame
+ if isinstance(self.data, DataFrame):
+ num_colors = len(self.data.columns)
+ else:
+ num_colors = 1
+
+ return _get_standard_colors(num_colors=num_colors,
+ colormap=self.colormap,
+ color=self.kwds.get('color'))
def _maybe_add_color(self, colors, kwds, style, i):
- has_color = 'color' in kwds
+ has_color = 'color' in kwds or self.colormap is not None
if has_color and (style is None or re.match('[a-z]+', style) is None):
kwds['color'] = colors[i % len(colors)]
@@ -1090,6 +1144,7 @@ def _get_marked_label(self, label, col_num):
else:
return label
+
class KdePlot(MPLPlot):
def __init__(self, data, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
@@ -1389,15 +1444,6 @@ def f(ax, x, y, w, start=None, log=self.log, **kwds):
return f
- def _get_colors(self):
- import matplotlib.pyplot as plt
- cycle = plt.rcParams.get('axes.color_cycle', list('bgrcmyk'))
- if isinstance(cycle, basestring):
- cycle = list(cycle)
- has_colors = 'color' in self.kwds
- colors = self.kwds.get('color', cycle)
- return colors
-
def _make_plot(self):
import matplotlib as mpl
colors = self._get_colors()
@@ -1547,6 +1593,9 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,
mark_right: boolean, default True
When using a secondary_y axis, should the legend label the axis of
the various columns automatically
+ colormap : str or matplotlib colormap object, default None
+ Colormap to select colors from. If string, load colormap with that name
+ from matplotlib.
kwds : keywords
Options to pass to matplotlib plotting method
@@ -1724,12 +1773,7 @@ def boxplot(data, column=None, by=None, ax=None, fontsize=None,
def _get_colors():
- import matplotlib.pyplot as plt
- cycle = plt.rcParams.get('axes.color_cycle', list('bgrcmyk'))
- if isinstance(cycle, basestring):
- cycle = list(cycle)
- colors = kwds.get('color', cycle)
- return colors
+ return _get_standard_colors(color=kwds.get('color'), num_colors=1)
def maybe_color_bp(bp):
if 'color' not in kwds :
| I frequently plot DataFrames with a large number of columns and generally have difficulty distinguishing series due to the short cycle length of the default color scheme.
Especially in cases where the ordering of columns has significant information, the ideal way to color the series would be with a matplotlib colormap that uniformly spaces colors. This is pretty straightforward with pyplot, but pretty annoying to have to repeatedly do.
This patch modifies DataFrame plotting functions to take a `colormap=` argument consisting of either a `str` name of a [matplotlib colormap](http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps) or a colormap object itself.
```
df.cumsum().plot(colormap='jet', figsize=(10,5))
```

KDE plot:
```
df.plot(kind='kde', colormap='jet', figsize=(10,5))
```

Some colormaps don't work as well on a white background (the 0 column is white):
df.cumsum().plot(colormap=cm.Greens, figsize=(10,5))

But work better for other graph types:
df.plot(kind='bar', colormap='jet', figsize=(10,5))

Parallel coordinates on the iris dataset:
```
parallel_coordinates(iris, 'Name', colormap='gist_rainbow')
```

Andrews curves (I'd appreciate someone double checking this one; don't think I have it quite right):
```
andrews_curves(iris, 'Name', colormap='winter')
```

I've included some test coverage and unified all the color creation code into one method `_get_standard_colors()`. I started adding to the documentation but ran into a weird issue with the sphinx plot output. When adding this to `visualization.rst`:
```
.. ipython:: python
from matplotlib import cm
df = DataFrame(randn(1000, 4), index=ts.index, columns=list('ABCD'))
df = df.cumsum()
plt.figure()
@savefig greens.png width=6in
df.plot(colormap=cm.Greens)
```
I get this output (the lines should be white->green):

My first thought was that it was the `options.display.mpl_style = 'default'`, but plots render fine in IPython with this setting. My guess is something in `@savefig`, but is anyone familiar with what might be happening here?
| https://api.github.com/repos/pandas-dev/pandas/pulls/3860 | 2013-06-12T06:57:17Z | 2013-06-27T02:56:30Z | 2013-06-27T02:56:30Z | 2014-06-18T19:57:31Z |
DOC add to_datetime to api.rst | diff --git a/doc/source/api.rst b/doc/source/api.rst
index a4be0df5f489e..7e863a4429487 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -126,7 +126,7 @@ Data manipulations
merge
concat
-Top-level Missing Data
+Top-level missing data
~~~~~~~~~~~~~~~~~~~~~~
.. currentmodule:: pandas.core.common
@@ -137,6 +137,17 @@ Top-level Missing Data
isnull
notnull
+Top-level dealing with datetimes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. currentmodule:: pandas.tseries.tools
+
+.. autosummary::
+ :toctree: generated/
+
+ to_datetime
+
+
Standard moving window functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index f8d1e8323b9f5..f11bf60549d93 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -110,6 +110,68 @@ scalar values and ``PeriodIndex`` for sequences of spans. Better support for
irregular intervals with arbitrary start and end points are forth-coming in
future releases.
+
+.. _timeseries.converting:
+
+Converting to Timestamps
+------------------------
+
+To convert a Series or list-like object of date-like objects e.g. strings,
+epochs, or a mixture, you can use the ``to_datetime`` function. When passed
+a Series, this returns a Series (with the same index), while a list-like
+is converted to a DatetimeIndex:
+
+.. ipython:: python
+
+ to_datetime(Series(['Jul 31, 2009', '2010-01-10', None]))
+
+ to_datetime(['2005/11/23', '2010.12.31'])
+
+If you use dates which start with the day first (i.e. European style),
+you can pass the ``dayfirst`` flag:
+
+.. ipython:: python
+
+ to_datetime(['04-01-2012 10:00'], dayfirst=True)
+
+ to_datetime(['14-01-2012', '01-14-2012'], dayfirst=True)
+
+.. warning::
+
+ You see in the above example that ``dayfirst`` isn't strict, so if a date
+ can't be parsed with the day being first it will be parsed as if
+ ``dayfirst`` were False.
+
+
+Pass ``coerce=True`` to convert bad data to ``NaT`` (not a time):
+
+.. ipython:: python
+
+ to_datetime(['2009-07-31', 'asd'])
+
+ to_datetime(['2009-07-31', 'asd'], coerce=True)
+
+It's also possible to convert integer or float epoch times. The default unit
+for these is nanoseconds (since these are how Timestamps are stored). However,
+often epochs are stored in another ``unit`` which can be specified:
+
+
+.. ipython:: python
+
+ to_datetime([1])
+
+ to_datetime([1, 3.14], unit='s')
+
+.. note::
+
+ Epoch times will be rounded to the nearest nanosecond.
+
+Take care, ``to_datetime`` may not act as you expect on mixed data:
+
+.. ipython:: python
+
+ pd.to_datetime([1, '1'])
+
.. _timeseries.daterange:
Generating Ranges of Timestamps
| Either I'm being thick or `to_datetime` isn't in the docs (does adding it like this add it?)
Should also put something in basics...?
| https://api.github.com/repos/pandas-dev/pandas/pulls/3859 | 2013-06-12T02:54:37Z | 2013-06-21T00:07:10Z | 2013-06-21T00:07:10Z | 2014-06-16T20:26:09Z |
TST slicing regression test | diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index e7f824ace983c..295eaede443b1 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -724,6 +724,15 @@ def test_ix_general(self):
df.sortlevel(inplace=True)
df.ix[(4.0,2012)]
+ def test_ix_weird_slicing(self):
+ ## http://stackoverflow.com/q/17056560/1240268
+ df = DataFrame({'one' : [1, 2, 3, np.nan, np.nan], 'two' : [1, 2, 3, 4, 5]})
+ df.ix[df['one']>1, 'two'] = -df['two']
+
+ expected = DataFrame({'one': {0: 1.0, 1: 2.0, 2: 3.0, 3: nan, 4: nan},
+ 'two': {0: 1, 1: -2, 2: -3, 3: 4, 4: 5}})
+ assert_frame_equal(df, expected)
+
def test_xs_multiindex(self):
# GH2903
| From http://stackoverflow.com/questions/17056560/how-do-i-assign-a-vector-to-a-subset-of-rows-of-a-column-in-a-pandas-dataframe fixed in #3668 ?
| https://api.github.com/repos/pandas-dev/pandas/pulls/3858 | 2013-06-12T02:26:18Z | 2013-06-12T03:20:07Z | 2013-06-12T03:20:07Z | 2014-06-23T10:54:37Z |
ENH Prefer requests over urllib2 | diff --git a/pandas/io/common.py b/pandas/io/common.py
index 353930482c8b8..5fdaec0f0f0d1 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -34,23 +34,25 @@ def _is_s3_url(url):
return False
def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
- """ if the filepath_or_buffer is a url, translate and return the buffer
- passthru otherwise
+ """
+ if the filepath_or_buffer is a url, translate and return the buffer
+ passthrough otherwise
- Parameters
- ----------
- filepath_or_buffer : a url, filepath, or buffer
- encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
+ Parameters
+ ----------
+ filepath_or_buffer : a url, filepath, or buffer
+ encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
- Returns
- -------
- a filepath_or_buffer, the encoding
-
- """
+ Returns
+ -------
+ a filepath_or_buffer, the encoding
+
+ """
if _is_url(filepath_or_buffer):
- from urllib2 import urlopen
- filepath_or_buffer = urlopen(filepath_or_buffer)
+
+ _, filepath_or_buffer = _req_url(filepath_or_buffer) # raise if not status_code 200?
+
if py3compat.PY3: # pragma: no cover
if encoding:
errors = 'strict'
@@ -65,7 +67,7 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
if _is_s3_url(filepath_or_buffer):
try:
import boto
- except:
+ except ImportError:
raise ImportError("boto is required to handle s3 files")
# Assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
# are environment variables
@@ -78,3 +80,43 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
return filepath_or_buffer, None
return filepath_or_buffer, None
+
+def _req_url(url):
+ '''
+ Retrieves text output of request to url
+ Raises on bad status_code or invalid urls
+ Prefer requests module if available
+
+ Parameters
+ ----------
+ url : string
+
+ Returns
+ -------
+ status_code : int, the HTTP status_code
+ buf_text : the text from the url request
+
+ '''
+ try_requests = True
+ if try_requests:
+ try:
+ import requests
+ resp = requests.get(url)
+ resp.raise_for_status()
+ buf_text = StringIO(resp.text)
+ status_code = resp.status_code
+ return status_code, buf_text
+ except (ImportError,):
+ pass
+ except (requests.exceptions.InvalidURL,
+ requests.exceptions.InvalidSchema):
+ # responses can't deal with local files
+ pass
+
+ import urllib2
+ resp = urllib2.urlopen(url)
+ # except urllib2.URLError: # don't think there was a purpose to this bit, raises itself
+ # raise ValueError('Invalid URL: "{0}"'.format(url))
+ status_code = resp.code
+ buf_text = resp # if status_code == 200 else '' # If not 200 does it raise?
+ return status_code, buf_text
diff --git a/pandas/io/data.py b/pandas/io/data.py
index 8bc3df561cadb..ca4e84eacf5ae 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -16,7 +16,7 @@
from pandas import Panel, DataFrame, Series, read_csv, concat
from pandas.io.parsers import TextParser
-
+from pandas.io.common import _req_url
def DataReader(name, data_source=None, start=None, end=None,
retry_count=3, pause=0):
@@ -166,10 +166,9 @@ def _get_hist_yahoo(sym=None, start=None, end=None, retry_count=3,
'&ignore=.csv'
for _ in range(retry_count):
- resp = urllib2.urlopen(url)
- if resp.code == 200:
- lines = resp.read()
- rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0,
+ status_code, buf_text = _req_url(url)
+ if status_code == 200:
+ rs = read_csv(buf_text, index_col=0,
parse_dates=True)[::-1]
# Yahoo! Finance sometimes does this awesome thing where they
@@ -206,11 +205,9 @@ def _get_hist_google(sym=None, start=None, end=None, retry_count=3,
"startdate": start.strftime('%b %d, %Y'), \
"enddate": end.strftime('%b %d, %Y'), "output": "csv" })
for _ in range(retry_count):
- resp = urllib2.urlopen(url)
- if resp.code == 200:
- lines = resp.read()
- rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0,
- parse_dates=True)[::-1]
+ status_code, buf_text = _req_url(url)
+ if status_code == 200:
+ rs = read_csv(buf_text, index_col=0, parse_dates=True)[::-1]
return rs
@@ -472,8 +469,7 @@ def get_data_fred(name=None, start=dt.datetime(2010, 1, 1),
fred_URL = "http://research.stlouisfed.org/fred2/series/"
- url = fred_URL + '%s' % name + \
- '/downloaddata/%s' % name + '.csv'
+ url = '%s%s/downloaddata/%s.csv' % (fred_URL, name, name)
data = read_csv(urllib.urlopen(url), index_col=0, parse_dates=True,
header=None, skiprows=1, names=["DATE", name],
na_values='.')
diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py
index b64bfaacd38f2..b4f11d2c7cbde 100644
--- a/pandas/io/tests/test_json/test_pandas.py
+++ b/pandas/io/tests/test_json/test_pandas.py
@@ -350,6 +350,10 @@ def test_url(self):
url = 'http://search.twitter.com/search.json?q=pandas%20python'
result = read_json(url)
+
+ # gzip compression
+ url = 'https://api.stackexchange.com/2.1/search?page=1&pagesize=10&order=desc&sort=activity&tagged=pandas&site=stackoverflow'
+ result = pd.read_json(url)
except urllib2.URLError:
raise nose.SkipTest
| At the moment we use urllib2 for http requests, this branch prefers to use requests if it's installed. For one thing it means you can read gzipped json, which is more difficult in urllib2:
```
url = 'https://api.stackexchange.com/2.1/search?page=1&pagesize=10&order=desc&sort=activity&tagged=pandas&site=stackoverflow'
result = pd.read_json(url) # in master this raises ValueError: Expected object or value
```
cc #3804 @jreback
Thoughts?
| https://api.github.com/repos/pandas-dev/pandas/pulls/3856 | 2013-06-11T23:07:52Z | 2013-08-26T00:09:30Z | null | 2014-06-24T19:26:04Z |
TST: Fix missing import in io/tests/test_json | diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py
index b64bfaacd38f2..4b1294b786df7 100644
--- a/pandas/io/tests/test_json/test_pandas.py
+++ b/pandas/io/tests/test_json/test_pandas.py
@@ -8,6 +8,7 @@
import os
import unittest
+import nose
import numpy as np
from pandas import Series, DataFrame, DatetimeIndex, Timestamp
| Nose import is missing. If you get to the error at the last line, it throws an error because nose is never imported.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3855 | 2013-06-11T22:55:48Z | 2013-06-12T00:54:34Z | 2013-06-12T00:54:34Z | 2014-06-27T11:37:18Z |
DOC: Clarify quote behavior parameters | diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 6e937ba696e39..bc06969ba1fa1 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -42,7 +42,11 @@ class DateConversionError(Exception):
lineterminator : string (length 1), default None
Character to break file into lines. Only valid with C parser
quotechar : string
-quoting : string
+ The character to used to denote the start and end of a quoted item. Quoted items can include the delimiter and it will be ignored.
+quoting : int
+ Controls whether quotes should be recognized. Values are taken from
+ `csv.QUOTE_*` values. Acceptable values are 0, 1, 2, and 3 for
+ QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONE, and QUOTE_NONNUMERIC, respectively.
skipinitialspace : boolean, default False
Skip spaces after delimiter
escapechar : string
| I've been bit many times recently by mal-formed CSV. Non-closing quotes across lines. This clarifies how to avoid the problem a bit.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3853 | 2013-06-11T19:50:17Z | 2013-06-26T14:49:49Z | 2013-06-26T14:49:49Z | 2013-06-26T14:53:25Z |
ENH use pyperclip for read and to_clipboard | diff --git a/LICENSES/OTHER b/LICENSES/OTHER
index a1b367fe6061c..f0550b4ee208a 100644
--- a/LICENSES/OTHER
+++ b/LICENSES/OTHER
@@ -48,3 +48,33 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+Pyperclip v1.3 license
+----------------------
+
+Copyright (c) 2010, Albert Sweigart
+All rights reserved.
+
+BSD-style license:
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the pyperclip nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY Albert Sweigart "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL Albert Sweigart BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/RELEASE.rst b/RELEASE.rst
index 307986ab81681..a03451542796a 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -73,6 +73,8 @@ pandas 0.11.1
- ``melt`` now accepts the optional parameters ``var_name`` and ``value_name``
to specify custom column names of the returned DataFrame (GH3649_),
thanks @hoechenberger
+ - clipboard functions use pyperclip (no dependencies on Windows, alternative
+ dependencies offered for Linux) (GH3837_).
- Plotting functions now raise a ``TypeError`` before trying to plot anything
if the associated objects have have a dtype of ``object`` (GH1818_,
GH3572_). This happens before any drawing takes place which elimnates any
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 5533584745167..1ea9c48f45269 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -492,6 +492,16 @@ def to_hdf(self, path_or_buf, key, **kwargs):
return pytables.to_hdf(path_or_buf, key, self, **kwargs)
def to_clipboard(self):
+ """
+ Attempt to write text representation of object to the system clipboard
+
+ Notes
+ -----
+ Requirements for your platform
+ - Linux: xclip, or xsel (with gtk or PyQt4 modules)
+ - Windows:
+ - OS X:
+ """
from pandas.io import clipboard
clipboard.to_clipboard(self)
diff --git a/pandas/io/clipboard.py b/pandas/io/clipboard.py
index c763c1e8faadb..4e3f7203a279e 100644
--- a/pandas/io/clipboard.py
+++ b/pandas/io/clipboard.py
@@ -23,8 +23,8 @@ def to_clipboard(obj): # pragma: no cover
Notes
-----
Requirements for your platform
- - Linux: xsel command line tool
- - Windows: Python win32 extensions
+ - Linux: xclip, or xsel (with gtk or PyQt4 modules)
+ - Windows:
- OS X:
"""
from pandas.util.clipboard import clipboard_set
diff --git a/pandas/util/clipboard.py b/pandas/util/clipboard.py
index bc58af8c0ea3c..9f3ee0638352f 100644
--- a/pandas/util/clipboard.py
+++ b/pandas/util/clipboard.py
@@ -1,119 +1,160 @@
-"""
-Taken from the IPython project http://ipython.org
-
-Used under the terms of the BSD license
-"""
-
-import subprocess
-import sys
-
-
-def clipboard_get():
- """ Get text from the clipboard.
- """
- if sys.platform == 'win32':
- try:
- return win32_clipboard_get()
- except Exception:
- pass
- elif sys.platform == 'darwin':
- try:
- return osx_clipboard_get()
- except Exception:
- pass
- return tkinter_clipboard_get()
-
-
-def clipboard_set(text):
- """ Get text from the clipboard.
- """
- if sys.platform == 'win32':
- try:
- return win32_clipboard_set(text)
- except Exception:
- raise
- elif sys.platform == 'darwin':
- try:
- return osx_clipboard_set(text)
- except Exception:
- pass
- xsel_clipboard_set(text)
-
-
-def win32_clipboard_get():
- """ Get the current clipboard's text on Windows.
-
- Requires Mark Hammond's pywin32 extensions.
- """
- try:
- import win32clipboard
- except ImportError:
- message = ("Getting text from the clipboard requires the pywin32 "
- "extensions: http://sourceforge.net/projects/pywin32/")
- raise Exception(message)
- win32clipboard.OpenClipboard()
- text = win32clipboard.GetClipboardData(win32clipboard.CF_TEXT)
- # FIXME: convert \r\n to \n?
- win32clipboard.CloseClipboard()
- return text
-
-
-def osx_clipboard_get():
- """ Get the clipboard's text on OS X.
- """
- p = subprocess.Popen(['pbpaste', '-Prefer', 'ascii'],
- stdout=subprocess.PIPE)
- text, stderr = p.communicate()
- # Text comes in with old Mac \r line endings. Change them to \n.
- text = text.replace('\r', '\n')
- return text
-
-
-def tkinter_clipboard_get():
- """ Get the clipboard's text using Tkinter.
-
- This is the default on systems that are not Windows or OS X. It may
- interfere with other UI toolkits and should be replaced with an
- implementation that uses that toolkit.
- """
+# Pyperclip v1.3
+# A cross-platform clipboard module for Python. (only handles plain text for now)
+# By Al Sweigart al@coffeeghost.net
+
+# Usage:
+# import pyperclip
+# pyperclip.copy('The text to be copied to the clipboard.')
+# spam = pyperclip.paste()
+
+# On Mac, this module makes use of the pbcopy and pbpaste commands, which should come with the os.
+# On Linux, this module makes use of the xclip command, which should come with the os. Otherwise run "sudo apt-get install xclip"
+
+
+# Copyright (c) 2010, Albert Sweigart
+# All rights reserved.
+#
+# BSD-style license:
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# * Neither the name of the pyperclip nor the
+# names of its contributors may be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY Albert Sweigart "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL Albert Sweigart BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Change Log:
+# 1.2 Use the platform module to help determine OS.
+# 1.3 Changed ctypes.windll.user32.OpenClipboard(None) to ctypes.windll.user32.OpenClipboard(0), after some people ran into some TypeError
+
+import platform, os
+
+def winGetClipboard():
+ ctypes.windll.user32.OpenClipboard(0)
+ pcontents = ctypes.windll.user32.GetClipboardData(1) # 1 is CF_TEXT
+ data = ctypes.c_char_p(pcontents).value
+ #ctypes.windll.kernel32.GlobalUnlock(pcontents)
+ ctypes.windll.user32.CloseClipboard()
+ return data
+
+def winSetClipboard(text):
+ GMEM_DDESHARE = 0x2000
+ ctypes.windll.user32.OpenClipboard(0)
+ ctypes.windll.user32.EmptyClipboard()
try:
- import Tkinter
- except ImportError:
- message = ("Getting text from the clipboard on this platform "
- "requires Tkinter.")
- raise Exception(message)
- root = Tkinter.Tk()
- root.withdraw()
- text = root.clipboard_get()
- root.destroy()
- return text
-
-
-def win32_clipboard_set(text):
- # idiosyncratic win32 import issues
- import pywintypes as _
- import win32clipboard
- win32clipboard.OpenClipboard()
+ # works on Python 2 (bytes() only takes one argument)
+ hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text))+1)
+ except TypeError:
+ # works on Python 3 (bytes() requires an encoding)
+ hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text, 'ascii'))+1)
+ pchData = ctypes.windll.kernel32.GlobalLock(hCd)
try:
- win32clipboard.EmptyClipboard()
- win32clipboard.SetClipboardText(_fix_line_endings(text))
- finally:
- win32clipboard.CloseClipboard()
-
-
-def _fix_line_endings(text):
- return '\r\n'.join(text.splitlines())
-
-
-def osx_clipboard_set(text):
- """ Get the clipboard's text on OS X.
- """
- p = subprocess.Popen(['pbcopy', '-Prefer', 'ascii'],
- stdin=subprocess.PIPE)
- p.communicate(input=text)
-
-
-def xsel_clipboard_set(text):
- from subprocess import Popen, PIPE
- p = Popen(['xsel', '-bi'], stdin=PIPE)
- p.communicate(input=text)
+ # works on Python 2 (bytes() only takes one argument)
+ ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text))
+ except TypeError:
+ # works on Python 3 (bytes() requires an encoding)
+ ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text, 'ascii'))
+ ctypes.windll.kernel32.GlobalUnlock(hCd)
+ ctypes.windll.user32.SetClipboardData(1,hCd)
+ ctypes.windll.user32.CloseClipboard()
+
+def macSetClipboard(text):
+ outf = os.popen('pbcopy', 'w')
+ outf.write(text)
+ outf.close()
+
+def macGetClipboard():
+ outf = os.popen('pbpaste', 'r')
+ content = outf.read()
+ outf.close()
+ return content
+
+def gtkGetClipboard():
+ return gtk.Clipboard().wait_for_text()
+
+def gtkSetClipboard(text):
+ cb = gtk.Clipboard()
+ cb.set_text(text)
+ cb.store()
+
+def qtGetClipboard():
+ return str(cb.text())
+
+def qtSetClipboard(text):
+ cb.setText(text)
+
+def xclipSetClipboard(text):
+ outf = os.popen('xclip -selection c', 'w')
+ outf.write(text)
+ outf.close()
+
+def xclipGetClipboard():
+ outf = os.popen('xclip -selection c -o', 'r')
+ content = outf.read()
+ outf.close()
+ return content
+
+def xselSetClipboard(text):
+ outf = os.popen('xsel -i', 'w')
+ outf.write(text)
+ outf.close()
+
+def xselGetClipboard():
+ outf = os.popen('xsel -o', 'r')
+ content = outf.read()
+ outf.close()
+ return content
+
+
+if os.name == 'nt' or platform.system() == 'Windows':
+ import ctypes
+ getcb = winGetClipboard
+ setcb = winSetClipboard
+elif os.name == 'mac' or platform.system() == 'Darwin':
+ getcb = macGetClipboard
+ setcb = macSetClipboard
+elif os.name == 'posix' or platform.system() == 'Linux':
+ xclipExists = os.system('which xclip') == 0
+ if xclipExists:
+ getcb = xclipGetClipboard
+ setcb = xclipSetClipboard
+ else:
+ xselExists = os.system('which xsel') == 0
+ if xselExists:
+ getcb = xselGetClipboard
+ setcb = xselSetClipboard
+ try:
+ import gtk
+ getcb = gtkGetClipboard
+ setcb = gtkSetClipboard
+ except:
+ try:
+ import PyQt4.QtCore
+ import PyQt4.QtGui
+ app = QApplication([])
+ cb = PyQt4.QtGui.QApplication.clipboard()
+ getcb = qtGetClipboard
+ setcb = qtSetClipboard
+ except:
+ raise Exception('Pyperclip requires the gtk or PyQt4 module installed, or the xclip command.')
+copy = setcb
+paste = getcb
+
+## pandas aliases
+clipboard_get = paste
+clipboard_set = copy
\ No newline at end of file
| Use [pyperclip](http://coffeeghost.net/src/pyperclip.py) to manage copy and pasting.
Fixes #3837, also cc #3845.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3848 | 2013-06-11T10:33:11Z | 2013-06-13T18:41:39Z | 2013-06-13T18:41:39Z | 2014-07-16T08:13:24Z |
Fixing get_data_yahoo/google pause and retry | diff --git a/RELEASE.rst b/RELEASE.rst
index 307986ab81681..e76576106b30c 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -222,6 +222,7 @@ pandas 0.11.1
- ``read_html`` now correctly skips tests (GH3741_)
- Fix incorrect arguments passed to concat that are not list-like (e.g. concat(df1,df2)) (GH3481_)
- Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes) in ``read_csv`` (GH3795_)
+ - Fix using provided pause and retry_count arguments for ``get_data_yahoo`` and ``get_data_google``
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
diff --git a/pandas/io/data.py b/pandas/io/data.py
index 8bc3df561cadb..d3df25992dff6 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -182,7 +182,7 @@ def _get_hist_yahoo(sym=None, start=None, end=None, retry_count=3,
time.sleep(pause)
raise Exception("after %d tries, Yahoo did not "
- "return a 200 for url %s" % (pause, url))
+ "return a 200 for url %s" % (retry_count, url))
def _get_hist_google(sym=None, start=None, end=None, retry_count=3,
@@ -217,7 +217,7 @@ def _get_hist_google(sym=None, start=None, end=None, retry_count=3,
time.sleep(pause)
raise Exception("after %d tries, Google did not "
- "return a 200 for url %s" % (pause, url))
+ "return a 200 for url %s" % (retry_count, url))
def _adjust_prices(hist_data, price_list=['Open', 'High', 'Low', 'Close']):
@@ -369,7 +369,9 @@ def dl_mult_symbols(symbols):
#If a single symbol, (e.g., 'GOOG')
if isinstance(symbols, (str, int)):
sym = symbols
- hist_data = _get_hist_yahoo(sym, start=start, end=end)
+ hist_data = _get_hist_yahoo(sym, start=start, end=end,
+ retry_count=retry_count,
+ pause=pause)
#Or multiple symbols, (e.g., ['GOOG', 'AAPL', 'MSFT'])
elif isinstance(symbols, DataFrame):
try:
@@ -441,7 +443,9 @@ def dl_mult_symbols(symbols):
#If a single symbol, (e.g., 'GOOG')
if isinstance(symbols, (str, int)):
sym = symbols
- hist_data = _get_hist_google(sym, start=start, end=end)
+ hist_data = _get_hist_google(sym, start=start, end=end,
+ retry_count=retry_count,
+ pause=pause)
#Or multiple symbols, (e.g., ['GOOG', 'AAPL', 'MSFT'])
elif isinstance(symbols, DataFrame):
try:
| Provide pause and retry_count to _get_hist_yahoo/google when single value given for symbols. Now pause option works, as described in docs.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3846 | 2013-06-11T06:41:47Z | 2013-06-29T18:46:08Z | null | 2014-07-14T05:25:17Z |
Io to clipboard | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 9d923d2d0e0cf..d01b671bbae67 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1231,6 +1231,26 @@ And then import the data directly to a DataFrame by calling:
clipdf
+The ``to_clipboard`` method can be used to write the contents of a DataFrame to
+the clipboard. Following which you can paste the clipboard contents into other
+applications (CTRL-V on many operating systems). Here we illustrate writing a
+DataFrame into clipboard and reading it back.
+
+.. ipython:: python
+
+ df=pd.DataFrame(randn(5,3))
+ df
+ df.to_clipboard()
+ pd.read_clipboard()
+
+We can see that we got the same content back, which we had earlier written to the clipboard.
+
+.. note::
+
+ You may need to install xclip or xsel (with gtk or PyQt4 modules) on Linux to use these methods.
+
+
+
.. _io.excel:
| Added documentation for to_clipboard().
Closes #3784
| https://api.github.com/repos/pandas-dev/pandas/pulls/3845 | 2013-06-11T05:21:44Z | 2013-06-13T18:42:30Z | 2013-06-13T18:42:30Z | 2014-07-16T08:13:23Z |
ENH: add figsize argument to DataFrame and Series hist methods | diff --git a/RELEASE.rst b/RELEASE.rst
index 307986ab81681..8256b13b4e553 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -79,6 +79,7 @@ pandas 0.11.1
spurious plots from showing up.
- Added Faq section on repr display options, to help users customize their setup.
- ``where`` operations that result in block splitting are much faster (GH3733_)
+ - Series and DataFrame hist methods now take a ``figsize`` argument (GH3834_)
**API Changes**
@@ -312,6 +313,8 @@ pandas 0.11.1
.. _GH3726: https://github.com/pydata/pandas/issues/3726
.. _GH3795: https://github.com/pydata/pandas/issues/3795
.. _GH3814: https://github.com/pydata/pandas/issues/3814
+.. _GH3834: https://github.com/pydata/pandas/issues/3834
+
pandas 0.11.0
=============
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 5045f73375a97..34ba9f0859641 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -288,6 +288,8 @@ Enhancements
dff.groupby('B').filter(lambda x: len(x) > 2, dropna=False)
+ - Series and DataFrame hist methods now take a ``figsize`` argument (GH3834_)
+
Bug Fixes
~~~~~~~~~
@@ -396,3 +398,4 @@ on GitHub for a complete list.
.. _GH3741: https://github.com/pydata/pandas/issues/3741
.. _GH3726: https://github.com/pydata/pandas/issues/3726
.. _GH3425: https://github.com/pydata/pandas/issues/3425
+.. _GH3834: https://github.com/pydata/pandas/issues/3834
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 5a1411ccf577e..0755caf45d336 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -8,7 +8,7 @@
from pandas import Series, DataFrame, MultiIndex, PeriodIndex, date_range
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
-from pandas.core.config import set_option,get_option,config_prefix
+from pandas.core.config import set_option
import numpy as np
@@ -28,11 +28,6 @@ class TestSeriesPlots(unittest.TestCase):
@classmethod
def setUpClass(cls):
- import sys
-
- # if 'IPython' in sys.modules:
- # raise nose.SkipTest
-
try:
import matplotlib as mpl
mpl.use('Agg', warn=False)
@@ -150,9 +145,16 @@ def test_irregular_datetime(self):
def test_hist(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
-
+ _check_plot_works(self.ts.hist, figsize=(8, 10))
_check_plot_works(self.ts.hist, by=self.ts.index.month)
+ def test_plot_fails_when_ax_differs_from_figure(self):
+ from pylab import figure
+ fig1 = figure()
+ fig2 = figure()
+ ax1 = fig1.add_subplot(111)
+ self.assertRaises(AssertionError, self.ts.hist, ax=ax1, figure=fig2)
+
@slow
def test_kde(self):
_skip_if_no_scipy()
@@ -258,7 +260,8 @@ def test_plot(self):
(u'\u03b4', 6),
(u'\u03b4', 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u'\u0394'),
- ('bar', u'\u0395')], names=['c0', 'c1'])
+ ('bar', u'\u0395')], names=['c0',
+ 'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
@@ -269,9 +272,9 @@ def test_nonnumeric_exclude(self):
import matplotlib.pyplot as plt
plt.close('all')
- df = DataFrame({'A': ["x", "y", "z"], 'B': [1,2,3]})
+ df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
- self.assert_(len(ax.get_lines()) == 1) #B was plotted
+ self.assert_(len(ax.get_lines()) == 1) # B was plotted
@slow
def test_label(self):
@@ -434,21 +437,24 @@ def test_bar_center(self):
ax = df.plot(kind='bar', grid=True)
self.assertEqual(ax.xaxis.get_ticklocs()[0],
ax.patches[0].get_x() + ax.patches[0].get_width())
+
@slow
def test_bar_log(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
- df = DataFrame({'A': [3] * 5, 'B': range(1,6)}, index=range(5))
- ax = df.plot(kind='bar', grid=True,log=True)
- self.assertEqual(ax.yaxis.get_ticklocs()[0],1.0)
+ df = DataFrame({'A': [3] * 5, 'B': range(1, 6)}, index=range(5))
+ ax = df.plot(kind='bar', grid=True, log=True)
+ self.assertEqual(ax.yaxis.get_ticklocs()[0], 1.0)
- p1 = Series([200,500]).plot(log=True,kind='bar')
- p2 = DataFrame([Series([200,300]),Series([300,500])]).plot(log=True,kind='bar',subplots=True)
+ p1 = Series([200, 500]).plot(log=True, kind='bar')
+ p2 = DataFrame([Series([200, 300]),
+ Series([300, 500])]).plot(log=True, kind='bar',
+ subplots=True)
- (p1.yaxis.get_ticklocs() == np.array([ 0.625, 1.625]))
- (p2[0].yaxis.get_ticklocs() == np.array([ 1., 10., 100., 1000.])).all()
- (p2[1].yaxis.get_ticklocs() == np.array([ 1., 10., 100., 1000.])).all()
+ (p1.yaxis.get_ticklocs() == np.array([0.625, 1.625]))
+ (p2[0].yaxis.get_ticklocs() == np.array([1., 10., 100., 1000.])).all()
+ (p2[1].yaxis.get_ticklocs() == np.array([1., 10., 100., 1000.])).all()
@slow
def test_boxplot(self):
@@ -508,6 +514,9 @@ def test_hist(self):
# make sure sharex, sharey is handled
_check_plot_works(df.hist, sharex=True, sharey=True)
+ # handle figsize arg
+ _check_plot_works(df.hist, figsize=(8, 10))
+
# make sure xlabelsize and xrot are handled
ser = df[0]
xf, yf = 20, 20
@@ -727,6 +736,7 @@ def test_invalid_kind(self):
df = DataFrame(np.random.randn(10, 2))
self.assertRaises(ValueError, df.plot, kind='aasdf')
+
class TestDataFrameGroupByPlots(unittest.TestCase):
@classmethod
@@ -786,10 +796,10 @@ def test_time_series_plot_color_with_empty_kwargs(self):
plt.close('all')
for i in range(3):
- ax = Series(np.arange(12) + 1, index=date_range(
- '1/1/2000', periods=12)).plot()
+ ax = Series(np.arange(12) + 1, index=date_range('1/1/2000',
+ periods=12)).plot()
- line_colors = [ l.get_color() for l in ax.get_lines() ]
+ line_colors = [l.get_color() for l in ax.get_lines()]
self.assert_(line_colors == ['b', 'g', 'r'])
@slow
@@ -829,7 +839,6 @@ def test_grouped_hist(self):
self.assertRaises(AttributeError, plotting.grouped_hist, df.A,
by=df.C, foo='bar')
-
def test_option_mpl_style(self):
# just a sanity check
try:
@@ -845,6 +854,7 @@ def test_option_mpl_style(self):
except ValueError:
pass
+
def _check_plot_works(f, *args, **kwargs):
import matplotlib.pyplot as plt
@@ -852,7 +862,7 @@ def _check_plot_works(f, *args, **kwargs):
plt.clf()
ax = fig.add_subplot(211)
ret = f(*args, **kwargs)
- assert(ret is not None) # do something more intelligent
+ assert ret is not None # do something more intelligent
ax = fig.add_subplot(212)
try:
@@ -865,10 +875,12 @@ def _check_plot_works(f, *args, **kwargs):
with ensure_clean() as path:
plt.savefig(path)
+
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index e25e83a40b267..83ad58c1eb41c 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -658,9 +658,9 @@ def r(h):
return ax
-def grouped_hist(data, column=None, by=None, ax=None, bins=50,
- figsize=None, layout=None, sharex=False, sharey=False,
- rot=90, grid=True, **kwargs):
+def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None,
+ layout=None, sharex=False, sharey=False, rot=90, grid=True,
+ **kwargs):
"""
Grouped histogram
@@ -1839,10 +1839,9 @@ def plot_group(group, ax):
return fig
-def hist_frame(
- data, column=None, by=None, grid=True, xlabelsize=None, xrot=None,
- ylabelsize=None, yrot=None, ax=None,
- sharex=False, sharey=False, **kwds):
+def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
+ xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
+ sharey=False, figsize=None, **kwds):
"""
Draw Histogram the DataFrame's series using matplotlib / pylab.
@@ -1866,17 +1865,20 @@ def hist_frame(
ax : matplotlib axes object, default None
sharex : bool, if True, the X axis will be shared amongst all subplots.
sharey : bool, if True, the Y axis will be shared amongst all subplots.
+ figsize : tuple
+ The size of the figure to create in inches by default
kwds : other plotting keyword arguments
To be passed to hist function
"""
if column is not None:
if not isinstance(column, (list, np.ndarray)):
column = [column]
- data = data.ix[:, column]
+ data = data[column]
if by is not None:
- axes = grouped_hist(data, by=by, ax=ax, grid=grid, **kwds)
+ axes = grouped_hist(data, by=by, ax=ax, grid=grid, figsize=figsize,
+ **kwds)
for ax in axes.ravel():
if xlabelsize is not None:
@@ -1898,11 +1900,11 @@ def hist_frame(
rows += 1
else:
cols += 1
- _, axes = _subplots(nrows=rows, ncols=cols, ax=ax, squeeze=False,
- sharex=sharex, sharey=sharey)
+ fig, axes = _subplots(nrows=rows, ncols=cols, ax=ax, squeeze=False,
+ sharex=sharex, sharey=sharey, figsize=figsize)
for i, col in enumerate(com._try_sort(data.columns)):
- ax = axes[i / cols][i % cols]
+ ax = axes[i / cols, i % cols]
ax.xaxis.set_visible(True)
ax.yaxis.set_visible(True)
ax.hist(data[col].dropna().values, **kwds)
@@ -1922,13 +1924,13 @@ def hist_frame(
ax = axes[j / cols, j % cols]
ax.set_visible(False)
- ax.get_figure().subplots_adjust(wspace=0.3, hspace=0.3)
+ fig.subplots_adjust(wspace=0.3, hspace=0.3)
return axes
def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
- xrot=None, ylabelsize=None, yrot=None, **kwds):
+ xrot=None, ylabelsize=None, yrot=None, figsize=None, **kwds):
"""
Draw histogram of the input series using matplotlib
@@ -1948,6 +1950,8 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
+ figsize : tuple, default None
+ figure size in inches by default
kwds : keywords
To be passed to the actual plotting function
@@ -1958,16 +1962,22 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
"""
import matplotlib.pyplot as plt
+ fig = kwds.setdefault('figure', plt.figure(figsize=figsize))
+
if by is None:
if ax is None:
- ax = plt.gca()
+ ax = fig.add_subplot(111)
+ else:
+ if ax.get_figure() != fig:
+ raise AssertionError('passed axis not bound to passed figure')
values = self.dropna().values
ax.hist(values, **kwds)
ax.grid(grid)
axes = np.array([ax])
else:
- axes = grouped_hist(self, by=by, ax=ax, grid=grid, **kwds)
+ axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize,
+ **kwds)
for ax in axes.ravel():
if xlabelsize is not None:
| closes #3834
| https://api.github.com/repos/pandas-dev/pandas/pulls/3842 | 2013-06-10T21:24:08Z | 2013-06-12T19:41:01Z | 2013-06-12T19:41:00Z | 2014-06-20T00:43:51Z |
BUG: GH3611 fix again, float na_values were not stringified correctly | diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 6e937ba696e39..e4fb478a2a288 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -297,6 +297,7 @@ def parser_f(filepath_or_buffer,
skipfooter=None,
skip_footer=0,
na_values=None,
+ na_fvalues=None,
true_values=None,
false_values=None,
delimiter=None,
@@ -359,6 +360,7 @@ def parser_f(filepath_or_buffer,
prefix=prefix,
skiprows=skiprows,
na_values=na_values,
+ na_fvalues=na_fvalues,
true_values=true_values,
false_values=false_values,
keep_default_na=keep_default_na,
@@ -554,7 +556,7 @@ def _clean_options(self, options, engine):
converters = {}
# Converting values to NA
- na_values = _clean_na_values(na_values, keep_default_na)
+ na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
if com.is_integer(skiprows):
skiprows = range(skiprows)
@@ -565,6 +567,7 @@ def _clean_options(self, options, engine):
result['names'] = names
result['converters'] = converters
result['na_values'] = na_values
+ result['na_fvalues'] = na_fvalues
result['skiprows'] = skiprows
return result, engine
@@ -644,6 +647,7 @@ def __init__(self, kwds):
self.keep_date_col = kwds.pop('keep_date_col', False)
self.na_values = kwds.get('na_values')
+ self.na_fvalues = kwds.get('na_fvalues')
self.true_values = kwds.get('true_values')
self.false_values = kwds.get('false_values')
self.tupleize_cols = kwds.get('tupleize_cols',True)
@@ -837,31 +841,34 @@ def _agg_index(self, index, try_parse_dates=True):
arr = self._date_conv(arr)
col_na_values = self.na_values
+ col_na_fvalues = self.na_fvalues
if isinstance(self.na_values, dict):
col_name = self.index_names[i]
if col_name is not None:
- col_na_values = _get_na_values(col_name,
- self.na_values)
-
- arr, _ = self._convert_types(arr, col_na_values)
+ col_na_values, col_na_fvalues = _get_na_values(col_name,
+ self.na_values,
+ self.na_fvalues)
+
+ arr, _ = self._convert_types(arr, col_na_values | col_na_fvalues)
arrays.append(arr)
index = MultiIndex.from_arrays(arrays, names=self.index_names)
return index
- def _convert_to_ndarrays(self, dct, na_values, verbose=False,
+ def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
converters=None):
result = {}
for c, values in dct.iteritems():
conv_f = None if converters is None else converters.get(c, None)
- col_na_values = _get_na_values(c, na_values)
+ col_na_values, col_na_fvalues = _get_na_values(c, na_values, na_fvalues)
coerce_type = True
if conv_f is not None:
values = lib.map_infer(values, conv_f)
coerce_type = False
- cvals, na_count = self._convert_types(values, col_na_values,
+ cvals, na_count = self._convert_types(values,
+ set(col_na_values) | col_na_fvalues,
coerce_type)
result[c] = cvals
if verbose and na_count:
@@ -1370,7 +1377,7 @@ def _convert_data(self, data):
col = self.orig_names[col]
clean_conv[col] = f
- return self._convert_to_ndarrays(data, self.na_values, self.verbose,
+ return self._convert_to_ndarrays(data, self.na_values, self.na_fvalues, self.verbose,
clean_conv)
def _infer_columns(self):
@@ -1754,37 +1761,26 @@ def _try_convert_dates(parser, colspec, data_dict, columns):
def _clean_na_values(na_values, keep_default_na=True):
+
if na_values is None and keep_default_na:
na_values = _NA_VALUES
+ na_fvalues = set()
elif isinstance(na_values, dict):
if keep_default_na:
for k, v in na_values.iteritems():
v = set(list(v)) | _NA_VALUES
na_values[k] = v
+ na_fvalues = dict([ (k, _floatify_na_values(v)) for k, v in na_values.items() ])
else:
if not com.is_list_like(na_values):
na_values = [na_values]
- na_values = set(_stringify_na_values(na_values))
+ na_values = _stringify_na_values(na_values)
if keep_default_na:
na_values = na_values | _NA_VALUES
- return na_values
+ na_fvalues = _floatify_na_values(na_values)
-def _stringify_na_values(na_values):
- """ return a stringified and numeric for these values """
- result = []
- for x in na_values:
- result.append(str(x))
- result.append(x)
- try:
- result.append(float(x))
- except:
- pass
- try:
- result.append(int(x))
- except:
- pass
- return result
+ return na_values, na_fvalues
def _clean_index_names(columns, index_col):
if not _is_index_col(index_col):
@@ -1832,14 +1828,52 @@ def _get_empty_meta(columns, index_col, index_names):
return index, columns, {}
-def _get_na_values(col, na_values):
+def _floatify_na_values(na_values):
+ # create float versions of the na_values
+ result = set()
+ for v in na_values:
+ try:
+ v = float(v)
+ if not np.isnan(v):
+ result.add(v)
+ except:
+ pass
+ return result
+
+def _stringify_na_values(na_values):
+ """ return a stringified and numeric for these values """
+ result = []
+ for x in na_values:
+ result.append(str(x))
+ result.append(x)
+ try:
+ v = float(x)
+
+ # we are like 999 here
+ if v == int(v):
+ v = int(v)
+ result.append("%s.0" % v)
+ result.append(str(v))
+
+ result.append(v)
+ except:
+ pass
+ try:
+ result.append(int(x))
+ except:
+ pass
+ return set(result)
+
+def _get_na_values(col, na_values, na_fvalues):
if isinstance(na_values, dict):
if col in na_values:
- return set(_stringify_na_values(list(na_values[col])))
+ values = na_values[col]
+ fvalues = na_fvalues[col]
+ return na_values[col], na_fvalues[col]
else:
- return _NA_VALUES
+ return _NA_VALUES, set()
else:
- return na_values
+ return na_values, na_fvalues
def _get_col_names(colspec, columns):
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index cae4c0902a97c..cc2dddd829302 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -540,6 +540,36 @@ def test_non_string_na_values(self):
tm.assert_frame_equal(result1,result2)
tm.assert_frame_equal(result2,result3)
+ result4 = read_csv(path, sep= ' ', header=0, na_values=['-999.0'])
+ result5 = read_csv(path, sep= ' ', header=0, na_values=['-999'])
+ result6 = read_csv(path, sep= ' ', header=0, na_values=[-999.0])
+ result7 = read_csv(path, sep= ' ', header=0, na_values=[-999])
+ tm.assert_frame_equal(result4,result3)
+ tm.assert_frame_equal(result5,result3)
+ tm.assert_frame_equal(result6,result3)
+ tm.assert_frame_equal(result7,result3)
+
+ good_compare = result3
+
+ # with an odd float format, so we can't match the string 999.0 exactly,
+ # but need float matching
+ df.to_csv(path, sep=' ', index=False, float_format = '%.3f')
+ result1 = read_csv(path, sep= ' ', header=0, na_values=['-999.0','-999'])
+ result2 = read_csv(path, sep= ' ', header=0, na_values=[-999,-999.0])
+ result3 = read_csv(path, sep= ' ', header=0, na_values=[-999.0,-999])
+ tm.assert_frame_equal(result1,good_compare)
+ tm.assert_frame_equal(result2,good_compare)
+ tm.assert_frame_equal(result3,good_compare)
+
+ result4 = read_csv(path, sep= ' ', header=0, na_values=['-999.0'])
+ result5 = read_csv(path, sep= ' ', header=0, na_values=['-999'])
+ result6 = read_csv(path, sep= ' ', header=0, na_values=[-999.0])
+ result7 = read_csv(path, sep= ' ', header=0, na_values=[-999])
+ tm.assert_frame_equal(result4,good_compare)
+ tm.assert_frame_equal(result5,good_compare)
+ tm.assert_frame_equal(result6,good_compare)
+ tm.assert_frame_equal(result7,good_compare)
+
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
diff --git a/pandas/parser.pyx b/pandas/parser.pyx
index 004c23d09ccdf..eaa588ef4d150 100644
--- a/pandas/parser.pyx
+++ b/pandas/parser.pyx
@@ -231,7 +231,7 @@ cdef class TextReader:
cdef:
parser_t *parser
- object file_handle
+ object file_handle, na_fvalues
bint factorize, na_filter, verbose, has_usecols, has_mi_columns
int parser_start
list clocks
@@ -294,6 +294,7 @@ cdef class TextReader:
na_filter=True,
na_values=None,
+ na_fvalues=None,
true_values=None,
false_values=None,
@@ -391,6 +392,9 @@ cdef class TextReader:
self.delim_whitespace = delim_whitespace
self.na_values = na_values
+ if na_fvalues is None:
+ na_fvalues = set()
+ self.na_fvalues = na_fvalues
self.true_values = _maybe_encode(true_values)
self.false_values = _maybe_encode(false_values)
@@ -834,7 +838,7 @@ cdef class TextReader:
Py_ssize_t i, nused, ncols
kh_str_t *na_hashset = NULL
int start, end
- object name
+ object name, na_flist
bint na_filter = 0
start = self.parser_start
@@ -863,8 +867,9 @@ cdef class TextReader:
conv = self._get_converter(i, name)
# XXX
+ na_flist = set()
if self.na_filter:
- na_list = self._get_na_list(i, name)
+ na_list, na_flist = self._get_na_list(i, name)
if na_list is None:
na_filter = 0
else:
@@ -880,7 +885,7 @@ cdef class TextReader:
# Should return as the desired dtype (inferred or specified)
col_res, na_count = self._convert_tokens(i, start, end, name,
- na_filter, na_hashset)
+ na_filter, na_hashset, na_flist)
if na_filter:
self._free_na_set(na_hashset)
@@ -906,7 +911,8 @@ cdef class TextReader:
cdef inline _convert_tokens(self, Py_ssize_t i, int start, int end,
object name, bint na_filter,
- kh_str_t *na_hashset):
+ kh_str_t *na_hashset,
+ object na_flist):
cdef:
object col_dtype = None
@@ -930,7 +936,7 @@ cdef class TextReader:
col_dtype = np.dtype(col_dtype).str
return self._convert_with_dtype(col_dtype, i, start, end,
- na_filter, 1, na_hashset)
+ na_filter, 1, na_hashset, na_flist)
if i in self.noconvert:
return self._string_convert(i, start, end, na_filter, na_hashset)
@@ -939,10 +945,10 @@ cdef class TextReader:
for dt in dtype_cast_order:
try:
col_res, na_count = self._convert_with_dtype(
- dt, i, start, end, na_filter, 0, na_hashset)
+ dt, i, start, end, na_filter, 0, na_hashset, na_flist)
except OverflowError:
col_res, na_count = self._convert_with_dtype(
- '|O8', i, start, end, na_filter, 0, na_hashset)
+ '|O8', i, start, end, na_filter, 0, na_hashset, na_flist)
if col_res is not None:
break
@@ -953,7 +959,8 @@ cdef class TextReader:
int start, int end,
bint na_filter,
bint user_dtype,
- kh_str_t *na_hashset):
+ kh_str_t *na_hashset,
+ object na_flist):
cdef kh_str_t *true_set, *false_set
if dtype[1] == 'i' or dtype[1] == 'u':
@@ -969,7 +976,7 @@ cdef class TextReader:
elif dtype[1] == 'f':
result, na_count = _try_double(self.parser, i, start, end,
- na_filter, na_hashset)
+ na_filter, na_hashset, na_flist)
if dtype[1:] != 'f8':
result = result.astype(dtype)
@@ -1060,7 +1067,7 @@ cdef class TextReader:
cdef _get_na_list(self, i, name):
if self.na_values is None:
- return None
+ return None, set()
if isinstance(self.na_values, dict):
values = None
@@ -1068,18 +1075,23 @@ cdef class TextReader:
values = self.na_values[name]
if values is not None and not isinstance(values, list):
values = list(values)
+ fvalues = self.na_fvalues[name]
+ if fvalues is not None and not isinstance(fvalues, set):
+ fvalues = set(fvalues)
else:
if i in self.na_values:
- return self.na_values[i]
+ return self.na_values[i], self.na_fvalues[i]
else:
- return _NA_VALUES
+ return _NA_VALUES, set()
- return _ensure_encoded(values)
+ return _ensure_encoded(values), fvalues
else:
if not isinstance(self.na_values, list):
self.na_values = list(self.na_values)
+ if not isinstance(self.na_fvalues, set):
+ self.na_fvalues = set(self.na_fvalues)
- return _ensure_encoded(self.na_values)
+ return _ensure_encoded(self.na_values), self.na_fvalues
cdef _free_na_set(self, kh_str_t *table):
kh_destroy_str(table)
@@ -1163,8 +1175,6 @@ def _maybe_upcast(arr):
# ----------------------------------------------------------------------
# Type conversions / inference support code
-
-
cdef _string_box_factorize(parser_t *parser, int col,
int line_start, int line_end,
bint na_filter, kh_str_t *na_hashset):
@@ -1357,7 +1367,7 @@ cdef char* cinf = b'inf'
cdef char* cneginf = b'-inf'
cdef _try_double(parser_t *parser, int col, int line_start, int line_end,
- bint na_filter, kh_str_t *na_hashset):
+ bint na_filter, kh_str_t *na_hashset, object na_flist):
cdef:
int error, na_count = 0
size_t i, lines
@@ -1367,6 +1377,7 @@ cdef _try_double(parser_t *parser, int col, int line_start, int line_end,
double NA = na_values[np.float64]
ndarray result
khiter_t k
+ bint use_na_flist = len(na_flist) > 0
lines = line_end - line_start
result = np.empty(lines, dtype=np.float64)
@@ -1391,6 +1402,10 @@ cdef _try_double(parser_t *parser, int col, int line_start, int line_end,
data[0] = NEGINF
else:
return None, None
+ if use_na_flist:
+ if data[0] in na_flist:
+ na_count += 1
+ data[0] = NA
data += 1
else:
for i in range(lines):
| now, 999.0 (a float) will have: ['999','999.0'] added for matching, as well as matching a float value of 999.0 in a float column
closes #3611 again!
| https://api.github.com/repos/pandas-dev/pandas/pulls/3841 | 2013-06-10T20:40:22Z | 2013-06-11T17:43:23Z | 2013-06-11T17:43:23Z | 2014-07-07T04:45:28Z |
BUG: fix Series.interpolate() corner cases, close #3674 | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 3a7a7d0f49b66..3439aeb79e174 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3180,14 +3180,15 @@ def interpolate(self, method='linear'):
invalid = isnull(values)
valid = -invalid
- firstIndex = valid.argmax()
- valid = valid[firstIndex:]
- invalid = invalid[firstIndex:]
- inds = inds[firstIndex:]
-
result = values.copy()
- result[firstIndex:][invalid] = np.interp(inds[invalid], inds[valid],
- values[firstIndex:][valid])
+ if valid.any():
+ firstIndex = valid.argmax()
+ valid = valid[firstIndex:]
+ invalid = invalid[firstIndex:]
+ inds = inds[firstIndex:]
+
+ result[firstIndex:][invalid] = np.interp(
+ inds[invalid], inds[valid], values[firstIndex:][valid])
return Series(result, index=self.index, name=self.name)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index e1589b9499757..58ca34b73b6a0 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -4063,6 +4063,13 @@ def test_interpolate(self):
# try time interpolation on a non-TimeSeries
self.assertRaises(Exception, self.series.interpolate, method='time')
+ def test_interpolate_corners(self):
+ s = Series([np.nan, np.nan])
+ assert_series_equal(s.interpolate(), s)
+
+ s = Series([]).interpolate()
+ assert_series_equal(s.interpolate(), s)
+
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
| closes #3674
| https://api.github.com/repos/pandas-dev/pandas/pulls/3840 | 2013-06-10T19:15:24Z | 2013-06-13T18:42:47Z | 2013-06-13T18:42:47Z | 2014-07-16T08:13:15Z |
TST regression tests for GH3836 | diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index ad3d150c7e0ad..e7f824ace983c 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -974,6 +974,23 @@ def test_iloc_mask(self):
(key,ans,r))
warnings.filterwarnings(action='always', category=UserWarning)
+ def test_ix_slicing_strings(self):
+ ##GH3836
+ data = {'Classification': ['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
+ 'Random': [1,2,3,4,5],
+ 'X': ['correct', 'wrong','correct', 'correct','wrong']}
+ df = DataFrame(data)
+ x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'])]
+ df.ix[x.index,'X'] = df['Classification']
+
+ expected = DataFrame({'Classification': {0: 'SA EQUITY CFD', 1: 'bbb',
+ 2: 'SA EQUITY', 3: 'SA SSF', 4: 'aaa'},
+ 'Random': {0: 1, 1: 2, 2: 3, 3: 4, 4: 5},
+ 'X': {0: 'correct', 1: 'bbb', 2: 'correct',
+ 3: 'correct', 4: 'aaa'}}) # bug was 4: 'bbb'
+
+ assert_frame_equal(df, expected)
+
def test_non_unique_loc(self):
## GH3659
## non-unique indexer with loc slice
| cc #3836
Added test for example from OP (which is not working in 0.11 but is fixed in master).
| https://api.github.com/repos/pandas-dev/pandas/pulls/3839 | 2013-06-10T19:13:04Z | 2013-06-10T19:13:17Z | 2013-06-10T19:13:17Z | 2014-06-26T11:27:24Z |
ENH: support for msgpack serialization/deserialization | diff --git a/LICENSES/MSGPACK_LICENSE b/LICENSES/MSGPACK_LICENSE
new file mode 100644
index 0000000000000..ae1b0f2f32f06
--- /dev/null
+++ b/LICENSES/MSGPACK_LICENSE
@@ -0,0 +1,13 @@
+Copyright (C) 2008-2011 INADA Naoki <songofacandy@gmail.com>
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/LICENSES/MSGPACK_NUMPY_LICENSE b/LICENSES/MSGPACK_NUMPY_LICENSE
new file mode 100644
index 0000000000000..e570011efac73
--- /dev/null
+++ b/LICENSES/MSGPACK_NUMPY_LICENSE
@@ -0,0 +1,33 @@
+.. -*- rst -*-
+
+License
+=======
+
+Copyright (c) 2013, Lev Givon.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+* Neither the name of Lev Givon nor the names of any
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 5e04fcff61539..9442f59425106 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -36,6 +36,7 @@ object.
* ``read_hdf``
* ``read_sql``
* ``read_json``
+ * ``read_msgpack`` (experimental)
* ``read_html``
* ``read_stata``
* ``read_clipboard``
@@ -48,6 +49,7 @@ The corresponding ``writer`` functions are object methods that are accessed like
* ``to_hdf``
* ``to_sql``
* ``to_json``
+ * ``to_msgpack`` (experimental)
* ``to_html``
* ``to_stata``
* ``to_clipboard``
@@ -1732,6 +1734,72 @@ module is installed you can use it as a xlsx writer engine as follows:
.. _io.hdf5:
+Serialization
+-------------
+
+msgpack (experimental)
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. _io.msgpack:
+
+.. versionadded:: 0.13.0
+
+Starting in 0.13.0, pandas is supporting the ``msgpack`` format for
+object serialization. This is a lightweight portable binary format, similar
+to binary JSON, that is highly space efficient, and provides good performance
+both on the writing (serialization), and reading (deserialization).
+
+.. warning::
+
+ This is a very new feature of pandas. We intend to provide certain
+ optimizations in the io of the ``msgpack`` data. Since this is marked
+ as an EXPERIMENTAL LIBRARY, the storage format may not be stable until a future release.
+
+.. ipython:: python
+
+ df = DataFrame(np.random.rand(5,2),columns=list('AB'))
+ df.to_msgpack('foo.msg')
+ pd.read_msgpack('foo.msg')
+ s = Series(np.random.rand(5),index=date_range('20130101',periods=5))
+
+You can pass a list of objects and you will receive them back on deserialization.
+
+.. ipython:: python
+
+ pd.to_msgpack('foo.msg', df, 'foo', np.array([1,2,3]), s)
+ pd.read_msgpack('foo.msg')
+
+You can pass ``iterator=True`` to iterate over the unpacked results
+
+.. ipython:: python
+
+ for o in pd.read_msgpack('foo.msg',iterator=True):
+ print o
+
+You can pass ``append=True`` to the writer to append to an existing pack
+
+.. ipython:: python
+
+ df.to_msgpack('foo.msg',append=True)
+ pd.read_msgpack('foo.msg')
+
+Unlike other io methods, ``to_msgpack`` is available on both a per-object basis,
+``df.to_msgpack()`` and using the top-level ``pd.to_msgpack(...)`` where you
+can pack arbitrary collections of python lists, dicts, scalars, while intermixing
+pandas objects.
+
+.. ipython:: python
+
+ pd.to_msgpack('foo2.msg', { 'dict' : [ { 'df' : df }, { 'string' : 'foo' }, { 'scalar' : 1. }, { 's' : s } ] })
+ pd.read_msgpack('foo2.msg')
+
+.. ipython:: python
+ :suppress:
+ :okexcept:
+
+ os.remove('foo.msg')
+ os.remove('foo2.msg')
+
HDF5 (PyTables)
---------------
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 65e6ca0e1d95c..be62ef7d31a0b 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -64,17 +64,19 @@ New features
Experimental Features
~~~~~~~~~~~~~~~~~~~~~
-- The new :func:`~pandas.eval` function implements expression evaluation using
- ``numexpr`` behind the scenes. This results in large speedups for complicated
- expressions involving large DataFrames/Series.
-- :class:`~pandas.DataFrame` has a new :meth:`~pandas.DataFrame.eval` that
- evaluates an expression in the context of the ``DataFrame``.
-- A :meth:`~pandas.DataFrame.query` method has been added that allows
- you to select elements of a ``DataFrame`` using a natural query syntax nearly
- identical to Python syntax.
-- ``pd.eval`` and friends now evaluate operations involving ``datetime64``
- objects in Python space because ``numexpr`` cannot handle ``NaT`` values
- (:issue:`4897`).
+ - The new :func:`~pandas.eval` function implements expression evaluation using
+ ``numexpr`` behind the scenes. This results in large speedups for complicated
+ expressions involving large DataFrames/Series.
+ - :class:`~pandas.DataFrame` has a new :meth:`~pandas.DataFrame.eval` that
+ evaluates an expression in the context of the ``DataFrame``.
+ - A :meth:`~pandas.DataFrame.query` method has been added that allows
+ you to select elements of a ``DataFrame`` using a natural query syntax nearly
+ identical to Python syntax.
+ - ``pd.eval`` and friends now evaluate operations involving ``datetime64``
+ objects in Python space because ``numexpr`` cannot handle ``NaT`` values
+ (:issue:`4897`).
+ - Add msgpack support via ``pd.read_msgpack()`` and ``pd.to_msgpack()/df.to_msgpack()`` for serialization
+ of arbitrary pandas (and python objects) in a lightweight portable binary format (:issue:`686`)
Improvements to existing features
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt
index 5ff7038d02e45..98099bac15900 100644
--- a/doc/source/v0.13.0.txt
+++ b/doc/source/v0.13.0.txt
@@ -464,6 +464,15 @@ Enhancements
t = Timestamp('20130101 09:01:02')
t + pd.datetools.Nano(123)
+ - The ``isin`` method plays nicely with boolean indexing. To get the rows where each condition is met:
+
+ .. ipython:: python
+
+ mask = df.isin({'A': [1, 2], 'B': ['e', 'f']})
+ df[mask.all(1)]
+
+ See the :ref:`documentation<indexing.basics.indexing_isin>` for more.
+
.. _whatsnew_0130.experimental:
Experimental
@@ -553,21 +562,35 @@ Experimental
For more details see the :ref:`indexing documentation on query
<indexing.query>`.
- - DataFrame now has an ``isin`` method that can be used to easily check whether the DataFrame's values are contained in an iterable. Use a dictionary if you'd like to check specific iterables for specific columns or rows.
+- ``pd.read_msgpack()`` and ``pd.to_msgpack()`` are now a supported method of serialization
+ of arbitrary pandas (and python objects) in a lightweight portable binary format. :ref:`See the docs<io.msgpack>`
- .. ipython:: python
+ .. warning::
+
+ Since this is an EXPERIMENTAL LIBRARY, the storage format may not be stable until a future release.
- df = pd.DataFrame({'A': [1, 2, 3], 'B': ['d', 'e', 'f']})
- df.isin({'A': [1, 2], 'B': ['e', 'f']})
+ .. ipython:: python
- The ``isin`` method plays nicely with boolean indexing. To get the rows where each condition is met:
+ df = DataFrame(np.random.rand(5,2),columns=list('AB'))
+ df.to_msgpack('foo.msg')
+ pd.read_msgpack('foo.msg')
- .. ipython:: python
+ s = Series(np.random.rand(5),index=date_range('20130101',periods=5))
+ pd.to_msgpack('foo.msg', df, s)
+ pd.read_msgpack('foo.msg')
- mask = df.isin({'A': [1, 2], 'B': ['e', 'f']})
- df[mask.all(1)]
+ You can pass ``iterator=True`` to iterator over the unpacked results
+
+ .. ipython:: python
+
+ for o in pd.read_msgpack('foo.msg',iterator=True):
+ print o
+
+ .. ipython:: python
+ :suppress:
+ :okexcept:
- See the :ref:`documentation<indexing.basics.indexing_isin>` for more.
+ os.remove('foo.msg')
.. _whatsnew_0130.refactoring:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 835b66512a89e..3142f74f2f5c5 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -805,6 +805,25 @@ def to_hdf(self, path_or_buf, key, **kwargs):
from pandas.io import pytables
return pytables.to_hdf(path_or_buf, key, self, **kwargs)
+ def to_msgpack(self, path_or_buf, **kwargs):
+ """
+ msgpack (serialize) object to input file path
+
+ THIS IS AN EXPERIMENTAL LIBRARY and the storage format
+ may not be stable until a future release.
+
+ Parameters
+ ----------
+ path : string File path
+ args : an object or objects to serialize
+ append : boolean whether to append to an existing msgpack
+ (default is False)
+ compress : type of compressor (zlib or blosc), default to None (no compression)
+ """
+
+ from pandas.io import packers
+ return packers.to_msgpack(path_or_buf, self, **kwargs)
+
def to_pickle(self, path):
"""
Pickle (serialize) object to input file path
diff --git a/pandas/io/api.py b/pandas/io/api.py
index 94deb51ab4b18..dc9ea290eb45e 100644
--- a/pandas/io/api.py
+++ b/pandas/io/api.py
@@ -11,3 +11,4 @@
from pandas.io.sql import read_sql
from pandas.io.stata import read_stata
from pandas.io.pickle import read_pickle, to_pickle
+from pandas.io.packers import read_msgpack, to_msgpack
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
new file mode 100644
index 0000000000000..d6aa1ebeb896a
--- /dev/null
+++ b/pandas/io/packers.py
@@ -0,0 +1,534 @@
+"""
+Msgpack serializer support for reading and writing pandas data structures
+to disk
+"""
+
+# portions of msgpack_numpy package, by Lev Givon were incorporated
+# into this module (and tests_packers.py)
+
+"""
+License
+=======
+
+Copyright (c) 2013, Lev Givon.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+* Neither the name of Lev Givon nor the names of any
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+from datetime import datetime, date, timedelta
+from dateutil.parser import parse
+
+import numpy as np
+from pandas import compat
+from pandas.compat import u
+from pandas import (
+ Timestamp, Period, Series, DataFrame, Panel, Panel4D,
+ Index, MultiIndex, Int64Index, PeriodIndex, DatetimeIndex, Float64Index, NaT
+)
+from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel
+from pandas.sparse.array import BlockIndex, IntIndex
+from pandas.core.generic import NDFrame
+from pandas.core.common import needs_i8_conversion
+from pandas.core.internals import BlockManager, make_block
+import pandas.core.internals as internals
+
+from pandas.msgpack import Unpacker as _Unpacker, Packer as _Packer
+import zlib
+
+try:
+ import blosc
+ _BLOSC = True
+except:
+ _BLOSC = False
+
+# until we can pass this into our conversion functions,
+# this is pretty hacky
+compressor = None
+
+
+def to_msgpack(path, *args, **kwargs):
+ """
+ msgpack (serialize) object to input file path
+
+ THIS IS AN EXPERIMENTAL LIBRARY and the storage format
+ may not be stable until a future release.
+
+ Parameters
+ ----------
+ path : string File path
+ args : an object or objects to serialize
+ append : boolean whether to append to an existing msgpack
+ (default is False)
+ compress : type of compressor (zlib or blosc), default to None (no compression)
+ """
+ global compressor
+ compressor = kwargs.pop('compress', None)
+ append = kwargs.pop('append', None)
+ if append:
+ f = open(path, 'a+b')
+ else:
+ f = open(path, 'wb')
+ try:
+ for a in args:
+ f.write(pack(a, **kwargs))
+ finally:
+ f.close()
+
+
+def read_msgpack(path, iterator=False, **kwargs):
+ """
+ Load msgpack pandas object from the specified
+ file path
+
+ THIS IS AN EXPERIMENTAL LIBRARY and the storage format
+ may not be stable until a future release.
+
+ Parameters
+ ----------
+ path : string
+ File path
+ iterator : boolean, if True, return an iterator to the unpacker
+ (default is False)
+
+ Returns
+ -------
+ obj : type of object stored in file
+
+ """
+ if iterator:
+ return Iterator(path)
+
+ with open(path, 'rb') as fh:
+ l = list(unpack(fh))
+ if len(l) == 1:
+ return l[0]
+ return l
+
+dtype_dict = {21: np.dtype('M8[ns]'),
+ u('datetime64[ns]'): np.dtype('M8[ns]'),
+ u('datetime64[us]'): np.dtype('M8[us]'),
+ 22: np.dtype('m8[ns]'),
+ u('timedelta64[ns]'): np.dtype('m8[ns]'),
+ u('timedelta64[us]'): np.dtype('m8[us]')}
+
+
+def dtype_for(t):
+ if t in dtype_dict:
+ return dtype_dict[t]
+ return np.typeDict[t]
+
+c2f_dict = {'complex': np.float64,
+ 'complex128': np.float64,
+ 'complex64': np.float32}
+
+# numpy 1.6.1 compat
+if hasattr(np, 'float128'):
+ c2f_dict['complex256'] = np.float128
+
+
+def c2f(r, i, ctype_name):
+ """
+ Convert strings to complex number instance with specified numpy type.
+ """
+
+ ftype = c2f_dict[ctype_name]
+ return np.typeDict[ctype_name](ftype(r) + 1j * ftype(i))
+
+
+def convert(values):
+ """ convert the numpy values to a list """
+
+ dtype = values.dtype
+ if needs_i8_conversion(dtype):
+ values = values.view('i8')
+ v = values.ravel()
+
+ if compressor == 'zlib':
+
+ # return string arrays like they are
+ if dtype == np.object_:
+ return v.tolist()
+
+ # convert to a bytes array
+ v = v.tostring()
+ return zlib.compress(v)
+
+ elif compressor == 'blosc' and _BLOSC:
+
+ # return string arrays like they are
+ if dtype == np.object_:
+ return v.tolist()
+
+ # convert to a bytes array
+ v = v.tostring()
+ return blosc.compress(v, typesize=dtype.itemsize)
+
+ # ndarray (on original dtype)
+ if dtype == 'float64' or dtype == 'int64':
+ return v
+
+ # as a list
+ return v.tolist()
+
+
+def unconvert(values, dtype, compress=None):
+
+ if dtype == np.object_:
+ return np.array(values, dtype=object)
+
+ if compress == 'zlib':
+
+ values = zlib.decompress(values)
+ return np.frombuffer(values, dtype=dtype)
+
+ elif compress == 'blosc':
+
+ if not _BLOSC:
+ raise Exception("cannot uncompress w/o blosc")
+
+ # decompress
+ values = blosc.decompress(values)
+
+ return np.frombuffer(values, dtype=dtype)
+
+ # as a list
+ return np.array(values, dtype=dtype)
+
+
+def encode(obj):
+ """
+ Data encoder
+ """
+
+ tobj = type(obj)
+ if isinstance(obj, Index):
+ if isinstance(obj, PeriodIndex):
+ return {'typ': 'period_index',
+ 'klass': obj.__class__.__name__,
+ 'name': getattr(obj, 'name', None),
+ 'freq': obj.freqstr,
+ 'dtype': obj.dtype.num,
+ 'data': convert(obj.asi8)}
+ elif isinstance(obj, DatetimeIndex):
+ return {'typ': 'datetime_index',
+ 'klass': obj.__class__.__name__,
+ 'name': getattr(obj, 'name', None),
+ 'dtype': obj.dtype.num,
+ 'data': convert(obj.asi8),
+ 'freq': obj.freqstr,
+ 'tz': obj.tz}
+ elif isinstance(obj, MultiIndex):
+ return {'typ': 'multi_index',
+ 'klass': obj.__class__.__name__,
+ 'names': getattr(obj, 'names', None),
+ 'dtype': obj.dtype.num,
+ 'data': convert(obj.values)}
+ else:
+ return {'typ': 'index',
+ 'klass': obj.__class__.__name__,
+ 'name': getattr(obj, 'name', None),
+ 'dtype': obj.dtype.num,
+ 'data': obj.tolist()}
+ elif isinstance(obj, Series):
+ if isinstance(obj, SparseSeries):
+ d = {'typ': 'sparse_series',
+ 'klass': obj.__class__.__name__,
+ 'dtype': obj.dtype.num,
+ 'index': obj.index,
+ 'sp_index': obj.sp_index,
+ 'sp_values': convert(obj.sp_values),
+ 'compress': compressor}
+ for f in ['name', 'fill_value', 'kind']:
+ d[f] = getattr(obj, f, None)
+ return d
+ else:
+ return {'typ': 'series',
+ 'klass': obj.__class__.__name__,
+ 'name': getattr(obj, 'name', None),
+ 'index': obj.index,
+ 'dtype': obj.dtype.num,
+ 'data': convert(obj.values),
+ 'compress': compressor}
+ elif issubclass(tobj, NDFrame):
+ if isinstance(obj, SparseDataFrame):
+ d = {'typ': 'sparse_dataframe',
+ 'klass': obj.__class__.__name__,
+ 'columns': obj.columns}
+ for f in ['default_fill_value', 'default_kind']:
+ d[f] = getattr(obj, f, None)
+ d['data'] = dict([(name, ss)
+ for name, ss in compat.iteritems(obj)])
+ return d
+ elif isinstance(obj, SparsePanel):
+ d = {'typ': 'sparse_panel',
+ 'klass': obj.__class__.__name__,
+ 'items': obj.items}
+ for f in ['default_fill_value', 'default_kind']:
+ d[f] = getattr(obj, f, None)
+ d['data'] = dict([(name, df)
+ for name, df in compat.iteritems(obj)])
+ return d
+ else:
+
+ data = obj._data
+ if not data.is_consolidated():
+ data = data.consolidate()
+
+ # the block manager
+ return {'typ': 'block_manager',
+ 'klass': obj.__class__.__name__,
+ 'axes': data.axes,
+ 'blocks': [{'items': b.items,
+ 'values': convert(b.values),
+ 'shape': b.values.shape,
+ 'dtype': b.dtype.num,
+ 'klass': b.__class__.__name__,
+ 'compress': compressor
+ } for b in data.blocks]}
+
+ elif isinstance(obj, (datetime, date, np.datetime64, timedelta, np.timedelta64)):
+ if isinstance(obj, Timestamp):
+ tz = obj.tzinfo
+ if tz is not None:
+ tz = tz.zone
+ offset = obj.offset
+ if offset is not None:
+ offset = offset.freqstr
+ return {'typ': 'timestamp',
+ 'value': obj.value,
+ 'offset': offset,
+ 'tz': tz}
+ elif isinstance(obj, np.timedelta64):
+ return {'typ': 'timedelta64',
+ 'data': obj.view('i8')}
+ elif isinstance(obj, timedelta):
+ return {'typ': 'timedelta',
+ 'data': (obj.days, obj.seconds, obj.microseconds)}
+ elif isinstance(obj, np.datetime64):
+ return {'typ': 'datetime64',
+ 'data': str(obj)}
+ elif isinstance(obj, datetime):
+ return {'typ': 'datetime',
+ 'data': obj.isoformat()}
+ elif isinstance(obj, date):
+ return {'typ': 'date',
+ 'data': obj.isoformat()}
+ raise Exception("cannot encode this datetimelike object: %s" % obj)
+ elif isinstance(obj, Period):
+ return {'typ': 'period',
+ 'ordinal': obj.ordinal,
+ 'freq': obj.freq}
+ elif isinstance(obj, BlockIndex):
+ return {'typ': 'block_index',
+ 'klass': obj.__class__.__name__,
+ 'blocs': obj.blocs,
+ 'blengths': obj.blengths,
+ 'length': obj.length}
+ elif isinstance(obj, IntIndex):
+ return {'typ': 'int_index',
+ 'klass': obj.__class__.__name__,
+ 'indices': obj.indices,
+ 'length': obj.length}
+ elif isinstance(obj, np.ndarray) and obj.dtype not in ['float64', 'int64']:
+ return {'typ': 'ndarray',
+ 'shape': obj.shape,
+ 'ndim': obj.ndim,
+ 'dtype': obj.dtype.num,
+ 'data': convert(obj),
+ 'compress': compressor}
+ elif isinstance(obj, np.number):
+ if np.iscomplexobj(obj):
+ return {'typ': 'np_scalar',
+ 'sub_typ': 'np_complex',
+ 'dtype': obj.dtype.name,
+ 'real': obj.real.__repr__(),
+ 'imag': obj.imag.__repr__()}
+ else:
+ return {'typ': 'np_scalar',
+ 'dtype': obj.dtype.name,
+ 'data': obj.__repr__()}
+ elif isinstance(obj, complex):
+ return {'typ': 'np_complex',
+ 'real': obj.real.__repr__(),
+ 'imag': obj.imag.__repr__()}
+
+ return obj
+
+
+def decode(obj):
+ """
+ Decoder for deserializing numpy data types.
+ """
+
+ typ = obj.get('typ')
+ if typ is None:
+ return obj
+ elif typ == 'timestamp':
+ return Timestamp(obj['value'], tz=obj['tz'], offset=obj['offset'])
+ elif typ == 'period':
+ return Period(ordinal=obj['ordinal'], freq=obj['freq'])
+ elif typ == 'index':
+ dtype = dtype_for(obj['dtype'])
+ data = obj['data']
+ return globals()[obj['klass']](data, dtype=dtype, name=obj['name'])
+ elif typ == 'multi_index':
+ return globals()[obj['klass']].from_tuples(obj['data'], names=obj['names'])
+ elif typ == 'period_index':
+ return globals()[obj['klass']](obj['data'], name=obj['name'], freq=obj['freq'])
+ elif typ == 'datetime_index':
+ return globals()[obj['klass']](obj['data'], freq=obj['freq'], tz=obj['tz'], name=obj['name'])
+ elif typ == 'series':
+ dtype = dtype_for(obj['dtype'])
+ index = obj['index']
+ return globals()[obj['klass']](unconvert(obj['data'], dtype, obj['compress']), index=index, name=obj['name'])
+ elif typ == 'block_manager':
+ axes = obj['axes']
+
+ def create_block(b):
+ dtype = dtype_for(b['dtype'])
+ return make_block(unconvert(b['values'], dtype, b['compress']).reshape(b['shape']), b['items'], axes[0], klass=getattr(internals, b['klass']))
+
+ blocks = [create_block(b) for b in obj['blocks']]
+ return globals()[obj['klass']](BlockManager(blocks, axes))
+ elif typ == 'datetime':
+ return parse(obj['data'])
+ elif typ == 'datetime64':
+ return np.datetime64(parse(obj['data']))
+ elif typ == 'date':
+ return parse(obj['data']).date()
+ elif typ == 'timedelta':
+ return timedelta(*obj['data'])
+ elif typ == 'timedelta64':
+ return np.timedelta64(int(obj['data']))
+ elif typ == 'sparse_series':
+ dtype = dtype_for(obj['dtype'])
+ return globals(
+ )[obj['klass']](unconvert(obj['sp_values'], dtype, obj['compress']), sparse_index=obj['sp_index'],
+ index=obj['index'], fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])
+ elif typ == 'sparse_dataframe':
+ return globals()[obj['klass']](obj['data'],
+ columns=obj['columns'], default_fill_value=obj['default_fill_value'], default_kind=obj['default_kind'])
+ elif typ == 'sparse_panel':
+ return globals()[obj['klass']](obj['data'],
+ items=obj['items'], default_fill_value=obj['default_fill_value'], default_kind=obj['default_kind'])
+ elif typ == 'block_index':
+ return globals()[obj['klass']](obj['length'], obj['blocs'], obj['blengths'])
+ elif typ == 'int_index':
+ return globals()[obj['klass']](obj['length'], obj['indices'])
+ elif typ == 'ndarray':
+ return unconvert(obj['data'], np.typeDict[obj['dtype']], obj.get('compress')).reshape(obj['shape'])
+ elif typ == 'np_scalar':
+ if obj.get('sub_typ') == 'np_complex':
+ return c2f(obj['real'], obj['imag'], obj['dtype'])
+ else:
+ dtype = dtype_for(obj['dtype'])
+ try:
+ return dtype(obj['data'])
+ except:
+ return dtype.type(obj['data'])
+ elif typ == 'np_complex':
+ return complex(obj['real'] + '+' + obj['imag'] + 'j')
+ elif isinstance(obj, (dict, list, set)):
+ return obj
+ else:
+ return obj
+
+
+def pack(o, default=encode,
+ encoding='utf-8', unicode_errors='strict', use_single_float=False):
+ """
+ Pack an object and return the packed bytes.
+ """
+
+ return Packer(default=default, encoding=encoding,
+ unicode_errors=unicode_errors,
+ use_single_float=use_single_float).pack(o)
+
+
+def unpack(packed, object_hook=decode,
+ list_hook=None, use_list=False, encoding='utf-8',
+ unicode_errors='strict', object_pairs_hook=None):
+ """
+ Unpack a packed object, return an iterator
+ Note: packed lists will be returned as tuples
+ """
+
+ return Unpacker(packed, object_hook=object_hook,
+ list_hook=list_hook,
+ use_list=use_list, encoding=encoding,
+ unicode_errors=unicode_errors,
+ object_pairs_hook=object_pairs_hook)
+
+
+class Packer(_Packer):
+
+ def __init__(self, default=encode,
+ encoding='utf-8',
+ unicode_errors='strict',
+ use_single_float=False):
+ super(Packer, self).__init__(default=default,
+ encoding=encoding,
+ unicode_errors=unicode_errors,
+ use_single_float=use_single_float)
+
+
+class Unpacker(_Unpacker):
+
+ def __init__(self, file_like=None, read_size=0, use_list=False,
+ object_hook=decode,
+ object_pairs_hook=None, list_hook=None, encoding='utf-8',
+ unicode_errors='strict', max_buffer_size=0):
+ super(Unpacker, self).__init__(file_like=file_like,
+ read_size=read_size,
+ use_list=use_list,
+ object_hook=object_hook,
+ object_pairs_hook=object_pairs_hook,
+ list_hook=list_hook,
+ encoding=encoding,
+ unicode_errors=unicode_errors,
+ max_buffer_size=max_buffer_size)
+
+
+class Iterator(object):
+
+ """ manage the unpacking iteration,
+ close the file on completion """
+
+ def __init__(self, path, **kwargs):
+ self.path = path
+ self.kwargs = kwargs
+
+ def __iter__(self):
+
+ try:
+ fh = open(self.path, 'rb')
+ unpacker = unpack(fh)
+ for o in unpacker:
+ yield o
+ finally:
+ fh.close()
diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py
new file mode 100644
index 0000000000000..79b421ff7b047
--- /dev/null
+++ b/pandas/io/tests/test_packers.py
@@ -0,0 +1,387 @@
+import nose
+import unittest
+
+import datetime
+import numpy as np
+
+from pandas import compat
+from pandas.compat import u
+from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
+ date_range, period_range, Index, SparseSeries, SparseDataFrame,
+ SparsePanel)
+import pandas.util.testing as tm
+from pandas.util.testing import ensure_clean
+from pandas.tests.test_series import assert_series_equal
+from pandas.tests.test_frame import assert_frame_equal
+from pandas.tests.test_panel import assert_panel_equal
+
+import pandas
+from pandas.sparse.tests.test_sparse import assert_sp_series_equal, assert_sp_frame_equal
+from pandas import Timestamp, tslib
+
+nan = np.nan
+
+from pandas.io.packers import to_msgpack, read_msgpack
+
+_multiprocess_can_split_ = False
+
+
+def check_arbitrary(a, b):
+
+ if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
+ assert(len(a) == len(b))
+ for a_, b_ in zip(a, b):
+ check_arbitrary(a_, b_)
+ elif isinstance(a, Panel):
+ assert_panel_equal(a, b)
+ elif isinstance(a, DataFrame):
+ assert_frame_equal(a, b)
+ elif isinstance(a, Series):
+ assert_series_equal(a, b)
+ else:
+ assert(a == b)
+
+
+class Test(unittest.TestCase):
+
+ def setUp(self):
+ self.path = '__%s__.msg' % tm.rands(10)
+
+ def tearDown(self):
+ pass
+
+ def encode_decode(self, x, **kwargs):
+ with ensure_clean(self.path) as p:
+ to_msgpack(p, x, **kwargs)
+ return read_msgpack(p, **kwargs)
+
+
+class TestNumpy(Test):
+
+ def test_numpy_scalar_float(self):
+ x = np.float32(np.random.rand())
+ x_rec = self.encode_decode(x)
+ self.assert_(np.allclose(x, x_rec) and type(x) == type(x_rec))
+
+ def test_numpy_scalar_complex(self):
+ x = np.complex64(np.random.rand() + 1j * np.random.rand())
+ x_rec = self.encode_decode(x)
+ self.assert_(np.allclose(x, x_rec) and type(x) == type(x_rec))
+
+ def test_scalar_float(self):
+ x = np.random.rand()
+ x_rec = self.encode_decode(x)
+ self.assert_(np.allclose(x, x_rec) and type(x) == type(x_rec))
+
+ def test_scalar_complex(self):
+ x = np.random.rand() + 1j * np.random.rand()
+ x_rec = self.encode_decode(x)
+ self.assert_(np.allclose(x, x_rec) and type(x) == type(x_rec))
+
+ def test_list_numpy_float(self):
+ raise nose.SkipTest('buggy test')
+ x = [np.float32(np.random.rand()) for i in range(5)]
+ x_rec = self.encode_decode(x)
+ self.assert_(all(map(lambda x, y:
+ x == y, x, x_rec)) and
+ all(map(lambda x, y: type(x) == type(y), x, x_rec)))
+
+ def test_list_numpy_float_complex(self):
+ if not hasattr(np, 'complex128'):
+ raise nose.SkipTest('numpy cant handle complex128')
+
+ # buggy test
+ raise nose.SkipTest('buggy test')
+ x = [np.float32(np.random.rand()) for i in range(5)] + \
+ [np.complex128(np.random.rand() + 1j * np.random.rand())
+ for i in range(5)]
+ x_rec = self.encode_decode(x)
+ self.assert_(all(map(lambda x, y: x == y, x, x_rec)) and
+ all(map(lambda x, y: type(x) == type(y), x, x_rec)))
+
+ def test_list_float(self):
+ x = [np.random.rand() for i in range(5)]
+ x_rec = self.encode_decode(x)
+ self.assert_(all(map(lambda x, y: x == y, x, x_rec)) and
+ all(map(lambda x, y: type(x) == type(y), x, x_rec)))
+
+ def test_list_float_complex(self):
+ x = [np.random.rand() for i in range(5)] + \
+ [(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
+ x_rec = self.encode_decode(x)
+ self.assert_(all(map(lambda x, y: x == y, x, x_rec)) and
+ all(map(lambda x, y: type(x) == type(y), x, x_rec)))
+
+ def test_dict_float(self):
+ x = {'foo': 1.0, 'bar': 2.0}
+ x_rec = self.encode_decode(x)
+ self.assert_(all(map(lambda x, y: x == y, x.values(), x_rec.values())) and
+ all(map(lambda x, y: type(x) == type(y), x.values(), x_rec.values())))
+
+ def test_dict_complex(self):
+ x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
+ x_rec = self.encode_decode(x)
+ self.assert_(all(map(lambda x, y: x == y, x.values(), x_rec.values())) and
+ all(map(lambda x, y: type(x) == type(y), x.values(), x_rec.values())))
+
+ def test_dict_numpy_float(self):
+ x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
+ x_rec = self.encode_decode(x)
+ self.assert_(all(map(lambda x, y: x == y, x.values(), x_rec.values())) and
+ all(map(lambda x, y: type(x) == type(y), x.values(), x_rec.values())))
+
+ def test_dict_numpy_complex(self):
+ x = {'foo': np.complex128(
+ 1.0 + 1.0j), 'bar': np.complex128(2.0 + 2.0j)}
+ x_rec = self.encode_decode(x)
+ self.assert_(all(map(lambda x, y: x == y, x.values(), x_rec.values())) and
+ all(map(lambda x, y: type(x) == type(y), x.values(), x_rec.values())))
+
+ def test_numpy_array_float(self):
+ x = np.random.rand(5).astype(np.float32)
+ x_rec = self.encode_decode(x)
+ self.assert_(all(map(lambda x, y: x == y, x, x_rec)) and
+ x.dtype == x_rec.dtype)
+
+ def test_numpy_array_complex(self):
+ x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
+ x_rec = self.encode_decode(x)
+ self.assert_(all(map(lambda x, y: x == y, x, x_rec)) and
+ x.dtype == x_rec.dtype)
+
+ def test_list_mixed(self):
+ x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')]
+ x_rec = self.encode_decode(x)
+ self.assert_(all(map(lambda x, y: x == y, x, x_rec)) and
+ all(map(lambda x, y: type(x) == type(y), x, x_rec)))
+
+
+class TestBasic(Test):
+
+ def test_timestamp(self):
+
+ for i in [Timestamp(
+ '20130101'), Timestamp('20130101', tz='US/Eastern'),
+ Timestamp('201301010501')]:
+ i_rec = self.encode_decode(i)
+ self.assert_(i == i_rec)
+
+ def test_datetimes(self):
+
+ for i in [datetime.datetime(
+ 2013, 1, 1), datetime.datetime(2013, 1, 1, 5, 1),
+ datetime.date(2013, 1, 1), np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
+ i_rec = self.encode_decode(i)
+ self.assert_(i == i_rec)
+
+ def test_timedeltas(self):
+
+ for i in [datetime.timedelta(days=1),
+ datetime.timedelta(days=1, seconds=10),
+ np.timedelta64(1000000)]:
+ i_rec = self.encode_decode(i)
+ self.assert_(i == i_rec)
+
+
+class TestIndex(Test):
+
+ def setUp(self):
+ super(TestIndex, self).setUp()
+
+ self.d = {
+ 'string': tm.makeStringIndex(100),
+ 'date': tm.makeDateIndex(100),
+ 'int': tm.makeIntIndex(100),
+ 'float': tm.makeFloatIndex(100),
+ 'empty': Index([]),
+ 'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
+ 'period': Index(period_range('2012-1-1', freq='M', periods=3)),
+ 'date2': Index(date_range('2013-01-1', periods=10)),
+ 'bdate': Index(bdate_range('2013-01-02', periods=10)),
+ }
+
+ self.mi = {
+ 'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), ('foo', 'two'),
+ ('qux', 'one'), ('qux', 'two')], names=['first', 'second']),
+ }
+
+ def test_basic_index(self):
+
+ for s, i in self.d.items():
+ i_rec = self.encode_decode(i)
+ self.assert_(i.equals(i_rec))
+
+ def test_multi_index(self):
+
+ for s, i in self.mi.items():
+ i_rec = self.encode_decode(i)
+ self.assert_(i.equals(i_rec))
+
+ def test_unicode(self):
+ i = tm.makeUnicodeIndex(100)
+ i_rec = self.encode_decode(i)
+ self.assert_(i.equals(i_rec))
+
+
+class TestSeries(Test):
+
+ def setUp(self):
+ super(TestSeries, self).setUp()
+
+ self.d = {}
+
+ s = tm.makeStringSeries()
+ s.name = 'string'
+ self.d['string'] = s
+
+ s = tm.makeObjectSeries()
+ s.name = 'object'
+ self.d['object'] = s
+
+ s = Series(tslib.iNaT, dtype='M8[ns]', index=range(5))
+ self.d['date'] = s
+
+ data = {
+ 'A': [0., 1., 2., 3., np.nan],
+ 'B': [0, 1, 0, 1, 0],
+ 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
+ 'D': date_range('1/1/2009', periods=5),
+ 'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
+ }
+
+ self.d['float'] = Series(data['A'])
+ self.d['int'] = Series(data['B'])
+ self.d['mixed'] = Series(data['E'])
+
+ def test_basic(self):
+
+ for s, i in self.d.items():
+ i_rec = self.encode_decode(i)
+ assert_series_equal(i, i_rec)
+
+
+class TestNDFrame(Test):
+
+ def setUp(self):
+ super(TestNDFrame, self).setUp()
+
+ data = {
+ 'A': [0., 1., 2., 3., np.nan],
+ 'B': [0, 1, 0, 1, 0],
+ 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
+ 'D': date_range('1/1/2009', periods=5),
+ 'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
+ }
+
+ self.frame = {
+ 'float': DataFrame(dict(A=data['A'], B=Series(data['A']) + 1)),
+ 'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)),
+ 'mixed': DataFrame(dict([(k, data[k]) for k in ['A', 'B', 'C', 'D']]))}
+
+ self.panel = {
+ 'float': Panel(dict(ItemA=self.frame['float'], ItemB=self.frame['float'] + 1))}
+
+ def test_basic_frame(self):
+
+ for s, i in self.frame.items():
+ i_rec = self.encode_decode(i)
+ assert_frame_equal(i, i_rec)
+
+ def test_basic_panel(self):
+
+ for s, i in self.panel.items():
+ i_rec = self.encode_decode(i)
+ assert_panel_equal(i, i_rec)
+
+ def test_multi(self):
+
+ i_rec = self.encode_decode(self.frame)
+ for k in self.frame.keys():
+ assert_frame_equal(self.frame[k], i_rec[k])
+
+ l = tuple(
+ [self.frame['float'], self.frame['float'].A, self.frame['float'].B, None])
+ l_rec = self.encode_decode(l)
+ check_arbitrary(l, l_rec)
+
+ # this is an oddity in that packed lists will be returned as tuples
+ l = [self.frame['float'], self.frame['float']
+ .A, self.frame['float'].B, None]
+ l_rec = self.encode_decode(l)
+ self.assert_(isinstance(l_rec, tuple))
+ check_arbitrary(l, l_rec)
+
+ def test_iterator(self):
+
+ l = [self.frame['float'], self.frame['float']
+ .A, self.frame['float'].B, None]
+
+ with ensure_clean(self.path) as path:
+ to_msgpack(path, *l)
+ for i, packed in enumerate(read_msgpack(path, iterator=True)):
+ check_arbitrary(packed, l[i])
+
+
+class TestSparse(Test):
+
+ def _check_roundtrip(self, obj, comparator, **kwargs):
+
+ i_rec = self.encode_decode(obj)
+ comparator(obj, i_rec, **kwargs)
+
+ def test_sparse_series(self):
+
+ s = tm.makeStringSeries()
+ s[3:5] = np.nan
+ ss = s.to_sparse()
+ self._check_roundtrip(ss, tm.assert_series_equal,
+ check_series_type=True)
+
+ ss2 = s.to_sparse(kind='integer')
+ self._check_roundtrip(ss2, tm.assert_series_equal,
+ check_series_type=True)
+
+ ss3 = s.to_sparse(fill_value=0)
+ self._check_roundtrip(ss3, tm.assert_series_equal,
+ check_series_type=True)
+
+ def test_sparse_frame(self):
+
+ s = tm.makeDataFrame()
+ s.ix[3:5, 1:3] = np.nan
+ s.ix[8:10, -2] = np.nan
+ ss = s.to_sparse()
+
+ self._check_roundtrip(ss, tm.assert_frame_equal,
+ check_frame_type=True)
+
+ ss2 = s.to_sparse(kind='integer')
+ self._check_roundtrip(ss2, tm.assert_frame_equal,
+ check_frame_type=True)
+
+ ss3 = s.to_sparse(fill_value=0)
+ self._check_roundtrip(ss3, tm.assert_frame_equal,
+ check_frame_type=True)
+
+ def test_sparse_panel(self):
+
+ items = ['x', 'y', 'z']
+ p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
+ sp = p.to_sparse()
+
+ self._check_roundtrip(sp, tm.assert_panel_equal,
+ check_panel_type=True)
+
+ sp2 = p.to_sparse(kind='integer')
+ self._check_roundtrip(sp2, tm.assert_panel_equal,
+ check_panel_type=True)
+
+ sp3 = p.to_sparse(fill_value=0)
+ self._check_roundtrip(sp3, tm.assert_panel_equal,
+ check_panel_type=True)
+
+
+if __name__ == '__main__':
+ import nose
+ nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
+ exit=False)
diff --git a/pandas/msgpack.pyx b/pandas/msgpack.pyx
new file mode 100644
index 0000000000000..2c8d7fd014b94
--- /dev/null
+++ b/pandas/msgpack.pyx
@@ -0,0 +1,711 @@
+# coding: utf-8
+#cython: embedsignature=True
+#cython: profile=False
+
+from cpython cimport *
+cdef extern from "Python.h":
+ ctypedef char* const_char_ptr "const char*"
+ ctypedef char* const_void_ptr "const void*"
+ ctypedef struct PyObject
+ cdef int PyObject_AsReadBuffer(object o, const_void_ptr* buff, Py_ssize_t* buf_len) except -1
+
+from libc.stdlib cimport *
+from libc.string cimport *
+from libc.limits cimport *
+
+import cython
+import numpy as np
+from numpy cimport *
+
+class UnpackException(IOError):
+ pass
+
+
+class BufferFull(UnpackException):
+ pass
+
+
+class OutOfData(UnpackException):
+ pass
+
+
+class UnpackValueError(UnpackException, ValueError):
+ pass
+
+
+class ExtraData(ValueError):
+ def __init__(self, unpacked, extra):
+ self.unpacked = unpacked
+ self.extra = extra
+
+ def __str__(self):
+ return "unpack(b) recieved extra data."
+
+class PackException(IOError):
+ pass
+
+class PackValueError(PackException, ValueError):
+ pass
+
+cdef extern from "msgpack/unpack.h":
+ ctypedef struct msgpack_user:
+ bint use_list
+ PyObject* object_hook
+ bint has_pairs_hook # call object_hook with k-v pairs
+ PyObject* list_hook
+ char *encoding
+ char *unicode_errors
+
+ ctypedef struct template_context:
+ msgpack_user user
+ PyObject* obj
+ size_t count
+ unsigned int ct
+ PyObject* key
+
+ ctypedef int (*execute_fn)(template_context* ctx, const_char_ptr data,
+ size_t len, size_t* off) except? -1
+ execute_fn template_construct
+ execute_fn template_skip
+ execute_fn read_array_header
+ execute_fn read_map_header
+ void template_init(template_context* ctx)
+ object template_data(template_context* ctx)
+
+cdef extern from "msgpack/pack.h":
+ struct msgpack_packer:
+ char* buf
+ size_t length
+ size_t buf_size
+
+ int msgpack_pack_int(msgpack_packer* pk, int d)
+ int msgpack_pack_nil(msgpack_packer* pk)
+ int msgpack_pack_true(msgpack_packer* pk)
+ int msgpack_pack_false(msgpack_packer* pk)
+ int msgpack_pack_long(msgpack_packer* pk, long d)
+ int msgpack_pack_long_long(msgpack_packer* pk, long long d)
+ int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d)
+ int msgpack_pack_float(msgpack_packer* pk, float d)
+ int msgpack_pack_double(msgpack_packer* pk, double d)
+ int msgpack_pack_array(msgpack_packer* pk, size_t l)
+ int msgpack_pack_map(msgpack_packer* pk, size_t l)
+ int msgpack_pack_raw(msgpack_packer* pk, size_t l)
+ int msgpack_pack_raw_body(msgpack_packer* pk, char* body, size_t l)
+
+cdef int DEFAULT_RECURSE_LIMIT=511
+
+
+
+cdef class Packer(object):
+ """MessagePack Packer
+
+ usage:
+
+ packer = Packer()
+ astream.write(packer.pack(a))
+ astream.write(packer.pack(b))
+
+ Packer's constructor has some keyword arguments:
+
+ * *default* - Convert user type to builtin type that Packer supports.
+ See also simplejson's document.
+ * *encoding* - Convert unicode to bytes with this encoding. (default: 'utf-8')
+ * *unicode_errors* - Error handler for encoding unicode. (default: 'strict')
+ * *use_single_float* - Use single precision float type for float. (default: False)
+ * *autoreset* - Reset buffer after each pack and return its content as `bytes`. (default: True).
+ If set this to false, use `bytes()` to get content and `.reset()` to clear buffer.
+ """
+ cdef msgpack_packer pk
+ cdef object _default
+ cdef object _bencoding
+ cdef object _berrors
+ cdef char *encoding
+ cdef char *unicode_errors
+ cdef bool use_float
+ cdef bint autoreset
+
+ def __cinit__(self):
+ cdef int buf_size = 1024*1024
+ self.pk.buf = <char*> malloc(buf_size);
+ if self.pk.buf == NULL:
+ raise MemoryError("Unable to allocate internal buffer.")
+ self.pk.buf_size = buf_size
+ self.pk.length = 0
+
+ def __init__(self, default=None, encoding='utf-8', unicode_errors='strict',
+ use_single_float=False, bint autoreset=1):
+ self.use_float = use_single_float
+ self.autoreset = autoreset
+ if default is not None:
+ if not PyCallable_Check(default):
+ raise TypeError("default must be a callable.")
+ self._default = default
+ if encoding is None:
+ self.encoding = NULL
+ self.unicode_errors = NULL
+ else:
+ if isinstance(encoding, unicode):
+ self._bencoding = encoding.encode('ascii')
+ else:
+ self._bencoding = encoding
+ self.encoding = PyBytes_AsString(self._bencoding)
+ if isinstance(unicode_errors, unicode):
+ self._berrors = unicode_errors.encode('ascii')
+ else:
+ self._berrors = unicode_errors
+ self.unicode_errors = PyBytes_AsString(self._berrors)
+
+ def __dealloc__(self):
+ free(self.pk.buf);
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1:
+ cdef long long llval
+ cdef unsigned long long ullval
+ cdef long longval
+ cdef float fval
+ cdef double dval
+ cdef char* rawval
+ cdef int ret
+ cdef dict d
+ cdef object dtype
+
+ cdef int n,i
+ cdef double f8val
+ cdef int64_t i8val
+ cdef ndarray[float64_t,ndim=1] array_double
+ cdef ndarray[int64_t,ndim=1] array_int
+
+ if nest_limit < 0:
+ raise PackValueError("recursion limit exceeded.")
+
+ if o is None:
+ ret = msgpack_pack_nil(&self.pk)
+ elif isinstance(o, bool):
+ if o:
+ ret = msgpack_pack_true(&self.pk)
+ else:
+ ret = msgpack_pack_false(&self.pk)
+ elif PyLong_Check(o):
+ if o > 0:
+ ullval = o
+ ret = msgpack_pack_unsigned_long_long(&self.pk, ullval)
+ else:
+ llval = o
+ ret = msgpack_pack_long_long(&self.pk, llval)
+ elif PyInt_Check(o):
+ longval = o
+ ret = msgpack_pack_long(&self.pk, longval)
+ elif PyFloat_Check(o):
+ if self.use_float:
+ fval = o
+ ret = msgpack_pack_float(&self.pk, fval)
+ else:
+ dval = o
+ ret = msgpack_pack_double(&self.pk, dval)
+ elif PyBytes_Check(o):
+ rawval = o
+ ret = msgpack_pack_raw(&self.pk, len(o))
+ if ret == 0:
+ ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
+ elif PyUnicode_Check(o):
+ if not self.encoding:
+ raise TypeError("Can't encode unicode string: no encoding is specified")
+ o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors)
+ rawval = o
+ ret = msgpack_pack_raw(&self.pk, len(o))
+ if ret == 0:
+ ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
+ elif PyDict_CheckExact(o):
+ d = <dict>o
+ ret = msgpack_pack_map(&self.pk, len(d))
+ if ret == 0:
+ for k, v in d.iteritems():
+ ret = self._pack(k, nest_limit-1)
+ if ret != 0: break
+ ret = self._pack(v, nest_limit-1)
+ if ret != 0: break
+ elif PyDict_Check(o):
+ ret = msgpack_pack_map(&self.pk, len(o))
+ if ret == 0:
+ for k, v in o.items():
+ ret = self._pack(k, nest_limit-1)
+ if ret != 0: break
+ ret = self._pack(v, nest_limit-1)
+ if ret != 0: break
+ elif PyTuple_Check(o) or PyList_Check(o):
+ ret = msgpack_pack_array(&self.pk, len(o))
+ if ret == 0:
+ for v in o:
+ ret = self._pack(v, nest_limit-1)
+ if ret != 0: break
+
+ # ndarray support ONLY (and float64/int64) for now
+ elif isinstance(o, np.ndarray) and not hasattr(o,'values') and (o.dtype == 'float64' or o.dtype == 'int64'):
+
+ ret = msgpack_pack_map(&self.pk, 5)
+ if ret != 0: return -1
+
+ dtype = o.dtype
+ self.pack_pair('typ', 'ndarray', nest_limit)
+ self.pack_pair('shape', o.shape, nest_limit)
+ self.pack_pair('ndim', o.ndim, nest_limit)
+ self.pack_pair('dtype', dtype.num, nest_limit)
+
+ ret = self._pack('data', nest_limit-1)
+ if ret != 0: return ret
+
+ if dtype == 'float64':
+ array_double = o.ravel()
+ n = len(array_double)
+ ret = msgpack_pack_array(&self.pk, n)
+ if ret != 0: return ret
+
+ for i in range(n):
+
+ f8val = array_double[i]
+ ret = msgpack_pack_double(&self.pk, f8val)
+ if ret != 0: break
+ elif dtype == 'int64':
+ array_int = o.ravel()
+ n = len(array_int)
+ ret = msgpack_pack_array(&self.pk, n)
+ if ret != 0: return ret
+
+ for i in range(n):
+
+ i8val = array_int[i]
+ ret = msgpack_pack_long_long(&self.pk, i8val)
+ if ret != 0: break
+
+ elif self._default:
+ o = self._default(o)
+ ret = self._pack(o, nest_limit-1)
+ else:
+ raise TypeError("can't serialize %r" % (o,))
+ return ret
+
+ cpdef pack(self, object obj):
+ cdef int ret
+ ret = self._pack(obj, DEFAULT_RECURSE_LIMIT)
+ if ret == -1:
+ raise MemoryError
+ elif ret: # should not happen.
+ raise TypeError
+ if self.autoreset:
+ buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+ self.pk.length = 0
+ return buf
+
+ def pack_array_header(self, size_t size):
+ cdef int ret = msgpack_pack_array(&self.pk, size)
+ if ret == -1:
+ raise MemoryError
+ elif ret: # should not happen
+ raise TypeError
+ if self.autoreset:
+ buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+ self.pk.length = 0
+ return buf
+
+ def pack_map_header(self, size_t size):
+ cdef int ret = msgpack_pack_map(&self.pk, size)
+ if ret == -1:
+ raise MemoryError
+ elif ret: # should not happen
+ raise TypeError
+ if self.autoreset:
+ buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+ self.pk.length = 0
+ return buf
+
+ def pack_map_pairs(self, object pairs):
+ """
+ Pack *pairs* as msgpack map type.
+
+ *pairs* should sequence of pair.
+ (`len(pairs)` and `for k, v in *pairs*:` should be supported.)
+ """
+ cdef int ret = msgpack_pack_map(&self.pk, len(pairs))
+ if ret == 0:
+ for k, v in pairs:
+ ret = self._pack(k)
+ if ret != 0: break
+ ret = self._pack(v)
+ if ret != 0: break
+ if ret == -1:
+ raise MemoryError
+ elif ret: # should not happen
+ raise TypeError
+ if self.autoreset:
+ buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+ self.pk.length = 0
+ return buf
+
+ def reset(self):
+ """Clear internal buffer."""
+ self.pk.length = 0
+
+ def bytes(self):
+ """Return buffer content."""
+ return PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+
+
+ cdef inline pack_pair(self, object k, object v, int nest_limit):
+ ret = self._pack(k, nest_limit-1)
+ if ret != 0: raise PackException("cannot pack : %s" % k)
+ ret = self._pack(v, nest_limit-1)
+ if ret != 0: raise PackException("cannot pack : %s" % v)
+ return ret
+
+def pack(object o, object stream, default=None, encoding='utf-8', unicode_errors='strict'):
+ """
+ pack an object `o` and write it to stream)."""
+ packer = Packer(default=default, encoding=encoding, unicode_errors=unicode_errors)
+ stream.write(packer.pack(o))
+
+def packb(object o, default=None, encoding='utf-8', unicode_errors='strict', use_single_float=False):
+ """
+ pack o and return packed bytes."""
+ packer = Packer(default=default, encoding=encoding, unicode_errors=unicode_errors,
+ use_single_float=use_single_float)
+ return packer.pack(o)
+
+
+cdef inline init_ctx(template_context *ctx,
+ object object_hook, object object_pairs_hook, object list_hook,
+ bint use_list, char* encoding, char* unicode_errors):
+ template_init(ctx)
+ ctx.user.use_list = use_list
+ ctx.user.object_hook = ctx.user.list_hook = <PyObject*>NULL
+
+ if object_hook is not None and object_pairs_hook is not None:
+ raise ValueError("object_pairs_hook and object_hook are mutually exclusive.")
+
+ if object_hook is not None:
+ if not PyCallable_Check(object_hook):
+ raise TypeError("object_hook must be a callable.")
+ ctx.user.object_hook = <PyObject*>object_hook
+
+ if object_pairs_hook is None:
+ ctx.user.has_pairs_hook = False
+ else:
+ if not PyCallable_Check(object_pairs_hook):
+ raise TypeError("object_pairs_hook must be a callable.")
+ ctx.user.object_hook = <PyObject*>object_pairs_hook
+ ctx.user.has_pairs_hook = True
+
+ if list_hook is not None:
+ if not PyCallable_Check(list_hook):
+ raise TypeError("list_hook must be a callable.")
+ ctx.user.list_hook = <PyObject*>list_hook
+
+ ctx.user.encoding = encoding
+ ctx.user.unicode_errors = unicode_errors
+
+def unpackb(object packed, object object_hook=None, object list_hook=None,
+ bint use_list=1, encoding=None, unicode_errors="strict",
+ object_pairs_hook=None,
+ ):
+ """Unpack packed_bytes to object. Returns an unpacked object.
+
+ Raises `ValueError` when `packed` contains extra bytes.
+ """
+ cdef template_context ctx
+ cdef size_t off = 0
+ cdef int ret
+
+ cdef char* buf
+ cdef Py_ssize_t buf_len
+ cdef char* cenc = NULL
+ cdef char* cerr = NULL
+
+ PyObject_AsReadBuffer(packed, <const_void_ptr*>&buf, &buf_len)
+
+ if encoding is not None:
+ if isinstance(encoding, unicode):
+ encoding = encoding.encode('ascii')
+ cenc = PyBytes_AsString(encoding)
+
+ if unicode_errors is not None:
+ if isinstance(unicode_errors, unicode):
+ unicode_errors = unicode_errors.encode('ascii')
+ cerr = PyBytes_AsString(unicode_errors)
+
+ init_ctx(&ctx, object_hook, object_pairs_hook, list_hook, use_list, cenc, cerr)
+ ret = template_construct(&ctx, buf, buf_len, &off)
+ if ret == 1:
+ obj = template_data(&ctx)
+ if off < buf_len:
+ raise ExtraData(obj, PyBytes_FromStringAndSize(buf+off, buf_len-off))
+ return obj
+ elif ret < 0:
+ raise ValueError("Unpack failed: error = %d" % (ret,))
+ else:
+ raise UnpackValueError
+
+
+def unpack(object stream, object object_hook=None, object list_hook=None,
+ bint use_list=1, encoding=None, unicode_errors="strict",
+ object_pairs_hook=None,
+ ):
+ """Unpack an object from `stream`.
+
+ Raises `ValueError` when `stream` has extra bytes.
+ """
+ return unpackb(stream.read(), use_list=use_list,
+ object_hook=object_hook, object_pairs_hook=object_pairs_hook, list_hook=list_hook,
+ encoding=encoding, unicode_errors=unicode_errors,
+ )
+
+
+cdef class Unpacker(object):
+ """
+ Streaming unpacker.
+
+ `file_like` is a file-like object having `.read(n)` method.
+ When `Unpacker` initialized with `file_like`, unpacker reads serialized data
+ from it and `.feed()` method is not usable.
+
+ `read_size` is used as `file_like.read(read_size)`.
+ (default: min(1024**2, max_buffer_size))
+
+ If `use_list` is true (default), msgpack list is deserialized to Python list.
+ Otherwise, it is deserialized to Python tuple.
+
+ `object_hook` is same to simplejson. If it is not None, it should be callable
+ and Unpacker calls it with a dict argument after deserializing a map.
+
+ `object_pairs_hook` is same to simplejson. If it is not None, it should be callable
+ and Unpacker calls it with a list of key-value pairs after deserializing a map.
+
+ `encoding` is encoding used for decoding msgpack bytes. If it is None (default),
+ msgpack bytes is deserialized to Python bytes.
+
+ `unicode_errors` is used for decoding bytes.
+
+ `max_buffer_size` limits size of data waiting unpacked.
+ 0 means system's INT_MAX (default).
+ Raises `BufferFull` exception when it is insufficient.
+ You shoud set this parameter when unpacking data from untrasted source.
+
+ example of streaming deserialize from file-like object::
+
+ unpacker = Unpacker(file_like)
+ for o in unpacker:
+ do_something(o)
+
+ example of streaming deserialize from socket::
+
+ unpacker = Unpacker()
+ while 1:
+ buf = sock.recv(1024**2)
+ if not buf:
+ break
+ unpacker.feed(buf)
+ for o in unpacker:
+ do_something(o)
+ """
+ cdef template_context ctx
+ cdef char* buf
+ cdef size_t buf_size, buf_head, buf_tail
+ cdef object file_like
+ cdef object file_like_read
+ cdef Py_ssize_t read_size
+ cdef object object_hook
+ cdef object encoding, unicode_errors
+ cdef size_t max_buffer_size
+
+ def __cinit__(self):
+ self.buf = NULL
+
+ def __dealloc__(self):
+ free(self.buf)
+ self.buf = NULL
+
+ def __init__(self, file_like=None, Py_ssize_t read_size=0, bint use_list=1,
+ object object_hook=None, object object_pairs_hook=None, object list_hook=None,
+ encoding=None, unicode_errors='strict', int max_buffer_size=0,
+ ):
+ cdef char *cenc=NULL, *cerr=NULL
+
+ self.file_like = file_like
+ if file_like:
+ self.file_like_read = file_like.read
+ if not PyCallable_Check(self.file_like_read):
+ raise ValueError("`file_like.read` must be a callable.")
+ if not max_buffer_size:
+ max_buffer_size = INT_MAX
+ if read_size > max_buffer_size:
+ raise ValueError("read_size should be less or equal to max_buffer_size")
+ if not read_size:
+ read_size = min(max_buffer_size, 1024**2)
+ self.max_buffer_size = max_buffer_size
+ self.read_size = read_size
+ self.buf = <char*>malloc(read_size)
+ if self.buf == NULL:
+ raise MemoryError("Unable to allocate internal buffer.")
+ self.buf_size = read_size
+ self.buf_head = 0
+ self.buf_tail = 0
+
+ if encoding is not None:
+ if isinstance(encoding, unicode):
+ encoding = encoding.encode('ascii')
+ self.encoding = encoding
+ cenc = PyBytes_AsString(encoding)
+
+ if unicode_errors is not None:
+ if isinstance(unicode_errors, unicode):
+ unicode_errors = unicode_errors.encode('ascii')
+ self.unicode_errors = unicode_errors
+ cerr = PyBytes_AsString(unicode_errors)
+
+ init_ctx(&self.ctx, object_hook, object_pairs_hook, list_hook, use_list, cenc, cerr)
+
+ def feed(self, object next_bytes):
+ """Append `next_bytes` to internal buffer."""
+ cdef char* buf
+ cdef Py_ssize_t buf_len
+ if self.file_like is not None:
+ raise TypeError(
+ "unpacker.feed() is not be able to use with `file_like`.")
+ PyObject_AsReadBuffer(next_bytes, <const_void_ptr*>&buf, &buf_len)
+ self.append_buffer(buf, buf_len)
+
+ cdef append_buffer(self, void* _buf, Py_ssize_t _buf_len):
+ cdef:
+ char* buf = self.buf
+ char* new_buf
+ size_t head = self.buf_head
+ size_t tail = self.buf_tail
+ size_t buf_size = self.buf_size
+ size_t new_size
+
+ if tail + _buf_len > buf_size:
+ if ((tail - head) + _buf_len) <= buf_size:
+ # move to front.
+ memmove(buf, buf + head, tail - head)
+ tail -= head
+ head = 0
+ else:
+ # expand buffer.
+ new_size = (tail-head) + _buf_len
+ if new_size > self.max_buffer_size:
+ raise BufferFull
+ new_size = min(new_size*2, self.max_buffer_size)
+ new_buf = <char*>malloc(new_size)
+ if new_buf == NULL:
+ # self.buf still holds old buffer and will be freed during
+ # obj destruction
+ raise MemoryError("Unable to enlarge internal buffer.")
+ memcpy(new_buf, buf + head, tail - head)
+ free(buf)
+
+ buf = new_buf
+ buf_size = new_size
+ tail -= head
+ head = 0
+
+ memcpy(buf + tail, <char*>(_buf), _buf_len)
+ self.buf = buf
+ self.buf_head = head
+ self.buf_size = buf_size
+ self.buf_tail = tail + _buf_len
+
+ cdef read_from_file(self):
+ next_bytes = self.file_like_read(
+ min(self.read_size,
+ self.max_buffer_size - (self.buf_tail - self.buf_head)
+ ))
+ if next_bytes:
+ self.append_buffer(PyBytes_AsString(next_bytes), PyBytes_Size(next_bytes))
+ else:
+ self.file_like = None
+
+ cdef object _unpack(self, execute_fn execute, object write_bytes, bint iter=0):
+ cdef int ret
+ cdef object obj
+ cdef size_t prev_head
+ while 1:
+ prev_head = self.buf_head
+ ret = execute(&self.ctx, self.buf, self.buf_tail, &self.buf_head)
+ if write_bytes is not None:
+ write_bytes(PyBytes_FromStringAndSize(self.buf + prev_head, self.buf_head - prev_head))
+
+ if ret == 1:
+ obj = template_data(&self.ctx)
+ template_init(&self.ctx)
+ return obj
+ elif ret == 0:
+ if self.file_like is not None:
+ self.read_from_file()
+ continue
+ if iter:
+ raise StopIteration("No more data to unpack.")
+ else:
+ raise OutOfData("No more data to unpack.")
+ else:
+ raise ValueError("Unpack failed: error = %d" % (ret,))
+
+ def read_bytes(self, Py_ssize_t nbytes):
+ """read a specified number of raw bytes from the stream"""
+ cdef size_t nread
+ nread = min(self.buf_tail - self.buf_head, nbytes)
+ ret = PyBytes_FromStringAndSize(self.buf + self.buf_head, nread)
+ self.buf_head += nread
+ if len(ret) < nbytes and self.file_like is not None:
+ ret += self.file_like.read(nbytes - len(ret))
+ return ret
+
+ def unpack(self, object write_bytes=None):
+ """
+ unpack one object
+
+ If write_bytes is not None, it will be called with parts of the raw
+ message as it is unpacked.
+
+ Raises `OutOfData` when there are no more bytes to unpack.
+ """
+ return self._unpack(template_construct, write_bytes)
+
+ def skip(self, object write_bytes=None):
+ """
+ read and ignore one object, returning None
+
+ If write_bytes is not None, it will be called with parts of the raw
+ message as it is unpacked.
+
+ Raises `OutOfData` when there are no more bytes to unpack.
+ """
+ return self._unpack(template_skip, write_bytes)
+
+ def read_array_header(self, object write_bytes=None):
+ """assuming the next object is an array, return its size n, such that
+ the next n unpack() calls will iterate over its contents.
+
+ Raises `OutOfData` when there are no more bytes to unpack.
+ """
+ return self._unpack(read_array_header, write_bytes)
+
+ def read_map_header(self, object write_bytes=None):
+ """assuming the next object is a map, return its size n, such that the
+ next n * 2 unpack() calls will iterate over its key-value pairs.
+
+ Raises `OutOfData` when there are no more bytes to unpack.
+ """
+ return self._unpack(read_map_header, write_bytes)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return self._unpack(template_construct, None, 1)
+
+ # for debug.
+ #def _buf(self):
+ # return PyString_FromStringAndSize(self.buf, self.buf_tail)
+
+ #def _off(self):
+ # return self.buf_head
diff --git a/pandas/src/msgpack/pack.h b/pandas/src/msgpack/pack.h
new file mode 100644
index 0000000000000..bb939d93ebeca
--- /dev/null
+++ b/pandas/src/msgpack/pack.h
@@ -0,0 +1,108 @@
+/*
+ * MessagePack for Python packing routine
+ *
+ * Copyright (C) 2009 Naoki INADA
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stddef.h>
+#include <stdlib.h>
+#include "sysdep.h"
+#include <limits.h>
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _MSC_VER
+#define inline __inline
+#endif
+
+typedef struct msgpack_packer {
+ char *buf;
+ size_t length;
+ size_t buf_size;
+} msgpack_packer;
+
+typedef struct Packer Packer;
+
+static inline int msgpack_pack_short(msgpack_packer* pk, short d);
+static inline int msgpack_pack_int(msgpack_packer* pk, int d);
+static inline int msgpack_pack_long(msgpack_packer* pk, long d);
+static inline int msgpack_pack_long_long(msgpack_packer* pk, long long d);
+static inline int msgpack_pack_unsigned_short(msgpack_packer* pk, unsigned short d);
+static inline int msgpack_pack_unsigned_int(msgpack_packer* pk, unsigned int d);
+static inline int msgpack_pack_unsigned_long(msgpack_packer* pk, unsigned long d);
+static inline int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d);
+
+static inline int msgpack_pack_uint8(msgpack_packer* pk, uint8_t d);
+static inline int msgpack_pack_uint16(msgpack_packer* pk, uint16_t d);
+static inline int msgpack_pack_uint32(msgpack_packer* pk, uint32_t d);
+static inline int msgpack_pack_uint64(msgpack_packer* pk, uint64_t d);
+static inline int msgpack_pack_int8(msgpack_packer* pk, int8_t d);
+static inline int msgpack_pack_int16(msgpack_packer* pk, int16_t d);
+static inline int msgpack_pack_int32(msgpack_packer* pk, int32_t d);
+static inline int msgpack_pack_int64(msgpack_packer* pk, int64_t d);
+
+static inline int msgpack_pack_float(msgpack_packer* pk, float d);
+static inline int msgpack_pack_double(msgpack_packer* pk, double d);
+
+static inline int msgpack_pack_nil(msgpack_packer* pk);
+static inline int msgpack_pack_true(msgpack_packer* pk);
+static inline int msgpack_pack_false(msgpack_packer* pk);
+
+static inline int msgpack_pack_array(msgpack_packer* pk, unsigned int n);
+
+static inline int msgpack_pack_map(msgpack_packer* pk, unsigned int n);
+
+static inline int msgpack_pack_raw(msgpack_packer* pk, size_t l);
+static inline int msgpack_pack_raw_body(msgpack_packer* pk, const void* b, size_t l);
+
+static inline int msgpack_pack_write(msgpack_packer* pk, const char *data, size_t l)
+{
+ char* buf = pk->buf;
+ size_t bs = pk->buf_size;
+ size_t len = pk->length;
+
+ if (len + l > bs) {
+ bs = (len + l) * 2;
+ buf = (char*)realloc(buf, bs);
+ if (!buf) return -1;
+ }
+ memcpy(buf + len, data, l);
+ len += l;
+
+ pk->buf = buf;
+ pk->buf_size = bs;
+ pk->length = len;
+ return 0;
+}
+
+#define msgpack_pack_inline_func(name) \
+ static inline int msgpack_pack ## name
+
+#define msgpack_pack_inline_func_cint(name) \
+ static inline int msgpack_pack ## name
+
+#define msgpack_pack_user msgpack_packer*
+
+#define msgpack_pack_append_buffer(user, buf, len) \
+ return msgpack_pack_write(user, (const char*)buf, len)
+
+#include "pack_template.h"
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/pandas/src/msgpack/pack_template.h b/pandas/src/msgpack/pack_template.h
new file mode 100644
index 0000000000000..65c959dd8ce63
--- /dev/null
+++ b/pandas/src/msgpack/pack_template.h
@@ -0,0 +1,771 @@
+/*
+ * MessagePack packing routine template
+ *
+ * Copyright (C) 2008-2010 FURUHASHI Sadayuki
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(__LITTLE_ENDIAN__)
+#define TAKE8_8(d) ((uint8_t*)&d)[0]
+#define TAKE8_16(d) ((uint8_t*)&d)[0]
+#define TAKE8_32(d) ((uint8_t*)&d)[0]
+#define TAKE8_64(d) ((uint8_t*)&d)[0]
+#elif defined(__BIG_ENDIAN__)
+#define TAKE8_8(d) ((uint8_t*)&d)[0]
+#define TAKE8_16(d) ((uint8_t*)&d)[1]
+#define TAKE8_32(d) ((uint8_t*)&d)[3]
+#define TAKE8_64(d) ((uint8_t*)&d)[7]
+#endif
+
+#ifndef msgpack_pack_inline_func
+#error msgpack_pack_inline_func template is not defined
+#endif
+
+#ifndef msgpack_pack_user
+#error msgpack_pack_user type is not defined
+#endif
+
+#ifndef msgpack_pack_append_buffer
+#error msgpack_pack_append_buffer callback is not defined
+#endif
+
+
+/*
+ * Integer
+ */
+
+#define msgpack_pack_real_uint8(x, d) \
+do { \
+ if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \
+ } else { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_8(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
+} while(0)
+
+#define msgpack_pack_real_uint16(x, d) \
+do { \
+ if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \
+ } else if(d < (1<<8)) { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } else { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } \
+} while(0)
+
+#define msgpack_pack_real_uint32(x, d) \
+do { \
+ if(d < (1<<8)) { \
+ if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \
+ } else { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
+ } else { \
+ if(d < (1<<16)) { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else { \
+ /* unsigned 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } \
+ } \
+} while(0)
+
+#define msgpack_pack_real_uint64(x, d) \
+do { \
+ if(d < (1ULL<<8)) { \
+ if(d < (1ULL<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \
+ } else { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_64(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
+ } else { \
+ if(d < (1ULL<<16)) { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else if(d < (1ULL<<32)) { \
+ /* unsigned 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } else { \
+ /* unsigned 64 */ \
+ unsigned char buf[9]; \
+ buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \
+ msgpack_pack_append_buffer(x, buf, 9); \
+ } \
+ } \
+} while(0)
+
+#define msgpack_pack_real_int8(x, d) \
+do { \
+ if(d < -(1<<5)) { \
+ /* signed 8 */ \
+ unsigned char buf[2] = {0xd0, TAKE8_8(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } else { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \
+ } \
+} while(0)
+
+#define msgpack_pack_real_int16(x, d) \
+do { \
+ if(d < -(1<<5)) { \
+ if(d < -(1<<7)) { \
+ /* signed 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else { \
+ /* signed 8 */ \
+ unsigned char buf[2] = {0xd0, TAKE8_16(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
+ } else if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \
+ } else { \
+ if(d < (1<<8)) { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } else { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } \
+ } \
+} while(0)
+
+#define msgpack_pack_real_int32(x, d) \
+do { \
+ if(d < -(1<<5)) { \
+ if(d < -(1<<15)) { \
+ /* signed 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } else if(d < -(1<<7)) { \
+ /* signed 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else { \
+ /* signed 8 */ \
+ unsigned char buf[2] = {0xd0, TAKE8_32(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
+ } else if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \
+ } else { \
+ if(d < (1<<8)) { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } else if(d < (1<<16)) { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else { \
+ /* unsigned 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } \
+ } \
+} while(0)
+
+#define msgpack_pack_real_int64(x, d) \
+do { \
+ if(d < -(1LL<<5)) { \
+ if(d < -(1LL<<15)) { \
+ if(d < -(1LL<<31)) { \
+ /* signed 64 */ \
+ unsigned char buf[9]; \
+ buf[0] = 0xd3; _msgpack_store64(&buf[1], d); \
+ msgpack_pack_append_buffer(x, buf, 9); \
+ } else { \
+ /* signed 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } \
+ } else { \
+ if(d < -(1<<7)) { \
+ /* signed 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else { \
+ /* signed 8 */ \
+ unsigned char buf[2] = {0xd0, TAKE8_64(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
+ } \
+ } else if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \
+ } else { \
+ if(d < (1LL<<16)) { \
+ if(d < (1<<8)) { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_64(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } else { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } \
+ } else { \
+ if(d < (1LL<<32)) { \
+ /* unsigned 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } else { \
+ /* unsigned 64 */ \
+ unsigned char buf[9]; \
+ buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \
+ msgpack_pack_append_buffer(x, buf, 9); \
+ } \
+ } \
+ } \
+} while(0)
+
+
+#ifdef msgpack_pack_inline_func_fixint
+
+msgpack_pack_inline_func_fixint(_uint8)(msgpack_pack_user x, uint8_t d)
+{
+ unsigned char buf[2] = {0xcc, TAKE8_8(d)};
+ msgpack_pack_append_buffer(x, buf, 2);
+}
+
+msgpack_pack_inline_func_fixint(_uint16)(msgpack_pack_user x, uint16_t d)
+{
+ unsigned char buf[3];
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], d);
+ msgpack_pack_append_buffer(x, buf, 3);
+}
+
+msgpack_pack_inline_func_fixint(_uint32)(msgpack_pack_user x, uint32_t d)
+{
+ unsigned char buf[5];
+ buf[0] = 0xce; _msgpack_store32(&buf[1], d);
+ msgpack_pack_append_buffer(x, buf, 5);
+}
+
+msgpack_pack_inline_func_fixint(_uint64)(msgpack_pack_user x, uint64_t d)
+{
+ unsigned char buf[9];
+ buf[0] = 0xcf; _msgpack_store64(&buf[1], d);
+ msgpack_pack_append_buffer(x, buf, 9);
+}
+
+msgpack_pack_inline_func_fixint(_int8)(msgpack_pack_user x, int8_t d)
+{
+ unsigned char buf[2] = {0xd0, TAKE8_8(d)};
+ msgpack_pack_append_buffer(x, buf, 2);
+}
+
+msgpack_pack_inline_func_fixint(_int16)(msgpack_pack_user x, int16_t d)
+{
+ unsigned char buf[3];
+ buf[0] = 0xd1; _msgpack_store16(&buf[1], d);
+ msgpack_pack_append_buffer(x, buf, 3);
+}
+
+msgpack_pack_inline_func_fixint(_int32)(msgpack_pack_user x, int32_t d)
+{
+ unsigned char buf[5];
+ buf[0] = 0xd2; _msgpack_store32(&buf[1], d);
+ msgpack_pack_append_buffer(x, buf, 5);
+}
+
+msgpack_pack_inline_func_fixint(_int64)(msgpack_pack_user x, int64_t d)
+{
+ unsigned char buf[9];
+ buf[0] = 0xd3; _msgpack_store64(&buf[1], d);
+ msgpack_pack_append_buffer(x, buf, 9);
+}
+
+#undef msgpack_pack_inline_func_fixint
+#endif
+
+
+msgpack_pack_inline_func(_uint8)(msgpack_pack_user x, uint8_t d)
+{
+ msgpack_pack_real_uint8(x, d);
+}
+
+msgpack_pack_inline_func(_uint16)(msgpack_pack_user x, uint16_t d)
+{
+ msgpack_pack_real_uint16(x, d);
+}
+
+msgpack_pack_inline_func(_uint32)(msgpack_pack_user x, uint32_t d)
+{
+ msgpack_pack_real_uint32(x, d);
+}
+
+msgpack_pack_inline_func(_uint64)(msgpack_pack_user x, uint64_t d)
+{
+ msgpack_pack_real_uint64(x, d);
+}
+
+msgpack_pack_inline_func(_int8)(msgpack_pack_user x, int8_t d)
+{
+ msgpack_pack_real_int8(x, d);
+}
+
+msgpack_pack_inline_func(_int16)(msgpack_pack_user x, int16_t d)
+{
+ msgpack_pack_real_int16(x, d);
+}
+
+msgpack_pack_inline_func(_int32)(msgpack_pack_user x, int32_t d)
+{
+ msgpack_pack_real_int32(x, d);
+}
+
+msgpack_pack_inline_func(_int64)(msgpack_pack_user x, int64_t d)
+{
+ msgpack_pack_real_int64(x, d);
+}
+
+
+#ifdef msgpack_pack_inline_func_cint
+
+msgpack_pack_inline_func_cint(_short)(msgpack_pack_user x, short d)
+{
+#if defined(SIZEOF_SHORT)
+#if SIZEOF_SHORT == 2
+ msgpack_pack_real_int16(x, d);
+#elif SIZEOF_SHORT == 4
+ msgpack_pack_real_int32(x, d);
+#else
+ msgpack_pack_real_int64(x, d);
+#endif
+
+#elif defined(SHRT_MAX)
+#if SHRT_MAX == 0x7fff
+ msgpack_pack_real_int16(x, d);
+#elif SHRT_MAX == 0x7fffffff
+ msgpack_pack_real_int32(x, d);
+#else
+ msgpack_pack_real_int64(x, d);
+#endif
+
+#else
+if(sizeof(short) == 2) {
+ msgpack_pack_real_int16(x, d);
+} else if(sizeof(short) == 4) {
+ msgpack_pack_real_int32(x, d);
+} else {
+ msgpack_pack_real_int64(x, d);
+}
+#endif
+}
+
+msgpack_pack_inline_func_cint(_int)(msgpack_pack_user x, int d)
+{
+#if defined(SIZEOF_INT)
+#if SIZEOF_INT == 2
+ msgpack_pack_real_int16(x, d);
+#elif SIZEOF_INT == 4
+ msgpack_pack_real_int32(x, d);
+#else
+ msgpack_pack_real_int64(x, d);
+#endif
+
+#elif defined(INT_MAX)
+#if INT_MAX == 0x7fff
+ msgpack_pack_real_int16(x, d);
+#elif INT_MAX == 0x7fffffff
+ msgpack_pack_real_int32(x, d);
+#else
+ msgpack_pack_real_int64(x, d);
+#endif
+
+#else
+if(sizeof(int) == 2) {
+ msgpack_pack_real_int16(x, d);
+} else if(sizeof(int) == 4) {
+ msgpack_pack_real_int32(x, d);
+} else {
+ msgpack_pack_real_int64(x, d);
+}
+#endif
+}
+
+msgpack_pack_inline_func_cint(_long)(msgpack_pack_user x, long d)
+{
+#if defined(SIZEOF_LONG)
+#if SIZEOF_LONG == 2
+ msgpack_pack_real_int16(x, d);
+#elif SIZEOF_LONG == 4
+ msgpack_pack_real_int32(x, d);
+#else
+ msgpack_pack_real_int64(x, d);
+#endif
+
+#elif defined(LONG_MAX)
+#if LONG_MAX == 0x7fffL
+ msgpack_pack_real_int16(x, d);
+#elif LONG_MAX == 0x7fffffffL
+ msgpack_pack_real_int32(x, d);
+#else
+ msgpack_pack_real_int64(x, d);
+#endif
+
+#else
+if(sizeof(long) == 2) {
+ msgpack_pack_real_int16(x, d);
+} else if(sizeof(long) == 4) {
+ msgpack_pack_real_int32(x, d);
+} else {
+ msgpack_pack_real_int64(x, d);
+}
+#endif
+}
+
+msgpack_pack_inline_func_cint(_long_long)(msgpack_pack_user x, long long d)
+{
+#if defined(SIZEOF_LONG_LONG)
+#if SIZEOF_LONG_LONG == 2
+ msgpack_pack_real_int16(x, d);
+#elif SIZEOF_LONG_LONG == 4
+ msgpack_pack_real_int32(x, d);
+#else
+ msgpack_pack_real_int64(x, d);
+#endif
+
+#elif defined(LLONG_MAX)
+#if LLONG_MAX == 0x7fffL
+ msgpack_pack_real_int16(x, d);
+#elif LLONG_MAX == 0x7fffffffL
+ msgpack_pack_real_int32(x, d);
+#else
+ msgpack_pack_real_int64(x, d);
+#endif
+
+#else
+if(sizeof(long long) == 2) {
+ msgpack_pack_real_int16(x, d);
+} else if(sizeof(long long) == 4) {
+ msgpack_pack_real_int32(x, d);
+} else {
+ msgpack_pack_real_int64(x, d);
+}
+#endif
+}
+
+msgpack_pack_inline_func_cint(_unsigned_short)(msgpack_pack_user x, unsigned short d)
+{
+#if defined(SIZEOF_SHORT)
+#if SIZEOF_SHORT == 2
+ msgpack_pack_real_uint16(x, d);
+#elif SIZEOF_SHORT == 4
+ msgpack_pack_real_uint32(x, d);
+#else
+ msgpack_pack_real_uint64(x, d);
+#endif
+
+#elif defined(USHRT_MAX)
+#if USHRT_MAX == 0xffffU
+ msgpack_pack_real_uint16(x, d);
+#elif USHRT_MAX == 0xffffffffU
+ msgpack_pack_real_uint32(x, d);
+#else
+ msgpack_pack_real_uint64(x, d);
+#endif
+
+#else
+if(sizeof(unsigned short) == 2) {
+ msgpack_pack_real_uint16(x, d);
+} else if(sizeof(unsigned short) == 4) {
+ msgpack_pack_real_uint32(x, d);
+} else {
+ msgpack_pack_real_uint64(x, d);
+}
+#endif
+}
+
+msgpack_pack_inline_func_cint(_unsigned_int)(msgpack_pack_user x, unsigned int d)
+{
+#if defined(SIZEOF_INT)
+#if SIZEOF_INT == 2
+ msgpack_pack_real_uint16(x, d);
+#elif SIZEOF_INT == 4
+ msgpack_pack_real_uint32(x, d);
+#else
+ msgpack_pack_real_uint64(x, d);
+#endif
+
+#elif defined(UINT_MAX)
+#if UINT_MAX == 0xffffU
+ msgpack_pack_real_uint16(x, d);
+#elif UINT_MAX == 0xffffffffU
+ msgpack_pack_real_uint32(x, d);
+#else
+ msgpack_pack_real_uint64(x, d);
+#endif
+
+#else
+if(sizeof(unsigned int) == 2) {
+ msgpack_pack_real_uint16(x, d);
+} else if(sizeof(unsigned int) == 4) {
+ msgpack_pack_real_uint32(x, d);
+} else {
+ msgpack_pack_real_uint64(x, d);
+}
+#endif
+}
+
+msgpack_pack_inline_func_cint(_unsigned_long)(msgpack_pack_user x, unsigned long d)
+{
+#if defined(SIZEOF_LONG)
+#if SIZEOF_LONG == 2
+ msgpack_pack_real_uint16(x, d);
+#elif SIZEOF_LONG == 4
+ msgpack_pack_real_uint32(x, d);
+#else
+ msgpack_pack_real_uint64(x, d);
+#endif
+
+#elif defined(ULONG_MAX)
+#if ULONG_MAX == 0xffffUL
+ msgpack_pack_real_uint16(x, d);
+#elif ULONG_MAX == 0xffffffffUL
+ msgpack_pack_real_uint32(x, d);
+#else
+ msgpack_pack_real_uint64(x, d);
+#endif
+
+#else
+if(sizeof(unsigned long) == 2) {
+ msgpack_pack_real_uint16(x, d);
+} else if(sizeof(unsigned long) == 4) {
+ msgpack_pack_real_uint32(x, d);
+} else {
+ msgpack_pack_real_uint64(x, d);
+}
+#endif
+}
+
+msgpack_pack_inline_func_cint(_unsigned_long_long)(msgpack_pack_user x, unsigned long long d)
+{
+#if defined(SIZEOF_LONG_LONG)
+#if SIZEOF_LONG_LONG == 2
+ msgpack_pack_real_uint16(x, d);
+#elif SIZEOF_LONG_LONG == 4
+ msgpack_pack_real_uint32(x, d);
+#else
+ msgpack_pack_real_uint64(x, d);
+#endif
+
+#elif defined(ULLONG_MAX)
+#if ULLONG_MAX == 0xffffUL
+ msgpack_pack_real_uint16(x, d);
+#elif ULLONG_MAX == 0xffffffffUL
+ msgpack_pack_real_uint32(x, d);
+#else
+ msgpack_pack_real_uint64(x, d);
+#endif
+
+#else
+if(sizeof(unsigned long long) == 2) {
+ msgpack_pack_real_uint16(x, d);
+} else if(sizeof(unsigned long long) == 4) {
+ msgpack_pack_real_uint32(x, d);
+} else {
+ msgpack_pack_real_uint64(x, d);
+}
+#endif
+}
+
+#undef msgpack_pack_inline_func_cint
+#endif
+
+
+
+/*
+ * Float
+ */
+
+msgpack_pack_inline_func(_float)(msgpack_pack_user x, float d)
+{
+ union { float f; uint32_t i; } mem;
+ mem.f = d;
+ unsigned char buf[5];
+ buf[0] = 0xca; _msgpack_store32(&buf[1], mem.i);
+ msgpack_pack_append_buffer(x, buf, 5);
+}
+
+msgpack_pack_inline_func(_double)(msgpack_pack_user x, double d)
+{
+ union { double f; uint64_t i; } mem;
+ mem.f = d;
+ unsigned char buf[9];
+ buf[0] = 0xcb;
+#if defined(__arm__) && !(__ARM_EABI__) // arm-oabi
+ // https://github.com/msgpack/msgpack-perl/pull/1
+ mem.i = (mem.i & 0xFFFFFFFFUL) << 32UL | (mem.i >> 32UL);
+#endif
+ _msgpack_store64(&buf[1], mem.i);
+ msgpack_pack_append_buffer(x, buf, 9);
+}
+
+
+/*
+ * Nil
+ */
+
+msgpack_pack_inline_func(_nil)(msgpack_pack_user x)
+{
+ static const unsigned char d = 0xc0;
+ msgpack_pack_append_buffer(x, &d, 1);
+}
+
+
+/*
+ * Boolean
+ */
+
+msgpack_pack_inline_func(_true)(msgpack_pack_user x)
+{
+ static const unsigned char d = 0xc3;
+ msgpack_pack_append_buffer(x, &d, 1);
+}
+
+msgpack_pack_inline_func(_false)(msgpack_pack_user x)
+{
+ static const unsigned char d = 0xc2;
+ msgpack_pack_append_buffer(x, &d, 1);
+}
+
+
+/*
+ * Array
+ */
+
+msgpack_pack_inline_func(_array)(msgpack_pack_user x, unsigned int n)
+{
+ if(n < 16) {
+ unsigned char d = 0x90 | n;
+ msgpack_pack_append_buffer(x, &d, 1);
+ } else if(n < 65536) {
+ unsigned char buf[3];
+ buf[0] = 0xdc; _msgpack_store16(&buf[1], (uint16_t)n);
+ msgpack_pack_append_buffer(x, buf, 3);
+ } else {
+ unsigned char buf[5];
+ buf[0] = 0xdd; _msgpack_store32(&buf[1], (uint32_t)n);
+ msgpack_pack_append_buffer(x, buf, 5);
+ }
+}
+
+
+/*
+ * Map
+ */
+
+msgpack_pack_inline_func(_map)(msgpack_pack_user x, unsigned int n)
+{
+ if(n < 16) {
+ unsigned char d = 0x80 | n;
+ msgpack_pack_append_buffer(x, &TAKE8_8(d), 1);
+ } else if(n < 65536) {
+ unsigned char buf[3];
+ buf[0] = 0xde; _msgpack_store16(&buf[1], (uint16_t)n);
+ msgpack_pack_append_buffer(x, buf, 3);
+ } else {
+ unsigned char buf[5];
+ buf[0] = 0xdf; _msgpack_store32(&buf[1], (uint32_t)n);
+ msgpack_pack_append_buffer(x, buf, 5);
+ }
+}
+
+
+/*
+ * Raw
+ */
+
+msgpack_pack_inline_func(_raw)(msgpack_pack_user x, size_t l)
+{
+ if(l < 32) {
+ unsigned char d = 0xa0 | (uint8_t)l;
+ msgpack_pack_append_buffer(x, &TAKE8_8(d), 1);
+ } else if(l < 65536) {
+ unsigned char buf[3];
+ buf[0] = 0xda; _msgpack_store16(&buf[1], (uint16_t)l);
+ msgpack_pack_append_buffer(x, buf, 3);
+ } else {
+ unsigned char buf[5];
+ buf[0] = 0xdb; _msgpack_store32(&buf[1], (uint32_t)l);
+ msgpack_pack_append_buffer(x, buf, 5);
+ }
+}
+
+msgpack_pack_inline_func(_raw_body)(msgpack_pack_user x, const void* b, size_t l)
+{
+ msgpack_pack_append_buffer(x, (const unsigned char*)b, l);
+}
+
+#undef msgpack_pack_inline_func
+#undef msgpack_pack_user
+#undef msgpack_pack_append_buffer
+
+#undef TAKE8_8
+#undef TAKE8_16
+#undef TAKE8_32
+#undef TAKE8_64
+
+#undef msgpack_pack_real_uint8
+#undef msgpack_pack_real_uint16
+#undef msgpack_pack_real_uint32
+#undef msgpack_pack_real_uint64
+#undef msgpack_pack_real_int8
+#undef msgpack_pack_real_int16
+#undef msgpack_pack_real_int32
+#undef msgpack_pack_real_int64
+
diff --git a/pandas/src/msgpack/sysdep.h b/pandas/src/msgpack/sysdep.h
new file mode 100644
index 0000000000000..4fedbd8ba472f
--- /dev/null
+++ b/pandas/src/msgpack/sysdep.h
@@ -0,0 +1,195 @@
+/*
+ * MessagePack system dependencies
+ *
+ * Copyright (C) 2008-2010 FURUHASHI Sadayuki
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MSGPACK_SYSDEP_H__
+#define MSGPACK_SYSDEP_H__
+
+#include <stdlib.h>
+#include <stddef.h>
+#if defined(_MSC_VER) && _MSC_VER < 1600
+typedef __int8 int8_t;
+typedef unsigned __int8 uint8_t;
+typedef __int16 int16_t;
+typedef unsigned __int16 uint16_t;
+typedef __int32 int32_t;
+typedef unsigned __int32 uint32_t;
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+#elif defined(_MSC_VER) // && _MSC_VER >= 1600
+#include <stdint.h>
+#else
+#include <stdint.h>
+#include <stdbool.h>
+#endif
+
+#ifdef _WIN32
+#define _msgpack_atomic_counter_header <windows.h>
+typedef long _msgpack_atomic_counter_t;
+#define _msgpack_sync_decr_and_fetch(ptr) InterlockedDecrement(ptr)
+#define _msgpack_sync_incr_and_fetch(ptr) InterlockedIncrement(ptr)
+#elif defined(__GNUC__) && ((__GNUC__*10 + __GNUC_MINOR__) < 41)
+#define _msgpack_atomic_counter_header "gcc_atomic.h"
+#else
+typedef unsigned int _msgpack_atomic_counter_t;
+#define _msgpack_sync_decr_and_fetch(ptr) __sync_sub_and_fetch(ptr, 1)
+#define _msgpack_sync_incr_and_fetch(ptr) __sync_add_and_fetch(ptr, 1)
+#endif
+
+#ifdef _WIN32
+
+#ifdef __cplusplus
+/* numeric_limits<T>::min,max */
+#ifdef max
+#undef max
+#endif
+#ifdef min
+#undef min
+#endif
+#endif
+
+#else
+#include <arpa/inet.h> /* __BYTE_ORDER */
+#endif
+
+#if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN__
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#define __BIG_ENDIAN__
+#elif _WIN32
+#define __LITTLE_ENDIAN__
+#endif
+#endif
+
+
+#ifdef __LITTLE_ENDIAN__
+
+#ifdef _WIN32
+# if defined(ntohs)
+# define _msgpack_be16(x) ntohs(x)
+# elif defined(_byteswap_ushort) || (defined(_MSC_VER) && _MSC_VER >= 1400)
+# define _msgpack_be16(x) ((uint16_t)_byteswap_ushort((unsigned short)x))
+# else
+# define _msgpack_be16(x) ( \
+ ((((uint16_t)x) << 8) ) | \
+ ((((uint16_t)x) >> 8) ) )
+# endif
+#else
+# define _msgpack_be16(x) ntohs(x)
+#endif
+
+#ifdef _WIN32
+# if defined(ntohl)
+# define _msgpack_be32(x) ntohl(x)
+# elif defined(_byteswap_ulong) || (defined(_MSC_VER) && _MSC_VER >= 1400)
+# define _msgpack_be32(x) ((uint32_t)_byteswap_ulong((unsigned long)x))
+# else
+# define _msgpack_be32(x) \
+ ( ((((uint32_t)x) << 24) ) | \
+ ((((uint32_t)x) << 8) & 0x00ff0000U ) | \
+ ((((uint32_t)x) >> 8) & 0x0000ff00U ) | \
+ ((((uint32_t)x) >> 24) ) )
+# endif
+#else
+# define _msgpack_be32(x) ntohl(x)
+#endif
+
+#if defined(_byteswap_uint64) || (defined(_MSC_VER) && _MSC_VER >= 1400)
+# define _msgpack_be64(x) (_byteswap_uint64(x))
+#elif defined(bswap_64)
+# define _msgpack_be64(x) bswap_64(x)
+#elif defined(__DARWIN_OSSwapInt64)
+# define _msgpack_be64(x) __DARWIN_OSSwapInt64(x)
+#else
+#define _msgpack_be64(x) \
+ ( ((((uint64_t)x) << 56) ) | \
+ ((((uint64_t)x) << 40) & 0x00ff000000000000ULL ) | \
+ ((((uint64_t)x) << 24) & 0x0000ff0000000000ULL ) | \
+ ((((uint64_t)x) << 8) & 0x000000ff00000000ULL ) | \
+ ((((uint64_t)x) >> 8) & 0x00000000ff000000ULL ) | \
+ ((((uint64_t)x) >> 24) & 0x0000000000ff0000ULL ) | \
+ ((((uint64_t)x) >> 40) & 0x000000000000ff00ULL ) | \
+ ((((uint64_t)x) >> 56) ) )
+#endif
+
+#define _msgpack_load16(cast, from) ((cast)( \
+ (((uint16_t)((uint8_t*)(from))[0]) << 8) | \
+ (((uint16_t)((uint8_t*)(from))[1]) ) ))
+
+#define _msgpack_load32(cast, from) ((cast)( \
+ (((uint32_t)((uint8_t*)(from))[0]) << 24) | \
+ (((uint32_t)((uint8_t*)(from))[1]) << 16) | \
+ (((uint32_t)((uint8_t*)(from))[2]) << 8) | \
+ (((uint32_t)((uint8_t*)(from))[3]) ) ))
+
+#define _msgpack_load64(cast, from) ((cast)( \
+ (((uint64_t)((uint8_t*)(from))[0]) << 56) | \
+ (((uint64_t)((uint8_t*)(from))[1]) << 48) | \
+ (((uint64_t)((uint8_t*)(from))[2]) << 40) | \
+ (((uint64_t)((uint8_t*)(from))[3]) << 32) | \
+ (((uint64_t)((uint8_t*)(from))[4]) << 24) | \
+ (((uint64_t)((uint8_t*)(from))[5]) << 16) | \
+ (((uint64_t)((uint8_t*)(from))[6]) << 8) | \
+ (((uint64_t)((uint8_t*)(from))[7]) ) ))
+
+#else
+
+#define _msgpack_be16(x) (x)
+#define _msgpack_be32(x) (x)
+#define _msgpack_be64(x) (x)
+
+#define _msgpack_load16(cast, from) ((cast)( \
+ (((uint16_t)((uint8_t*)from)[0]) << 8) | \
+ (((uint16_t)((uint8_t*)from)[1]) ) ))
+
+#define _msgpack_load32(cast, from) ((cast)( \
+ (((uint32_t)((uint8_t*)from)[0]) << 24) | \
+ (((uint32_t)((uint8_t*)from)[1]) << 16) | \
+ (((uint32_t)((uint8_t*)from)[2]) << 8) | \
+ (((uint32_t)((uint8_t*)from)[3]) ) ))
+
+#define _msgpack_load64(cast, from) ((cast)( \
+ (((uint64_t)((uint8_t*)from)[0]) << 56) | \
+ (((uint64_t)((uint8_t*)from)[1]) << 48) | \
+ (((uint64_t)((uint8_t*)from)[2]) << 40) | \
+ (((uint64_t)((uint8_t*)from)[3]) << 32) | \
+ (((uint64_t)((uint8_t*)from)[4]) << 24) | \
+ (((uint64_t)((uint8_t*)from)[5]) << 16) | \
+ (((uint64_t)((uint8_t*)from)[6]) << 8) | \
+ (((uint64_t)((uint8_t*)from)[7]) ) ))
+#endif
+
+
+#define _msgpack_store16(to, num) \
+ do { uint16_t val = _msgpack_be16(num); memcpy(to, &val, 2); } while(0)
+#define _msgpack_store32(to, num) \
+ do { uint32_t val = _msgpack_be32(num); memcpy(to, &val, 4); } while(0)
+#define _msgpack_store64(to, num) \
+ do { uint64_t val = _msgpack_be64(num); memcpy(to, &val, 8); } while(0)
+
+/*
+#define _msgpack_load16(cast, from) \
+ ({ cast val; memcpy(&val, (char*)from, 2); _msgpack_be16(val); })
+#define _msgpack_load32(cast, from) \
+ ({ cast val; memcpy(&val, (char*)from, 4); _msgpack_be32(val); })
+#define _msgpack_load64(cast, from) \
+ ({ cast val; memcpy(&val, (char*)from, 8); _msgpack_be64(val); })
+*/
+
+
+#endif /* msgpack/sysdep.h */
+
diff --git a/pandas/src/msgpack/unpack.h b/pandas/src/msgpack/unpack.h
new file mode 100644
index 0000000000000..3dc88e5fbded0
--- /dev/null
+++ b/pandas/src/msgpack/unpack.h
@@ -0,0 +1,235 @@
+/*
+ * MessagePack for Python unpacking routine
+ *
+ * Copyright (C) 2009 Naoki INADA
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define MSGPACK_EMBED_STACK_SIZE (1024)
+#include "unpack_define.h"
+
+typedef struct unpack_user {
+ int use_list;
+ PyObject *object_hook;
+ bool has_pairs_hook;
+ PyObject *list_hook;
+ const char *encoding;
+ const char *unicode_errors;
+} unpack_user;
+
+
+#define msgpack_unpack_struct(name) \
+ struct template ## name
+
+#define msgpack_unpack_func(ret, name) \
+ static inline ret template ## name
+
+#define msgpack_unpack_callback(name) \
+ template_callback ## name
+
+#define msgpack_unpack_object PyObject*
+
+#define msgpack_unpack_user unpack_user
+
+typedef int (*execute_fn)(msgpack_unpack_struct(_context)* ctx, const char* data, size_t len, size_t* off);
+
+struct template_context;
+typedef struct template_context template_context;
+
+static inline msgpack_unpack_object template_callback_root(unpack_user* u)
+{
+ return NULL;
+}
+
+static inline int template_callback_uint16(unpack_user* u, uint16_t d, msgpack_unpack_object* o)
+{
+ PyObject *p = PyInt_FromLong((long)d);
+ if (!p)
+ return -1;
+ *o = p;
+ return 0;
+}
+static inline int template_callback_uint8(unpack_user* u, uint8_t d, msgpack_unpack_object* o)
+{
+ return template_callback_uint16(u, d, o);
+}
+
+
+static inline int template_callback_uint32(unpack_user* u, uint32_t d, msgpack_unpack_object* o)
+{
+ PyObject *p;
+ if (d > LONG_MAX) {
+ p = PyLong_FromUnsignedLong((unsigned long)d);
+ } else {
+ p = PyInt_FromLong((long)d);
+ }
+ if (!p)
+ return -1;
+ *o = p;
+ return 0;
+}
+
+static inline int template_callback_uint64(unpack_user* u, uint64_t d, msgpack_unpack_object* o)
+{
+ PyObject *p = PyLong_FromUnsignedLongLong(d);
+ if (!p)
+ return -1;
+ *o = p;
+ return 0;
+}
+
+static inline int template_callback_int32(unpack_user* u, int32_t d, msgpack_unpack_object* o)
+{
+ PyObject *p = PyInt_FromLong(d);
+ if (!p)
+ return -1;
+ *o = p;
+ return 0;
+}
+
+static inline int template_callback_int16(unpack_user* u, int16_t d, msgpack_unpack_object* o)
+{
+ return template_callback_int32(u, d, o);
+}
+
+static inline int template_callback_int8(unpack_user* u, int8_t d, msgpack_unpack_object* o)
+{
+ return template_callback_int32(u, d, o);
+}
+
+static inline int template_callback_int64(unpack_user* u, int64_t d, msgpack_unpack_object* o)
+{
+ PyObject *p = PyLong_FromLongLong(d);
+ if (!p)
+ return -1;
+ *o = p;
+ return 0;
+}
+
+static inline int template_callback_double(unpack_user* u, double d, msgpack_unpack_object* o)
+{
+ PyObject *p = PyFloat_FromDouble(d);
+ if (!p)
+ return -1;
+ *o = p;
+ return 0;
+}
+
+static inline int template_callback_float(unpack_user* u, float d, msgpack_unpack_object* o)
+{
+ return template_callback_double(u, d, o);
+}
+
+static inline int template_callback_nil(unpack_user* u, msgpack_unpack_object* o)
+{ Py_INCREF(Py_None); *o = Py_None; return 0; }
+
+static inline int template_callback_true(unpack_user* u, msgpack_unpack_object* o)
+{ Py_INCREF(Py_True); *o = Py_True; return 0; }
+
+static inline int template_callback_false(unpack_user* u, msgpack_unpack_object* o)
+{ Py_INCREF(Py_False); *o = Py_False; return 0; }
+
+static inline int template_callback_array(unpack_user* u, unsigned int n, msgpack_unpack_object* o)
+{
+ PyObject *p = u->use_list ? PyList_New(n) : PyTuple_New(n);
+
+ if (!p)
+ return -1;
+ *o = p;
+ return 0;
+}
+
+static inline int template_callback_array_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object o)
+{
+ if (u->use_list)
+ PyList_SET_ITEM(*c, current, o);
+ else
+ PyTuple_SET_ITEM(*c, current, o);
+ return 0;
+}
+
+static inline int template_callback_array_end(unpack_user* u, msgpack_unpack_object* c)
+{
+ if (u->list_hook) {
+ PyObject *new_c = PyEval_CallFunction(u->list_hook, "(O)", *c);
+ if (!new_c)
+ return -1;
+ Py_DECREF(*c);
+ *c = new_c;
+ }
+ return 0;
+}
+
+static inline int template_callback_map(unpack_user* u, unsigned int n, msgpack_unpack_object* o)
+{
+ PyObject *p;
+ if (u->has_pairs_hook) {
+ p = PyList_New(n); // Or use tuple?
+ }
+ else {
+ p = PyDict_New();
+ }
+ if (!p)
+ return -1;
+ *o = p;
+ return 0;
+}
+
+static inline int template_callback_map_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object k, msgpack_unpack_object v)
+{
+ if (u->has_pairs_hook) {
+ msgpack_unpack_object item = PyTuple_Pack(2, k, v);
+ if (!item)
+ return -1;
+ Py_DECREF(k);
+ Py_DECREF(v);
+ PyList_SET_ITEM(*c, current, item);
+ return 0;
+ }
+ else if (PyDict_SetItem(*c, k, v) == 0) {
+ Py_DECREF(k);
+ Py_DECREF(v);
+ return 0;
+ }
+ return -1;
+}
+
+static inline int template_callback_map_end(unpack_user* u, msgpack_unpack_object* c)
+{
+ if (u->object_hook) {
+ PyObject *new_c = PyEval_CallFunction(u->object_hook, "(O)", *c);
+ if (!new_c)
+ return -1;
+
+ Py_DECREF(*c);
+ *c = new_c;
+ }
+ return 0;
+}
+
+static inline int template_callback_raw(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_unpack_object* o)
+{
+ PyObject *py;
+ if(u->encoding) {
+ py = PyUnicode_Decode(p, l, u->encoding, u->unicode_errors);
+ } else {
+ py = PyBytes_FromStringAndSize(p, l);
+ }
+ if (!py)
+ return -1;
+ *o = py;
+ return 0;
+}
+
+#include "unpack_template.h"
diff --git a/pandas/src/msgpack/unpack_define.h b/pandas/src/msgpack/unpack_define.h
new file mode 100644
index 0000000000000..959d3519e7b5c
--- /dev/null
+++ b/pandas/src/msgpack/unpack_define.h
@@ -0,0 +1,93 @@
+/*
+ * MessagePack unpacking routine template
+ *
+ * Copyright (C) 2008-2010 FURUHASHI Sadayuki
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MSGPACK_UNPACK_DEFINE_H__
+#define MSGPACK_UNPACK_DEFINE_H__
+
+#include "msgpack/sysdep.h"
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#ifndef MSGPACK_EMBED_STACK_SIZE
+#define MSGPACK_EMBED_STACK_SIZE 32
+#endif
+
+
+typedef enum {
+ CS_HEADER = 0x00, // nil
+
+ //CS_ = 0x01,
+ //CS_ = 0x02, // false
+ //CS_ = 0x03, // true
+
+ //CS_ = 0x04,
+ //CS_ = 0x05,
+ //CS_ = 0x06,
+ //CS_ = 0x07,
+
+ //CS_ = 0x08,
+ //CS_ = 0x09,
+ CS_FLOAT = 0x0a,
+ CS_DOUBLE = 0x0b,
+ CS_UINT_8 = 0x0c,
+ CS_UINT_16 = 0x0d,
+ CS_UINT_32 = 0x0e,
+ CS_UINT_64 = 0x0f,
+ CS_INT_8 = 0x10,
+ CS_INT_16 = 0x11,
+ CS_INT_32 = 0x12,
+ CS_INT_64 = 0x13,
+
+ //CS_ = 0x14,
+ //CS_ = 0x15,
+ //CS_BIG_INT_16 = 0x16,
+ //CS_BIG_INT_32 = 0x17,
+ //CS_BIG_FLOAT_16 = 0x18,
+ //CS_BIG_FLOAT_32 = 0x19,
+ CS_RAW_16 = 0x1a,
+ CS_RAW_32 = 0x1b,
+ CS_ARRAY_16 = 0x1c,
+ CS_ARRAY_32 = 0x1d,
+ CS_MAP_16 = 0x1e,
+ CS_MAP_32 = 0x1f,
+
+ //ACS_BIG_INT_VALUE,
+ //ACS_BIG_FLOAT_VALUE,
+ ACS_RAW_VALUE,
+} msgpack_unpack_state;
+
+
+typedef enum {
+ CT_ARRAY_ITEM,
+ CT_MAP_KEY,
+ CT_MAP_VALUE,
+} msgpack_container_type;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* msgpack/unpack_define.h */
+
diff --git a/pandas/src/msgpack/unpack_template.h b/pandas/src/msgpack/unpack_template.h
new file mode 100644
index 0000000000000..83b6918dc6686
--- /dev/null
+++ b/pandas/src/msgpack/unpack_template.h
@@ -0,0 +1,492 @@
+/*
+ * MessagePack unpacking routine template
+ *
+ * Copyright (C) 2008-2010 FURUHASHI Sadayuki
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef msgpack_unpack_func
+#error msgpack_unpack_func template is not defined
+#endif
+
+#ifndef msgpack_unpack_callback
+#error msgpack_unpack_callback template is not defined
+#endif
+
+#ifndef msgpack_unpack_struct
+#error msgpack_unpack_struct template is not defined
+#endif
+
+#ifndef msgpack_unpack_struct_decl
+#define msgpack_unpack_struct_decl(name) msgpack_unpack_struct(name)
+#endif
+
+#ifndef msgpack_unpack_object
+#error msgpack_unpack_object type is not defined
+#endif
+
+#ifndef msgpack_unpack_user
+#error msgpack_unpack_user type is not defined
+#endif
+
+#ifndef USE_CASE_RANGE
+#if !defined(_MSC_VER)
+#define USE_CASE_RANGE
+#endif
+#endif
+
+msgpack_unpack_struct_decl(_stack) {
+ msgpack_unpack_object obj;
+ size_t size;
+ size_t count;
+ unsigned int ct;
+ msgpack_unpack_object map_key;
+};
+
+msgpack_unpack_struct_decl(_context) {
+ msgpack_unpack_user user;
+ unsigned int cs;
+ unsigned int trail;
+ unsigned int top;
+ /*
+ msgpack_unpack_struct(_stack)* stack;
+ unsigned int stack_size;
+ msgpack_unpack_struct(_stack) embed_stack[MSGPACK_EMBED_STACK_SIZE];
+ */
+ msgpack_unpack_struct(_stack) stack[MSGPACK_EMBED_STACK_SIZE];
+};
+
+
+msgpack_unpack_func(void, _init)(msgpack_unpack_struct(_context)* ctx)
+{
+ ctx->cs = CS_HEADER;
+ ctx->trail = 0;
+ ctx->top = 0;
+ /*
+ ctx->stack = ctx->embed_stack;
+ ctx->stack_size = MSGPACK_EMBED_STACK_SIZE;
+ */
+ ctx->stack[0].obj = msgpack_unpack_callback(_root)(&ctx->user);
+}
+
+/*
+msgpack_unpack_func(void, _destroy)(msgpack_unpack_struct(_context)* ctx)
+{
+ if(ctx->stack_size != MSGPACK_EMBED_STACK_SIZE) {
+ free(ctx->stack);
+ }
+}
+*/
+
+msgpack_unpack_func(msgpack_unpack_object, _data)(msgpack_unpack_struct(_context)* ctx)
+{
+ return (ctx)->stack[0].obj;
+}
+
+
+template <bool construct>
+msgpack_unpack_func(int, _execute)(msgpack_unpack_struct(_context)* ctx, const char* data, size_t len, size_t* off)
+{
+ assert(len >= *off);
+
+ const unsigned char* p = (unsigned char*)data + *off;
+ const unsigned char* const pe = (unsigned char*)data + len;
+ const void* n = NULL;
+
+ unsigned int trail = ctx->trail;
+ unsigned int cs = ctx->cs;
+ unsigned int top = ctx->top;
+ msgpack_unpack_struct(_stack)* stack = ctx->stack;
+ /*
+ unsigned int stack_size = ctx->stack_size;
+ */
+ msgpack_unpack_user* user = &ctx->user;
+
+ msgpack_unpack_object obj;
+ msgpack_unpack_struct(_stack)* c = NULL;
+
+ int ret;
+
+#define construct_cb(name) \
+ construct && msgpack_unpack_callback(name)
+
+#define push_simple_value(func) \
+ if(construct_cb(func)(user, &obj) < 0) { goto _failed; } \
+ goto _push
+#define push_fixed_value(func, arg) \
+ if(construct_cb(func)(user, arg, &obj) < 0) { goto _failed; } \
+ goto _push
+#define push_variable_value(func, base, pos, len) \
+ if(construct_cb(func)(user, \
+ (const char*)base, (const char*)pos, len, &obj) < 0) { goto _failed; } \
+ goto _push
+
+#define again_fixed_trail(_cs, trail_len) \
+ trail = trail_len; \
+ cs = _cs; \
+ goto _fixed_trail_again
+#define again_fixed_trail_if_zero(_cs, trail_len, ifzero) \
+ trail = trail_len; \
+ if(trail == 0) { goto ifzero; } \
+ cs = _cs; \
+ goto _fixed_trail_again
+
+#define start_container(func, count_, ct_) \
+ if(top >= MSGPACK_EMBED_STACK_SIZE) { goto _failed; } /* FIXME */ \
+ if(construct_cb(func)(user, count_, &stack[top].obj) < 0) { goto _failed; } \
+ if((count_) == 0) { obj = stack[top].obj; \
+ if (construct_cb(func##_end)(user, &obj) < 0) { goto _failed; } \
+ goto _push; } \
+ stack[top].ct = ct_; \
+ stack[top].size = count_; \
+ stack[top].count = 0; \
+ ++top; \
+ /*printf("container %d count %d stack %d\n",stack[top].obj,count_,top);*/ \
+ /*printf("stack push %d\n", top);*/ \
+ /* FIXME \
+ if(top >= stack_size) { \
+ if(stack_size == MSGPACK_EMBED_STACK_SIZE) { \
+ size_t csize = sizeof(msgpack_unpack_struct(_stack)) * MSGPACK_EMBED_STACK_SIZE; \
+ size_t nsize = csize * 2; \
+ msgpack_unpack_struct(_stack)* tmp = (msgpack_unpack_struct(_stack)*)malloc(nsize); \
+ if(tmp == NULL) { goto _failed; } \
+ memcpy(tmp, ctx->stack, csize); \
+ ctx->stack = stack = tmp; \
+ ctx->stack_size = stack_size = MSGPACK_EMBED_STACK_SIZE * 2; \
+ } else { \
+ size_t nsize = sizeof(msgpack_unpack_struct(_stack)) * ctx->stack_size * 2; \
+ msgpack_unpack_struct(_stack)* tmp = (msgpack_unpack_struct(_stack)*)realloc(ctx->stack, nsize); \
+ if(tmp == NULL) { goto _failed; } \
+ ctx->stack = stack = tmp; \
+ ctx->stack_size = stack_size = stack_size * 2; \
+ } \
+ } \
+ */ \
+ goto _header_again
+
+#define NEXT_CS(p) \
+ ((unsigned int)*p & 0x1f)
+
+#ifdef USE_CASE_RANGE
+#define SWITCH_RANGE_BEGIN switch(*p) {
+#define SWITCH_RANGE(FROM, TO) case FROM ... TO:
+#define SWITCH_RANGE_DEFAULT default:
+#define SWITCH_RANGE_END }
+#else
+#define SWITCH_RANGE_BEGIN { if(0) {
+#define SWITCH_RANGE(FROM, TO) } else if(FROM <= *p && *p <= TO) {
+#define SWITCH_RANGE_DEFAULT } else {
+#define SWITCH_RANGE_END } }
+#endif
+
+ if(p == pe) { goto _out; }
+ do {
+ switch(cs) {
+ case CS_HEADER:
+ SWITCH_RANGE_BEGIN
+ SWITCH_RANGE(0x00, 0x7f) // Positive Fixnum
+ push_fixed_value(_uint8, *(uint8_t*)p);
+ SWITCH_RANGE(0xe0, 0xff) // Negative Fixnum
+ push_fixed_value(_int8, *(int8_t*)p);
+ SWITCH_RANGE(0xc0, 0xdf) // Variable
+ switch(*p) {
+ case 0xc0: // nil
+ push_simple_value(_nil);
+ //case 0xc1: // string
+ // again_terminal_trail(NEXT_CS(p), p+1);
+ case 0xc2: // false
+ push_simple_value(_false);
+ case 0xc3: // true
+ push_simple_value(_true);
+ //case 0xc4:
+ //case 0xc5:
+ //case 0xc6:
+ //case 0xc7:
+ //case 0xc8:
+ //case 0xc9:
+ case 0xca: // float
+ case 0xcb: // double
+ case 0xcc: // unsigned int 8
+ case 0xcd: // unsigned int 16
+ case 0xce: // unsigned int 32
+ case 0xcf: // unsigned int 64
+ case 0xd0: // signed int 8
+ case 0xd1: // signed int 16
+ case 0xd2: // signed int 32
+ case 0xd3: // signed int 64
+ again_fixed_trail(NEXT_CS(p), 1 << (((unsigned int)*p) & 0x03));
+ //case 0xd4:
+ //case 0xd5:
+ //case 0xd6: // big integer 16
+ //case 0xd7: // big integer 32
+ //case 0xd8: // big float 16
+ //case 0xd9: // big float 32
+ case 0xda: // raw 16
+ case 0xdb: // raw 32
+ case 0xdc: // array 16
+ case 0xdd: // array 32
+ case 0xde: // map 16
+ case 0xdf: // map 32
+ again_fixed_trail(NEXT_CS(p), 2 << (((unsigned int)*p) & 0x01));
+ default:
+ goto _failed;
+ }
+ SWITCH_RANGE(0xa0, 0xbf) // FixRaw
+ again_fixed_trail_if_zero(ACS_RAW_VALUE, ((unsigned int)*p & 0x1f), _raw_zero);
+ SWITCH_RANGE(0x90, 0x9f) // FixArray
+ start_container(_array, ((unsigned int)*p) & 0x0f, CT_ARRAY_ITEM);
+ SWITCH_RANGE(0x80, 0x8f) // FixMap
+ start_container(_map, ((unsigned int)*p) & 0x0f, CT_MAP_KEY);
+
+ SWITCH_RANGE_DEFAULT
+ goto _failed;
+ SWITCH_RANGE_END
+ // end CS_HEADER
+
+
+ _fixed_trail_again:
+ ++p;
+
+ default:
+ if((size_t)(pe - p) < trail) { goto _out; }
+ n = p; p += trail - 1;
+ switch(cs) {
+ //case CS_
+ //case CS_
+ case CS_FLOAT: {
+ union { uint32_t i; float f; } mem;
+ mem.i = _msgpack_load32(uint32_t,n);
+ push_fixed_value(_float, mem.f); }
+ case CS_DOUBLE: {
+ union { uint64_t i; double f; } mem;
+ mem.i = _msgpack_load64(uint64_t,n);
+#if defined(__arm__) && !(__ARM_EABI__) // arm-oabi
+ // https://github.com/msgpack/msgpack-perl/pull/1
+ mem.i = (mem.i & 0xFFFFFFFFUL) << 32UL | (mem.i >> 32UL);
+#endif
+ push_fixed_value(_double, mem.f); }
+ case CS_UINT_8:
+ push_fixed_value(_uint8, *(uint8_t*)n);
+ case CS_UINT_16:
+ push_fixed_value(_uint16, _msgpack_load16(uint16_t,n));
+ case CS_UINT_32:
+ push_fixed_value(_uint32, _msgpack_load32(uint32_t,n));
+ case CS_UINT_64:
+ push_fixed_value(_uint64, _msgpack_load64(uint64_t,n));
+
+ case CS_INT_8:
+ push_fixed_value(_int8, *(int8_t*)n);
+ case CS_INT_16:
+ push_fixed_value(_int16, _msgpack_load16(int16_t,n));
+ case CS_INT_32:
+ push_fixed_value(_int32, _msgpack_load32(int32_t,n));
+ case CS_INT_64:
+ push_fixed_value(_int64, _msgpack_load64(int64_t,n));
+
+ //case CS_
+ //case CS_
+ //case CS_BIG_INT_16:
+ // again_fixed_trail_if_zero(ACS_BIG_INT_VALUE, _msgpack_load16(uint16_t,n), _big_int_zero);
+ //case CS_BIG_INT_32:
+ // again_fixed_trail_if_zero(ACS_BIG_INT_VALUE, _msgpack_load32(uint32_t,n), _big_int_zero);
+ //case ACS_BIG_INT_VALUE:
+ //_big_int_zero:
+ // // FIXME
+ // push_variable_value(_big_int, data, n, trail);
+
+ //case CS_BIG_FLOAT_16:
+ // again_fixed_trail_if_zero(ACS_BIG_FLOAT_VALUE, _msgpack_load16(uint16_t,n), _big_float_zero);
+ //case CS_BIG_FLOAT_32:
+ // again_fixed_trail_if_zero(ACS_BIG_FLOAT_VALUE, _msgpack_load32(uint32_t,n), _big_float_zero);
+ //case ACS_BIG_FLOAT_VALUE:
+ //_big_float_zero:
+ // // FIXME
+ // push_variable_value(_big_float, data, n, trail);
+
+ case CS_RAW_16:
+ again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load16(uint16_t,n), _raw_zero);
+ case CS_RAW_32:
+ again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load32(uint32_t,n), _raw_zero);
+ case ACS_RAW_VALUE:
+ _raw_zero:
+ push_variable_value(_raw, data, n, trail);
+
+ case CS_ARRAY_16:
+ start_container(_array, _msgpack_load16(uint16_t,n), CT_ARRAY_ITEM);
+ case CS_ARRAY_32:
+ /* FIXME security guard */
+ start_container(_array, _msgpack_load32(uint32_t,n), CT_ARRAY_ITEM);
+
+ case CS_MAP_16:
+ start_container(_map, _msgpack_load16(uint16_t,n), CT_MAP_KEY);
+ case CS_MAP_32:
+ /* FIXME security guard */
+ start_container(_map, _msgpack_load32(uint32_t,n), CT_MAP_KEY);
+
+ default:
+ goto _failed;
+ }
+ }
+
+_push:
+ if(top == 0) { goto _finish; }
+ c = &stack[top-1];
+ switch(c->ct) {
+ case CT_ARRAY_ITEM:
+ if(construct_cb(_array_item)(user, c->count, &c->obj, obj) < 0) { goto _failed; }
+ if(++c->count == c->size) {
+ obj = c->obj;
+ if (construct_cb(_array_end)(user, &obj) < 0) { goto _failed; }
+ --top;
+ /*printf("stack pop %d\n", top);*/
+ goto _push;
+ }
+ goto _header_again;
+ case CT_MAP_KEY:
+ c->map_key = obj;
+ c->ct = CT_MAP_VALUE;
+ goto _header_again;
+ case CT_MAP_VALUE:
+ if(construct_cb(_map_item)(user, c->count, &c->obj, c->map_key, obj) < 0) { goto _failed; }
+ if(++c->count == c->size) {
+ obj = c->obj;
+ if (construct_cb(_map_end)(user, &obj) < 0) { goto _failed; }
+ --top;
+ /*printf("stack pop %d\n", top);*/
+ goto _push;
+ }
+ c->ct = CT_MAP_KEY;
+ goto _header_again;
+
+ default:
+ goto _failed;
+ }
+
+_header_again:
+ cs = CS_HEADER;
+ ++p;
+ } while(p != pe);
+ goto _out;
+
+
+_finish:
+ if (!construct)
+ msgpack_unpack_callback(_nil)(user, &obj);
+ stack[0].obj = obj;
+ ++p;
+ ret = 1;
+ /*printf("-- finish --\n"); */
+ goto _end;
+
+_failed:
+ /*printf("** FAILED **\n"); */
+ ret = -1;
+ goto _end;
+
+_out:
+ ret = 0;
+ goto _end;
+
+_end:
+ ctx->cs = cs;
+ ctx->trail = trail;
+ ctx->top = top;
+ *off = p - (const unsigned char*)data;
+
+ return ret;
+#undef construct_cb
+}
+
+#undef SWITCH_RANGE_BEGIN
+#undef SWITCH_RANGE
+#undef SWITCH_RANGE_DEFAULT
+#undef SWITCH_RANGE_END
+#undef push_simple_value
+#undef push_fixed_value
+#undef push_variable_value
+#undef again_fixed_trail
+#undef again_fixed_trail_if_zero
+#undef start_container
+
+template <unsigned int fixed_offset, unsigned int var_offset>
+msgpack_unpack_func(int, _container_header)(msgpack_unpack_struct(_context)* ctx, const char* data, size_t len, size_t* off)
+{
+ assert(len >= *off);
+ uint32_t size;
+ const unsigned char *const p = (unsigned char*)data + *off;
+
+#define inc_offset(inc) \
+ if (len - *off < inc) \
+ return 0; \
+ *off += inc;
+
+ switch (*p) {
+ case var_offset:
+ inc_offset(3);
+ size = _msgpack_load16(uint16_t, p + 1);
+ break;
+ case var_offset + 1:
+ inc_offset(5);
+ size = _msgpack_load32(uint32_t, p + 1);
+ break;
+#ifdef USE_CASE_RANGE
+ case fixed_offset + 0x0 ... fixed_offset + 0xf:
+#else
+ case fixed_offset + 0x0:
+ case fixed_offset + 0x1:
+ case fixed_offset + 0x2:
+ case fixed_offset + 0x3:
+ case fixed_offset + 0x4:
+ case fixed_offset + 0x5:
+ case fixed_offset + 0x6:
+ case fixed_offset + 0x7:
+ case fixed_offset + 0x8:
+ case fixed_offset + 0x9:
+ case fixed_offset + 0xa:
+ case fixed_offset + 0xb:
+ case fixed_offset + 0xc:
+ case fixed_offset + 0xd:
+ case fixed_offset + 0xe:
+ case fixed_offset + 0xf:
+#endif
+ ++*off;
+ size = ((unsigned int)*p) & 0x0f;
+ break;
+ default:
+ PyErr_SetString(PyExc_ValueError, "Unexpected type header on stream");
+ return -1;
+ }
+ msgpack_unpack_callback(_uint32)(&ctx->user, size, &ctx->stack[0].obj);
+ return 1;
+}
+
+#undef SWITCH_RANGE_BEGIN
+#undef SWITCH_RANGE
+#undef SWITCH_RANGE_DEFAULT
+#undef SWITCH_RANGE_END
+
+static const execute_fn template_construct = &template_execute<true>;
+static const execute_fn template_skip = &template_execute<false>;
+static const execute_fn read_array_header = &template_container_header<0x90, 0xdc>;
+static const execute_fn read_map_header = &template_container_header<0x80, 0xde>;
+
+#undef msgpack_unpack_func
+#undef msgpack_unpack_callback
+#undef msgpack_unpack_struct
+#undef msgpack_unpack_object
+#undef msgpack_unpack_user
+
+#undef NEXT_CS
+
+/* vim: set ts=4 sw=4 noexpandtab */
diff --git a/pandas/tests/test_msgpack/__init__.py b/pandas/tests/test_msgpack/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/tests/test_msgpack/test_buffer.py b/pandas/tests/test_msgpack/test_buffer.py
new file mode 100644
index 0000000000000..940b65406103e
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_buffer.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from pandas.msgpack import packb, unpackb
+
+
+def test_unpack_buffer():
+ from array import array
+ buf = array('b')
+ buf.fromstring(packb(('foo', 'bar')))
+ obj = unpackb(buf, use_list=1)
+ assert [b'foo', b'bar'] == obj
diff --git a/pandas/tests/test_msgpack/test_case.py b/pandas/tests/test_msgpack/test_case.py
new file mode 100644
index 0000000000000..e78456b2ddb62
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_case.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from pandas.msgpack import packb, unpackb
+
+
+def check(length, obj):
+ v = packb(obj)
+ assert len(v) == length, \
+ "%r length should be %r but get %r" % (obj, length, len(v))
+ assert unpackb(v, use_list=0) == obj
+
+def test_1():
+ for o in [None, True, False, 0, 1, (1 << 6), (1 << 7) - 1, -1,
+ -((1<<5)-1), -(1<<5)]:
+ check(1, o)
+
+def test_2():
+ for o in [1 << 7, (1 << 8) - 1,
+ -((1<<5)+1), -(1<<7)
+ ]:
+ check(2, o)
+
+def test_3():
+ for o in [1 << 8, (1 << 16) - 1,
+ -((1<<7)+1), -(1<<15)]:
+ check(3, o)
+
+def test_5():
+ for o in [1 << 16, (1 << 32) - 1,
+ -((1<<15)+1), -(1<<31)]:
+ check(5, o)
+
+def test_9():
+ for o in [1 << 32, (1 << 64) - 1,
+ -((1<<31)+1), -(1<<63),
+ 1.0, 0.1, -0.1, -1.0]:
+ check(9, o)
+
+
+def check_raw(overhead, num):
+ check(num + overhead, b" " * num)
+
+def test_fixraw():
+ check_raw(1, 0)
+ check_raw(1, (1<<5) - 1)
+
+def test_raw16():
+ check_raw(3, 1<<5)
+ check_raw(3, (1<<16) - 1)
+
+def test_raw32():
+ check_raw(5, 1<<16)
+
+
+def check_array(overhead, num):
+ check(num + overhead, (None,) * num)
+
+def test_fixarray():
+ check_array(1, 0)
+ check_array(1, (1 << 4) - 1)
+
+def test_array16():
+ check_array(3, 1 << 4)
+ check_array(3, (1<<16)-1)
+
+def test_array32():
+ check_array(5, (1<<16))
+
+
+def match(obj, buf):
+ assert packb(obj) == buf
+ assert unpackb(buf, use_list=0) == obj
+
+def test_match():
+ cases = [
+ (None, b'\xc0'),
+ (False, b'\xc2'),
+ (True, b'\xc3'),
+ (0, b'\x00'),
+ (127, b'\x7f'),
+ (128, b'\xcc\x80'),
+ (256, b'\xcd\x01\x00'),
+ (-1, b'\xff'),
+ (-33, b'\xd0\xdf'),
+ (-129, b'\xd1\xff\x7f'),
+ ({1:1}, b'\x81\x01\x01'),
+ (1.0, b"\xcb\x3f\xf0\x00\x00\x00\x00\x00\x00"),
+ ((), b'\x90'),
+ (tuple(range(15)),b"\x9f\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e"),
+ (tuple(range(16)),b"\xdc\x00\x10\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"),
+ ({}, b'\x80'),
+ (dict([(x,x) for x in range(15)]), b'\x8f\x00\x00\x01\x01\x02\x02\x03\x03\x04\x04\x05\x05\x06\x06\x07\x07\x08\x08\t\t\n\n\x0b\x0b\x0c\x0c\r\r\x0e\x0e'),
+ (dict([(x,x) for x in range(16)]), b'\xde\x00\x10\x00\x00\x01\x01\x02\x02\x03\x03\x04\x04\x05\x05\x06\x06\x07\x07\x08\x08\t\t\n\n\x0b\x0b\x0c\x0c\r\r\x0e\x0e\x0f\x0f'),
+ ]
+
+ for v, p in cases:
+ match(v, p)
+
+def test_unicode():
+ assert unpackb(packb('foobar'), use_list=1) == b'foobar'
diff --git a/pandas/tests/test_msgpack/test_except.py b/pandas/tests/test_msgpack/test_except.py
new file mode 100644
index 0000000000000..a0239336ca20d
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_except.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+import unittest
+import nose
+
+import datetime
+from pandas.msgpack import packb, unpackb
+
+class DummyException(Exception):
+ pass
+
+class TestExceptions(unittest.TestCase):
+
+ def test_raise_on_find_unsupported_value(self):
+ import datetime
+ self.assertRaises(TypeError, packb, datetime.datetime.now())
+
+ def test_raise_from_object_hook(self):
+ def hook(obj):
+ raise DummyException
+ self.assertRaises(DummyException, unpackb, packb({}), object_hook=hook)
+ self.assertRaises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_hook=hook)
+ self.assertRaises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_pairs_hook=hook)
+ self.assertRaises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_hook=hook)
+ self.assertRaises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_pairs_hook=hook)
+
+ def test_invalidvalue(self):
+ self.assertRaises(ValueError, unpackb, b'\xd9\x97#DL_')
diff --git a/pandas/tests/test_msgpack/test_format.py b/pandas/tests/test_msgpack/test_format.py
new file mode 100644
index 0000000000000..a3a3afd046ce2
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_format.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from pandas.msgpack import unpackb
+
+def check(src, should, use_list=0):
+ assert unpackb(src, use_list=use_list) == should
+
+def testSimpleValue():
+ check(b"\x93\xc0\xc2\xc3",
+ (None, False, True,))
+
+def testFixnum():
+ check(b"\x92\x93\x00\x40\x7f\x93\xe0\xf0\xff",
+ ((0,64,127,), (-32,-16,-1,),)
+ )
+
+def testFixArray():
+ check(b"\x92\x90\x91\x91\xc0",
+ ((),((None,),),),
+ )
+
+def testFixRaw():
+ check(b"\x94\xa0\xa1a\xa2bc\xa3def",
+ (b"", b"a", b"bc", b"def",),
+ )
+
+def testFixMap():
+ check(
+ b"\x82\xc2\x81\xc0\xc0\xc3\x81\xc0\x80",
+ {False: {None: None}, True:{None:{}}},
+ )
+
+def testUnsignedInt():
+ check(
+ b"\x99\xcc\x00\xcc\x80\xcc\xff\xcd\x00\x00\xcd\x80\x00"
+ b"\xcd\xff\xff\xce\x00\x00\x00\x00\xce\x80\x00\x00\x00"
+ b"\xce\xff\xff\xff\xff",
+ (0, 128, 255, 0, 32768, 65535, 0, 2147483648, 4294967295,),
+ )
+
+def testSignedInt():
+ check(b"\x99\xd0\x00\xd0\x80\xd0\xff\xd1\x00\x00\xd1\x80\x00"
+ b"\xd1\xff\xff\xd2\x00\x00\x00\x00\xd2\x80\x00\x00\x00"
+ b"\xd2\xff\xff\xff\xff",
+ (0, -128, -1, 0, -32768, -1, 0, -2147483648, -1,))
+
+def testRaw():
+ check(b"\x96\xda\x00\x00\xda\x00\x01a\xda\x00\x02ab\xdb\x00\x00"
+ b"\x00\x00\xdb\x00\x00\x00\x01a\xdb\x00\x00\x00\x02ab",
+ (b"", b"a", b"ab", b"", b"a", b"ab"))
+
+def testArray():
+ check(b"\x96\xdc\x00\x00\xdc\x00\x01\xc0\xdc\x00\x02\xc2\xc3\xdd\x00"
+ b"\x00\x00\x00\xdd\x00\x00\x00\x01\xc0\xdd\x00\x00\x00\x02"
+ b"\xc2\xc3",
+ ((), (None,), (False,True), (), (None,), (False,True))
+ )
+
+def testMap():
+ check(
+ b"\x96"
+ b"\xde\x00\x00"
+ b"\xde\x00\x01\xc0\xc2"
+ b"\xde\x00\x02\xc0\xc2\xc3\xc2"
+ b"\xdf\x00\x00\x00\x00"
+ b"\xdf\x00\x00\x00\x01\xc0\xc2"
+ b"\xdf\x00\x00\x00\x02\xc0\xc2\xc3\xc2",
+ ({}, {None: False}, {True: False, None: False}, {},
+ {None: False}, {True: False, None: False}))
diff --git a/pandas/tests/test_msgpack/test_obj.py b/pandas/tests/test_msgpack/test_obj.py
new file mode 100644
index 0000000000000..4a018bc8b87f1
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_obj.py
@@ -0,0 +1,71 @@
+# coding: utf-8
+
+import unittest
+import nose
+
+import datetime
+from pandas.msgpack import packb, unpackb
+
+class DecodeError(Exception):
+ pass
+
+class TestObj(unittest.TestCase):
+
+ def _arr_to_str(self, arr):
+ return ''.join(str(c) for c in arr)
+
+ def bad_complex_decoder(self, o):
+ raise DecodeError("Ooops!")
+
+ def _decode_complex(self, obj):
+ if b'__complex__' in obj:
+ return complex(obj[b'real'], obj[b'imag'])
+ return obj
+
+ def _encode_complex(self, obj):
+ if isinstance(obj, complex):
+ return {b'__complex__': True, b'real': 1, b'imag': 2}
+ return obj
+
+ def test_encode_hook(self):
+ packed = packb([3, 1+2j], default=self._encode_complex)
+ unpacked = unpackb(packed, use_list=1)
+ assert unpacked[1] == {b'__complex__': True, b'real': 1, b'imag': 2}
+
+ def test_decode_hook(self):
+ packed = packb([3, {b'__complex__': True, b'real': 1, b'imag': 2}])
+ unpacked = unpackb(packed, object_hook=self._decode_complex, use_list=1)
+ assert unpacked[1] == 1+2j
+
+ def test_decode_pairs_hook(self):
+ packed = packb([3, {1: 2, 3: 4}])
+ prod_sum = 1 * 2 + 3 * 4
+ unpacked = unpackb(packed, object_pairs_hook=lambda l: sum(k * v for k, v in l), use_list=1)
+ assert unpacked[1] == prod_sum
+
+ def test_only_one_obj_hook(self):
+ self.assertRaises(ValueError, unpackb, b'', object_hook=lambda x: x, object_pairs_hook=lambda x: x)
+
+ def test_bad_hook(self):
+ def f():
+ packed = packb([3, 1+2j], default=lambda o: o)
+ unpacked = unpackb(packed, use_list=1)
+ self.assertRaises(ValueError, f)
+
+ def test_array_hook(self):
+ packed = packb([1,2,3])
+ unpacked = unpackb(packed, list_hook=self._arr_to_str, use_list=1)
+ assert unpacked == '123'
+
+ def test_an_exception_in_objecthook1(self):
+ def f():
+ packed = packb({1: {'__complex__': True, 'real': 1, 'imag': 2}})
+ unpackb(packed, object_hook=self.bad_complex_decoder)
+ self.assertRaises(DecodeError, f)
+
+
+ def test_an_exception_in_objecthook2(self):
+ def f():
+ packed = packb({1: [{'__complex__': True, 'real': 1, 'imag': 2}]})
+ unpackb(packed, list_hook=self.bad_complex_decoder, use_list=1)
+ self.assertRaises(DecodeError, f)
diff --git a/pandas/tests/test_msgpack/test_pack.py b/pandas/tests/test_msgpack/test_pack.py
new file mode 100644
index 0000000000000..22df6df5e2e45
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_pack.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+import unittest
+import nose
+
+import struct
+from pandas import compat
+from pandas.compat import u, OrderedDict
+from pandas.msgpack import packb, unpackb, Unpacker, Packer
+
+class TestPack(unittest.TestCase):
+
+ def check(self, data, use_list=False):
+ re = unpackb(packb(data), use_list=use_list)
+ assert re == data
+
+ def testPack(self):
+ test_data = [
+ 0, 1, 127, 128, 255, 256, 65535, 65536,
+ -1, -32, -33, -128, -129, -32768, -32769,
+ 1.0,
+ b"", b"a", b"a"*31, b"a"*32,
+ None, True, False,
+ (), ((),), ((), None,),
+ {None: 0},
+ (1<<23),
+ ]
+ for td in test_data:
+ self.check(td)
+
+ def testPackUnicode(self):
+ test_data = [
+ u(""), u("abcd"), [u("defgh")], u("Русский текст"),
+ ]
+ for td in test_data:
+ re = unpackb(packb(td, encoding='utf-8'), use_list=1, encoding='utf-8')
+ assert re == td
+ packer = Packer(encoding='utf-8')
+ data = packer.pack(td)
+ re = Unpacker(compat.BytesIO(data), encoding='utf-8', use_list=1).unpack()
+ assert re == td
+
+ def testPackUTF32(self):
+ test_data = [
+ compat.u(""),
+ compat.u("abcd"),
+ [compat.u("defgh")],
+ compat.u("Русский текст"),
+ ]
+ for td in test_data:
+ re = unpackb(packb(td, encoding='utf-32'), use_list=1, encoding='utf-32')
+ assert re == td
+
+ def testPackBytes(self):
+ test_data = [
+ b"", b"abcd", (b"defgh",),
+ ]
+ for td in test_data:
+ self.check(td)
+
+ def testIgnoreUnicodeErrors(self):
+ re = unpackb(packb(b'abc\xeddef'), encoding='utf-8', unicode_errors='ignore', use_list=1)
+ assert re == "abcdef"
+
+ def testStrictUnicodeUnpack(self):
+ self.assertRaises(UnicodeDecodeError, unpackb, packb(b'abc\xeddef'), encoding='utf-8', use_list=1)
+
+ def testStrictUnicodePack(self):
+ self.assertRaises(UnicodeEncodeError, packb, compat.u("abc\xeddef"), encoding='ascii', unicode_errors='strict')
+
+ def testIgnoreErrorsPack(self):
+ re = unpackb(packb(compat.u("abcФФФdef"), encoding='ascii', unicode_errors='ignore'), encoding='utf-8', use_list=1)
+ assert re == compat.u("abcdef")
+
+ def testNoEncoding(self):
+ self.assertRaises(TypeError, packb, compat.u("abc"), encoding=None)
+
+ def testDecodeBinary(self):
+ re = unpackb(packb("abc"), encoding=None, use_list=1)
+ assert re == b"abc"
+
+ def testPackFloat(self):
+ assert packb(1.0, use_single_float=True) == b'\xca' + struct.pack('>f', 1.0)
+ assert packb(1.0, use_single_float=False) == b'\xcb' + struct.pack('>d', 1.0)
+
+ def testArraySize(self, sizes=[0, 5, 50, 1000]):
+ bio = compat.BytesIO()
+ packer = Packer()
+ for size in sizes:
+ bio.write(packer.pack_array_header(size))
+ for i in range(size):
+ bio.write(packer.pack(i))
+
+ bio.seek(0)
+ unpacker = Unpacker(bio, use_list=1)
+ for size in sizes:
+ assert unpacker.unpack() == list(range(size))
+
+ def test_manualreset(self, sizes=[0, 5, 50, 1000]):
+ packer = Packer(autoreset=False)
+ for size in sizes:
+ packer.pack_array_header(size)
+ for i in range(size):
+ packer.pack(i)
+
+ bio = compat.BytesIO(packer.bytes())
+ unpacker = Unpacker(bio, use_list=1)
+ for size in sizes:
+ assert unpacker.unpack() == list(range(size))
+
+ packer.reset()
+ assert packer.bytes() == b''
+
+ def testMapSize(self, sizes=[0, 5, 50, 1000]):
+ bio = compat.BytesIO()
+ packer = Packer()
+ for size in sizes:
+ bio.write(packer.pack_map_header(size))
+ for i in range(size):
+ bio.write(packer.pack(i)) # key
+ bio.write(packer.pack(i * 2)) # value
+
+ bio.seek(0)
+ unpacker = Unpacker(bio)
+ for size in sizes:
+ assert unpacker.unpack() == dict((i, i * 2) for i in range(size))
+
+
+ def test_odict(self):
+ seq = [(b'one', 1), (b'two', 2), (b'three', 3), (b'four', 4)]
+ od = OrderedDict(seq)
+ assert unpackb(packb(od), use_list=1) == dict(seq)
+ def pair_hook(seq):
+ return list(seq)
+ assert unpackb(packb(od), object_pairs_hook=pair_hook, use_list=1) == seq
+
+
+ def test_pairlist(self):
+ pairlist = [(b'a', 1), (2, b'b'), (b'foo', b'bar')]
+ packer = Packer()
+ packed = packer.pack_map_pairs(pairlist)
+ unpacked = unpackb(packed, object_pairs_hook=list)
+ assert pairlist == unpacked
diff --git a/pandas/tests/test_msgpack/test_read_size.py b/pandas/tests/test_msgpack/test_read_size.py
new file mode 100644
index 0000000000000..db3e1deb04f8f
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_read_size.py
@@ -0,0 +1,65 @@
+"""Test Unpacker's read_array_header and read_map_header methods"""
+from pandas.msgpack import packb, Unpacker, OutOfData
+UnexpectedTypeException = ValueError
+
+def test_read_array_header():
+ unpacker = Unpacker()
+ unpacker.feed(packb(['a', 'b', 'c']))
+ assert unpacker.read_array_header() == 3
+ assert unpacker.unpack() == b'a'
+ assert unpacker.unpack() == b'b'
+ assert unpacker.unpack() == b'c'
+ try:
+ unpacker.unpack()
+ assert 0, 'should raise exception'
+ except OutOfData:
+ assert 1, 'okay'
+
+
+def test_read_map_header():
+ unpacker = Unpacker()
+ unpacker.feed(packb({'a': 'A'}))
+ assert unpacker.read_map_header() == 1
+ assert unpacker.unpack() == B'a'
+ assert unpacker.unpack() == B'A'
+ try:
+ unpacker.unpack()
+ assert 0, 'should raise exception'
+ except OutOfData:
+ assert 1, 'okay'
+
+def test_incorrect_type_array():
+ unpacker = Unpacker()
+ unpacker.feed(packb(1))
+ try:
+ unpacker.read_array_header()
+ assert 0, 'should raise exception'
+ except UnexpectedTypeException:
+ assert 1, 'okay'
+
+def test_incorrect_type_map():
+ unpacker = Unpacker()
+ unpacker.feed(packb(1))
+ try:
+ unpacker.read_map_header()
+ assert 0, 'should raise exception'
+ except UnexpectedTypeException:
+ assert 1, 'okay'
+
+def test_correct_type_nested_array():
+ unpacker = Unpacker()
+ unpacker.feed(packb({'a': ['b', 'c', 'd']}))
+ try:
+ unpacker.read_array_header()
+ assert 0, 'should raise exception'
+ except UnexpectedTypeException:
+ assert 1, 'okay'
+
+def test_incorrect_type_nested_map():
+ unpacker = Unpacker()
+ unpacker.feed(packb([{'a': 'b'}]))
+ try:
+ unpacker.read_map_header()
+ assert 0, 'should raise exception'
+ except UnexpectedTypeException:
+ assert 1, 'okay'
diff --git a/pandas/tests/test_msgpack/test_seq.py b/pandas/tests/test_msgpack/test_seq.py
new file mode 100644
index 0000000000000..e5ee68c4cab84
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_seq.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from pandas import compat
+from pandas.compat import u
+import pandas.msgpack as msgpack
+
+binarydata = [chr(i) for i in range(256)]
+binarydata = "".join(binarydata)
+if compat.PY3:
+ binarydata = binarydata.encode('utf-8')
+
+def gen_binary_data(idx):
+ data = binarydata[:idx % 300]
+ return data
+
+def test_exceeding_unpacker_read_size():
+ dumpf = compat.BytesIO()
+
+ packer = msgpack.Packer()
+
+ NUMBER_OF_STRINGS = 6
+ read_size = 16
+ # 5 ok for read_size=16, while 6 glibc detected *** python: double free or corruption (fasttop):
+ # 20 ok for read_size=256, while 25 segfaults / glibc detected *** python: double free or corruption (!prev)
+ # 40 ok for read_size=1024, while 50 introduces errors
+ # 7000 ok for read_size=1024*1024, while 8000 leads to glibc detected *** python: double free or corruption (!prev):
+
+ for idx in range(NUMBER_OF_STRINGS):
+ data = gen_binary_data(idx)
+ dumpf.write(packer.pack(data))
+
+ f = compat.BytesIO(dumpf.getvalue())
+ dumpf.close()
+
+ unpacker = msgpack.Unpacker(f, read_size=read_size, use_list=1)
+
+ read_count = 0
+ for idx, o in enumerate(unpacker):
+ assert type(o) == bytes
+ assert o == gen_binary_data(idx)
+ read_count += 1
+
+ assert read_count == NUMBER_OF_STRINGS
diff --git a/pandas/tests/test_msgpack/test_sequnpack.py b/pandas/tests/test_msgpack/test_sequnpack.py
new file mode 100644
index 0000000000000..4c3ad363e5b6e
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_sequnpack.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+import unittest
+import nose
+
+from pandas import compat
+from pandas.msgpack import Unpacker, BufferFull
+from pandas.msgpack import OutOfData
+
+class TestPack(unittest.TestCase):
+
+ def test_partialdata(self):
+ unpacker = Unpacker()
+ unpacker.feed(b'\xa5')
+ self.assertRaises(StopIteration, next, iter(unpacker))
+ unpacker.feed(b'h')
+ self.assertRaises(StopIteration, next, iter(unpacker))
+ unpacker.feed(b'a')
+ self.assertRaises(StopIteration, next, iter(unpacker))
+ unpacker.feed(b'l')
+ self.assertRaises(StopIteration, next, iter(unpacker))
+ unpacker.feed(b'l')
+ self.assertRaises(StopIteration, next, iter(unpacker))
+ unpacker.feed(b'o')
+ assert next(iter(unpacker)) == b'hallo'
+
+ def test_foobar(self):
+ unpacker = Unpacker(read_size=3, use_list=1)
+ unpacker.feed(b'foobar')
+ assert unpacker.unpack() == ord(b'f')
+ assert unpacker.unpack() == ord(b'o')
+ assert unpacker.unpack() == ord(b'o')
+ assert unpacker.unpack() == ord(b'b')
+ assert unpacker.unpack() == ord(b'a')
+ assert unpacker.unpack() == ord(b'r')
+ self.assertRaises(OutOfData, unpacker.unpack)
+
+ unpacker.feed(b'foo')
+ unpacker.feed(b'bar')
+
+ k = 0
+ for o, e in zip(unpacker, 'foobarbaz'):
+ assert o == ord(e)
+ k += 1
+ assert k == len(b'foobar')
+
+ def test_foobar_skip(self):
+ unpacker = Unpacker(read_size=3, use_list=1)
+ unpacker.feed(b'foobar')
+ assert unpacker.unpack() == ord(b'f')
+ unpacker.skip()
+ assert unpacker.unpack() == ord(b'o')
+ unpacker.skip()
+ assert unpacker.unpack() == ord(b'a')
+ unpacker.skip()
+ self.assertRaises(OutOfData, unpacker.unpack)
+
+ def test_maxbuffersize(self):
+ self.assertRaises(ValueError, Unpacker, read_size=5, max_buffer_size=3)
+ unpacker = Unpacker(read_size=3, max_buffer_size=3, use_list=1)
+ unpacker.feed(b'fo')
+ self.assertRaises(BufferFull, unpacker.feed, b'ob')
+ unpacker.feed(b'o')
+ assert ord('f') == next(unpacker)
+ unpacker.feed(b'b')
+ assert ord('o') == next(unpacker)
+ assert ord('o') == next(unpacker)
+ assert ord('b') == next(unpacker)
+
+ def test_readbytes(self):
+ unpacker = Unpacker(read_size=3)
+ unpacker.feed(b'foobar')
+ assert unpacker.unpack() == ord(b'f')
+ assert unpacker.read_bytes(3) == b'oob'
+ assert unpacker.unpack() == ord(b'a')
+ assert unpacker.unpack() == ord(b'r')
+
+ # Test buffer refill
+ unpacker = Unpacker(compat.BytesIO(b'foobar'), read_size=3)
+ assert unpacker.unpack() == ord(b'f')
+ assert unpacker.read_bytes(3) == b'oob'
+ assert unpacker.unpack() == ord(b'a')
+ assert unpacker.unpack() == ord(b'r')
diff --git a/pandas/tests/test_msgpack/test_subtype.py b/pandas/tests/test_msgpack/test_subtype.py
new file mode 100644
index 0000000000000..0934b31cebeda
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_subtype.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from pandas.msgpack import packb, unpackb
+from collections import namedtuple
+
+class MyList(list):
+ pass
+
+class MyDict(dict):
+ pass
+
+class MyTuple(tuple):
+ pass
+
+MyNamedTuple = namedtuple('MyNamedTuple', 'x y')
+
+def test_types():
+ assert packb(MyDict()) == packb(dict())
+ assert packb(MyList()) == packb(list())
+ assert packb(MyNamedTuple(1, 2)) == packb((1, 2))
diff --git a/pandas/tests/test_msgpack/test_unpack_raw.py b/pandas/tests/test_msgpack/test_unpack_raw.py
new file mode 100644
index 0000000000000..0e96a79cf190a
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_unpack_raw.py
@@ -0,0 +1,28 @@
+"""Tests for cases where the user seeks to obtain packed msgpack objects"""
+
+from pandas import compat
+from pandas.msgpack import Unpacker, packb
+
+def test_write_bytes():
+ unpacker = Unpacker()
+ unpacker.feed(b'abc')
+ f = compat.BytesIO()
+ assert unpacker.unpack(f.write) == ord('a')
+ assert f.getvalue() == b'a'
+ f = compat.BytesIO()
+ assert unpacker.skip(f.write) is None
+ assert f.getvalue() == b'b'
+ f = compat.BytesIO()
+ assert unpacker.skip() is None
+ assert f.getvalue() == b''
+
+
+def test_write_bytes_multi_buffer():
+ long_val = (5) * 100
+ expected = packb(long_val)
+ unpacker = Unpacker(compat.BytesIO(expected), read_size=3, max_buffer_size=3)
+
+ f = compat.BytesIO()
+ unpacked = unpacker.unpack(f.write)
+ assert unpacked == long_val
+ assert f.getvalue() == expected
diff --git a/setup.py b/setup.py
index ffd6089bdc88d..c326d14f552e0 100755
--- a/setup.py
+++ b/setup.py
@@ -464,6 +464,23 @@ def pxd(name):
extensions.extend([sparse_ext])
+#----------------------------------------------------------------------
+# msgpack stuff here
+
+if sys.byteorder == 'big':
+ macros = [('__BIG_ENDIAN__', '1')]
+else:
+ macros = [('__LITTLE_ENDIAN__', '1')]
+
+msgpack_ext = Extension('pandas.msgpack',
+ sources = [srcpath('msgpack',
+ suffix=suffix, subdir='')],
+ language='c++',
+ include_dirs=common_include,
+ define_macros=macros)
+
+extensions.append(msgpack_ext)
+
# if not ISRELEASED:
# extensions.extend([sandbox_ext])
@@ -517,6 +534,7 @@ def pxd(name):
'pandas.stats',
'pandas.util',
'pandas.tests',
+ 'pandas.tests.test_msgpack',
'pandas.tools',
'pandas.tools.tests',
'pandas.tseries',
diff --git a/vb_suite/packers.py b/vb_suite/packers.py
new file mode 100644
index 0000000000000..9af6a6b1b0c4e
--- /dev/null
+++ b/vb_suite/packers.py
@@ -0,0 +1,94 @@
+from vbench.api import Benchmark
+from datetime import datetime
+
+start_date = datetime(2013, 5, 1)
+
+common_setup = """from pandas_vb_common import *
+import os
+import pandas as pd
+from pandas.core import common as com
+
+f = '__test__.msg'
+def remove(f):
+ try:
+ os.remove(f)
+ except:
+ pass
+
+index = date_range('20000101',periods=50000,freq='H')
+df = DataFrame({'float1' : randn(50000),
+ 'float2' : randn(50000)},
+ index=index)
+remove(f)
+"""
+
+#----------------------------------------------------------------------
+# msgpack
+
+setup = common_setup + """
+df.to_msgpack(f)
+"""
+
+packers_read_pack = Benchmark("pd.read_msgpack(f)", setup, start_date=start_date)
+
+setup = common_setup + """
+"""
+
+packers_write_pack = Benchmark("df.to_msgpack(f)", setup, cleanup="remove(f)", start_date=start_date)
+
+#----------------------------------------------------------------------
+# pickle
+
+setup = common_setup + """
+df.to_pickle(f)
+"""
+
+packers_read_pickle = Benchmark("pd.read_pickle(f)", setup, start_date=start_date)
+
+setup = common_setup + """
+"""
+
+packers_write_pickle = Benchmark("df.to_pickle(f)", setup, cleanup="remove(f)", start_date=start_date)
+
+#----------------------------------------------------------------------
+# csv
+
+setup = common_setup + """
+df.to_csv(f)
+"""
+
+packers_read_csv = Benchmark("pd.read_csv(f)", setup, start_date=start_date)
+
+setup = common_setup + """
+"""
+
+packers_write_csv = Benchmark("df.to_csv(f)", setup, cleanup="remove(f)", start_date=start_date)
+
+#----------------------------------------------------------------------
+# hdf store
+
+setup = common_setup + """
+df.to_hdf(f,'df')
+"""
+
+packers_read_hdf_store = Benchmark("pd.read_hdf(f,'df')", setup, start_date=start_date)
+
+setup = common_setup + """
+"""
+
+packers_write_hdf_store = Benchmark("df.to_hdf(f,'df')", setup, cleanup="remove(f)", start_date=start_date)
+
+#----------------------------------------------------------------------
+# hdf table
+
+setup = common_setup + """
+df.to_hdf(f,'df',table=True)
+"""
+
+packers_read_hdf_table = Benchmark("pd.read_hdf(f,'df')", setup, start_date=start_date)
+
+setup = common_setup + """
+"""
+
+packers_write_hdf_table = Benchmark("df.to_hdf(f,'df',table=True)", setup, cleanup="remove(f)", start_date=start_date)
+
diff --git a/vb_suite/suite.py b/vb_suite/suite.py
index 57920fcbf7c19..e5002ef78ab9b 100644
--- a/vb_suite/suite.py
+++ b/vb_suite/suite.py
@@ -16,6 +16,7 @@
'join_merge',
'miscellaneous',
'panel_ctor',
+ 'packers',
'parser',
'plotting',
'reindex',
| extension of #3828
ToDo
- [x] remove use of `pytest` in test_msgpack
- [ ] PERF!
```
msgpack serialization/deserialization
support all pandas objects: Timestamp,Period,all index types,Series,DataFrame,Panel,Sparse suite
docs included (in io.rst)
iterator support
top-level api support
compression and direct calls to in-line msgpack (enabled via #3828) will wait for 0.13+
closes #686
```
```
Benchmarking: 50k rows of 2x columns of floats with a datetime index
In [4]: %timeit df.to_msgpack('foo')
100 loops, best of 3: 16.5 ms per loop
In [2]: %timeit df.to_pickle('foo')
10 loops, best of 3: 12.9 ms per loop
In [6]: %timeit df.to_csv('foo')
1 loops, best of 3: 470 ms per loop
In [11]: %timeit df.to_hdf('foo2','df',mode='w')
10 loops, best of 3: 20.1 ms per loop
In [13]: %timeit df.to_hdf('foo2','df',mode='w',table=True)
10 loops, best of 3: 81.4 ms per loop
```
```
In [5]: %timeit pd.read_msgpack('foo')
100 loops, best of 3: 16.3 ms per loop
In [3]: %timeit pd.read_pickle('foo')
1000 loops, best of 3: 1.28 ms per loop
In [7]: %timeit pd.read_csv('foo')
10 loops, best of 3: 46.1 ms per loop
In [12]: %timeit pd.read_hdf('foo2','df')
100 loops, best of 3: 5.64 ms per loop
In [14]: %timeit pd.read_hdf('foo2','df')
100 loops, best of 3: 9.39 ms per loop
```
```
In [1]: df = DataFrame(randn(10,2),
...: columns=list('AB'),
...: index=date_range('20130101',periods=10))
In [2]: pd.to_msgpack('foo.msg',df)
In [3]: pd.read_msgpack('foo.msg')
Out[3]:
A B
2013-01-01 0.676700 -1.702599
2013-01-02 -0.070164 -1.368716
2013-01-03 -0.877145 -1.427964
2013-01-04 -0.295715 -0.176954
2013-01-05 0.566986 0.588918
2013-01-06 -0.307070 1.541773
2013-01-07 1.302388 0.689701
2013-01-08 0.165292 0.273496
2013-01-09 -3.492113 -1.178075
2013-01-10 -1.069521 0.848614
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3831 | 2013-06-10T14:10:57Z | 2013-10-01T13:54:09Z | 2013-10-01T13:54:09Z | 2014-06-24T20:07:18Z |
Add msgpack as submodule for pandas | diff --git a/LICENSES/MSGPACK_LICENSE b/LICENSES/MSGPACK_LICENSE
new file mode 100644
index 0000000000000..ae1b0f2f32f06
--- /dev/null
+++ b/LICENSES/MSGPACK_LICENSE
@@ -0,0 +1,13 @@
+Copyright (C) 2008-2011 INADA Naoki <songofacandy@gmail.com>
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/RELEASE.rst b/RELEASE.rst
index 307986ab81681..4e46093d584f4 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -51,6 +51,7 @@ pandas 0.11.1
- A ``filter`` method on grouped Series or DataFrames returns a subset of
the original (GH3680_, GH919_)
- Access to historical Google Finance data in pandas.io.data (GH3814_)
+ - Add pandas.msgpack submodule
**Improvements to existing features**
diff --git a/pandas/msgpack.pyx b/pandas/msgpack.pyx
new file mode 100644
index 0000000000000..a04a19d280467
--- /dev/null
+++ b/pandas/msgpack.pyx
@@ -0,0 +1,652 @@
+# coding: utf-8
+#cython: embedsignature=True
+
+from cpython cimport *
+cdef extern from "Python.h":
+ ctypedef char* const_char_ptr "const char*"
+ ctypedef char* const_void_ptr "const void*"
+ ctypedef struct PyObject
+ cdef int PyObject_AsReadBuffer(object o, const_void_ptr* buff, Py_ssize_t* buf_len) except -1
+
+from libc.stdlib cimport *
+from libc.string cimport *
+from libc.limits cimport *
+
+
+class UnpackException(Exception):
+ pass
+
+
+class BufferFull(UnpackException):
+ pass
+
+
+class OutOfData(UnpackException):
+ pass
+
+
+class UnpackValueError(UnpackException, ValueError):
+ pass
+
+
+class ExtraData(ValueError):
+ def __init__(self, unpacked, extra):
+ self.unpacked = unpacked
+ self.extra = extra
+
+ def __str__(self):
+ return "unpack(b) recieved extra data."
+
+class PackException(Exception):
+ pass
+
+class PackValueError(PackException, ValueError):
+ pass
+
+cdef extern from "msgpack/unpack.h":
+ ctypedef struct msgpack_user:
+ bint use_list
+ PyObject* object_hook
+ bint has_pairs_hook # call object_hook with k-v pairs
+ PyObject* list_hook
+ char *encoding
+ char *unicode_errors
+
+ ctypedef struct template_context:
+ msgpack_user user
+ PyObject* obj
+ size_t count
+ unsigned int ct
+ PyObject* key
+
+ ctypedef int (*execute_fn)(template_context* ctx, const_char_ptr data,
+ size_t len, size_t* off) except? -1
+ execute_fn template_construct
+ execute_fn template_skip
+ execute_fn read_array_header
+ execute_fn read_map_header
+ void template_init(template_context* ctx)
+ object template_data(template_context* ctx)
+
+cdef extern from "msgpack/pack.h":
+ struct msgpack_packer:
+ char* buf
+ size_t length
+ size_t buf_size
+
+ int msgpack_pack_int(msgpack_packer* pk, int d)
+ int msgpack_pack_nil(msgpack_packer* pk)
+ int msgpack_pack_true(msgpack_packer* pk)
+ int msgpack_pack_false(msgpack_packer* pk)
+ int msgpack_pack_long(msgpack_packer* pk, long d)
+ int msgpack_pack_long_long(msgpack_packer* pk, long long d)
+ int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d)
+ int msgpack_pack_float(msgpack_packer* pk, float d)
+ int msgpack_pack_double(msgpack_packer* pk, double d)
+ int msgpack_pack_array(msgpack_packer* pk, size_t l)
+ int msgpack_pack_map(msgpack_packer* pk, size_t l)
+ int msgpack_pack_raw(msgpack_packer* pk, size_t l)
+ int msgpack_pack_raw_body(msgpack_packer* pk, char* body, size_t l)
+
+cdef int DEFAULT_RECURSE_LIMIT=511
+
+
+
+cdef class Packer(object):
+ """MessagePack Packer
+
+ usage:
+
+ packer = Packer()
+ astream.write(packer.pack(a))
+ astream.write(packer.pack(b))
+
+ Packer's constructor has some keyword arguments:
+
+ * *defaut* - Convert user type to builtin type that Packer supports.
+ See also simplejson's document.
+ * *encoding* - Convert unicode to bytes with this encoding. (default: 'utf-8')
+ * *unicode_erros* - Error handler for encoding unicode. (default: 'strict')
+ * *use_single_float* - Use single precision float type for float. (default: False)
+ * *autoreset* - Reset buffer after each pack and return it's content as `bytes`. (default: True).
+ If set this to false, use `bytes()` to get content and `.reset()` to clear buffer.
+ """
+ cdef msgpack_packer pk
+ cdef object _default
+ cdef object _bencoding
+ cdef object _berrors
+ cdef char *encoding
+ cdef char *unicode_errors
+ cdef bool use_float
+ cdef bint autoreset
+
+ def __cinit__(self):
+ cdef int buf_size = 1024*1024
+ self.pk.buf = <char*> malloc(buf_size);
+ if self.pk.buf == NULL:
+ raise MemoryError("Unable to allocate internal buffer.")
+ self.pk.buf_size = buf_size
+ self.pk.length = 0
+
+ def __init__(self, default=None, encoding='utf-8', unicode_errors='strict',
+ use_single_float=False, bint autoreset=1):
+ self.use_float = use_single_float
+ self.autoreset = autoreset
+ if default is not None:
+ if not PyCallable_Check(default):
+ raise TypeError("default must be a callable.")
+ self._default = default
+ if encoding is None:
+ self.encoding = NULL
+ self.unicode_errors = NULL
+ else:
+ if isinstance(encoding, unicode):
+ self._bencoding = encoding.encode('ascii')
+ else:
+ self._bencoding = encoding
+ self.encoding = PyBytes_AsString(self._bencoding)
+ if isinstance(unicode_errors, unicode):
+ self._berrors = unicode_errors.encode('ascii')
+ else:
+ self._berrors = unicode_errors
+ self.unicode_errors = PyBytes_AsString(self._berrors)
+
+ def __dealloc__(self):
+ free(self.pk.buf);
+
+ cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1:
+ cdef long long llval
+ cdef unsigned long long ullval
+ cdef long longval
+ cdef float fval
+ cdef double dval
+ cdef char* rawval
+ cdef int ret
+ cdef dict d
+
+ if nest_limit < 0:
+ raise PackValueError("recursion limit exceeded.")
+
+ if o is None:
+ ret = msgpack_pack_nil(&self.pk)
+ elif isinstance(o, bool):
+ if o:
+ ret = msgpack_pack_true(&self.pk)
+ else:
+ ret = msgpack_pack_false(&self.pk)
+ elif PyLong_Check(o):
+ if o > 0:
+ ullval = o
+ ret = msgpack_pack_unsigned_long_long(&self.pk, ullval)
+ else:
+ llval = o
+ ret = msgpack_pack_long_long(&self.pk, llval)
+ elif PyInt_Check(o):
+ longval = o
+ ret = msgpack_pack_long(&self.pk, longval)
+ elif PyFloat_Check(o):
+ if self.use_float:
+ fval = o
+ ret = msgpack_pack_float(&self.pk, fval)
+ else:
+ dval = o
+ ret = msgpack_pack_double(&self.pk, dval)
+ elif PyBytes_Check(o):
+ rawval = o
+ ret = msgpack_pack_raw(&self.pk, len(o))
+ if ret == 0:
+ ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
+ elif PyUnicode_Check(o):
+ if not self.encoding:
+ raise TypeError("Can't encode unicode string: no encoding is specified")
+ o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors)
+ rawval = o
+ ret = msgpack_pack_raw(&self.pk, len(o))
+ if ret == 0:
+ ret = msgpack_pack_raw_body(&self.pk, rawval, len(o))
+ elif PyDict_CheckExact(o):
+ d = <dict>o
+ ret = msgpack_pack_map(&self.pk, len(d))
+ if ret == 0:
+ for k, v in d.iteritems():
+ ret = self._pack(k, nest_limit-1)
+ if ret != 0: break
+ ret = self._pack(v, nest_limit-1)
+ if ret != 0: break
+ elif PyDict_Check(o):
+ ret = msgpack_pack_map(&self.pk, len(o))
+ if ret == 0:
+ for k, v in o.items():
+ ret = self._pack(k, nest_limit-1)
+ if ret != 0: break
+ ret = self._pack(v, nest_limit-1)
+ if ret != 0: break
+ elif PyTuple_Check(o) or PyList_Check(o):
+ ret = msgpack_pack_array(&self.pk, len(o))
+ if ret == 0:
+ for v in o:
+ ret = self._pack(v, nest_limit-1)
+ if ret != 0: break
+ elif self._default:
+ o = self._default(o)
+ ret = self._pack(o, nest_limit-1)
+ else:
+ raise TypeError("can't serialize %r" % (o,))
+ return ret
+
+ cpdef pack(self, object obj):
+ cdef int ret
+ ret = self._pack(obj, DEFAULT_RECURSE_LIMIT)
+ if ret == -1:
+ raise MemoryError
+ elif ret: # should not happen.
+ raise TypeError
+ if self.autoreset:
+ buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+ self.pk.length = 0
+ return buf
+
+ def pack_array_header(self, size_t size):
+ cdef int ret = msgpack_pack_array(&self.pk, size)
+ if ret == -1:
+ raise MemoryError
+ elif ret: # should not happen
+ raise TypeError
+ if self.autoreset:
+ buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+ self.pk.length = 0
+ return buf
+
+ def pack_map_header(self, size_t size):
+ cdef int ret = msgpack_pack_map(&self.pk, size)
+ if ret == -1:
+ raise MemoryError
+ elif ret: # should not happen
+ raise TypeError
+ if self.autoreset:
+ buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+ self.pk.length = 0
+ return buf
+
+ def pack_map_pairs(self, object pairs):
+ """
+ Pack *pairs* as msgpack map type.
+
+ *pairs* should sequence of pair.
+ (`len(pairs)` and `for k, v in *pairs*:` should be supported.)
+ """
+ cdef int ret = msgpack_pack_map(&self.pk, len(pairs))
+ if ret == 0:
+ for k, v in pairs:
+ ret = self._pack(k)
+ if ret != 0: break
+ ret = self._pack(v)
+ if ret != 0: break
+ if ret == -1:
+ raise MemoryError
+ elif ret: # should not happen
+ raise TypeError
+ if self.autoreset:
+ buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+ self.pk.length = 0
+ return buf
+
+ def reset(self):
+ """Clear internal buffer."""
+ self.pk.length = 0
+
+ def bytes(self):
+ """Return buffer content."""
+ return PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+
+
+def pack(object o, object stream, default=None, encoding='utf-8', unicode_errors='strict'):
+ """
+ pack an object `o` and write it to stream)."""
+ packer = Packer(default=default, encoding=encoding, unicode_errors=unicode_errors)
+ stream.write(packer.pack(o))
+
+def packb(object o, default=None, encoding='utf-8', unicode_errors='strict', use_single_float=False):
+ """
+ pack o and return packed bytes."""
+ packer = Packer(default=default, encoding=encoding, unicode_errors=unicode_errors,
+ use_single_float=use_single_float)
+ return packer.pack(o)
+
+
+cdef inline init_ctx(template_context *ctx,
+ object object_hook, object object_pairs_hook, object list_hook,
+ bint use_list, char* encoding, char* unicode_errors):
+ template_init(ctx)
+ ctx.user.use_list = use_list
+ ctx.user.object_hook = ctx.user.list_hook = <PyObject*>NULL
+
+ if object_hook is not None and object_pairs_hook is not None:
+ raise ValueError("object_pairs_hook and object_hook are mutually exclusive.")
+
+ if object_hook is not None:
+ if not PyCallable_Check(object_hook):
+ raise TypeError("object_hook must be a callable.")
+ ctx.user.object_hook = <PyObject*>object_hook
+
+ if object_pairs_hook is None:
+ ctx.user.has_pairs_hook = False
+ else:
+ if not PyCallable_Check(object_pairs_hook):
+ raise TypeError("object_pairs_hook must be a callable.")
+ ctx.user.object_hook = <PyObject*>object_pairs_hook
+ ctx.user.has_pairs_hook = True
+
+ if list_hook is not None:
+ if not PyCallable_Check(list_hook):
+ raise TypeError("list_hook must be a callable.")
+ ctx.user.list_hook = <PyObject*>list_hook
+
+ ctx.user.encoding = encoding
+ ctx.user.unicode_errors = unicode_errors
+
+def unpackb(object packed, object object_hook=None, object list_hook=None,
+ bint use_list=1, encoding=None, unicode_errors="strict",
+ object_pairs_hook=None,
+ ):
+ """Unpack packed_bytes to object. Returns an unpacked object.
+
+ Raises `ValueError` when `packed` contains extra bytes.
+ """
+ cdef template_context ctx
+ cdef size_t off = 0
+ cdef int ret
+
+ cdef char* buf
+ cdef Py_ssize_t buf_len
+ cdef char* cenc = NULL
+ cdef char* cerr = NULL
+
+ PyObject_AsReadBuffer(packed, <const_void_ptr*>&buf, &buf_len)
+
+ if encoding is not None:
+ if isinstance(encoding, unicode):
+ encoding = encoding.encode('ascii')
+ cenc = PyBytes_AsString(encoding)
+
+ if unicode_errors is not None:
+ if isinstance(unicode_errors, unicode):
+ unicode_errors = unicode_errors.encode('ascii')
+ cerr = PyBytes_AsString(unicode_errors)
+
+ init_ctx(&ctx, object_hook, object_pairs_hook, list_hook, use_list, cenc, cerr)
+ ret = template_construct(&ctx, buf, buf_len, &off)
+ if ret == 1:
+ obj = template_data(&ctx)
+ if off < buf_len:
+ raise ExtraData(obj, PyBytes_FromStringAndSize(buf+off, buf_len-off))
+ return obj
+ elif ret < 0:
+ raise ValueError("Unpack failed: error = %d" % (ret,))
+ else:
+ raise UnpackValueError
+
+
+def unpack(object stream, object object_hook=None, object list_hook=None,
+ bint use_list=1, encoding=None, unicode_errors="strict",
+ object_pairs_hook=None,
+ ):
+ """Unpack an object from `stream`.
+
+ Raises `ValueError` when `stream` has extra bytes.
+ """
+ return unpackb(stream.read(), use_list=use_list,
+ object_hook=object_hook, object_pairs_hook=object_pairs_hook, list_hook=list_hook,
+ encoding=encoding, unicode_errors=unicode_errors,
+ )
+
+
+cdef class Unpacker(object):
+ """
+ Streaming unpacker.
+
+ `file_like` is a file-like object having `.read(n)` method.
+ When `Unpacker` initialized with `file_like`, unpacker reads serialized data
+ from it and `.feed()` method is not usable.
+
+ `read_size` is used as `file_like.read(read_size)`.
+ (default: min(1024**2, max_buffer_size))
+
+ If `use_list` is true (default), msgpack list is deserialized to Python list.
+ Otherwise, it is deserialized to Python tuple.
+
+ `object_hook` is same to simplejson. If it is not None, it should be callable
+ and Unpacker calls it with a dict argument after deserializing a map.
+
+ `object_pairs_hook` is same to simplejson. If it is not None, it should be callable
+ and Unpacker calls it with a list of key-value pairs after deserializing a map.
+
+ `encoding` is encoding used for decoding msgpack bytes. If it is None (default),
+ msgpack bytes is deserialized to Python bytes.
+
+ `unicode_errors` is used for decoding bytes.
+
+ `max_buffer_size` limits size of data waiting unpacked.
+ 0 means system's INT_MAX (default).
+ Raises `BufferFull` exception when it is insufficient.
+ You shoud set this parameter when unpacking data from untrasted source.
+
+ example of streaming deserialize from file-like object::
+
+ unpacker = Unpacker(file_like)
+ for o in unpacker:
+ do_something(o)
+
+ example of streaming deserialize from socket::
+
+ unpacker = Unpacker()
+ while 1:
+ buf = sock.recv(1024**2)
+ if not buf:
+ break
+ unpacker.feed(buf)
+ for o in unpacker:
+ do_something(o)
+ """
+ cdef template_context ctx
+ cdef char* buf
+ cdef size_t buf_size, buf_head, buf_tail
+ cdef object file_like
+ cdef object file_like_read
+ cdef Py_ssize_t read_size
+ cdef object object_hook
+ cdef object encoding, unicode_errors
+ cdef size_t max_buffer_size
+
+ def __cinit__(self):
+ self.buf = NULL
+
+ def __dealloc__(self):
+ free(self.buf)
+ self.buf = NULL
+
+ def __init__(self, file_like=None, Py_ssize_t read_size=0, bint use_list=1,
+ object object_hook=None, object object_pairs_hook=None, object list_hook=None,
+ encoding=None, unicode_errors='strict', int max_buffer_size=0,
+ ):
+ cdef char *cenc=NULL, *cerr=NULL
+
+ self.file_like = file_like
+ if file_like:
+ self.file_like_read = file_like.read
+ if not PyCallable_Check(self.file_like_read):
+ raise ValueError("`file_like.read` must be a callable.")
+ if not max_buffer_size:
+ max_buffer_size = INT_MAX
+ if read_size > max_buffer_size:
+ raise ValueError("read_size should be less or equal to max_buffer_size")
+ if not read_size:
+ read_size = min(max_buffer_size, 1024**2)
+ self.max_buffer_size = max_buffer_size
+ self.read_size = read_size
+ self.buf = <char*>malloc(read_size)
+ if self.buf == NULL:
+ raise MemoryError("Unable to allocate internal buffer.")
+ self.buf_size = read_size
+ self.buf_head = 0
+ self.buf_tail = 0
+
+ if encoding is not None:
+ if isinstance(encoding, unicode):
+ encoding = encoding.encode('ascii')
+ self.encoding = encoding
+ cenc = PyBytes_AsString(encoding)
+
+ if unicode_errors is not None:
+ if isinstance(unicode_errors, unicode):
+ unicode_errors = unicode_errors.encode('ascii')
+ self.unicode_errors = unicode_errors
+ cerr = PyBytes_AsString(unicode_errors)
+
+ init_ctx(&self.ctx, object_hook, object_pairs_hook, list_hook, use_list, cenc, cerr)
+
+ def feed(self, object next_bytes):
+ """Append `next_bytes` to internal buffer."""
+ cdef char* buf
+ cdef Py_ssize_t buf_len
+ if self.file_like is not None:
+ raise AssertionError(
+ "unpacker.feed() is not be able to use with `file_like`.")
+ PyObject_AsReadBuffer(next_bytes, <const_void_ptr*>&buf, &buf_len)
+ self.append_buffer(buf, buf_len)
+
+ cdef append_buffer(self, void* _buf, Py_ssize_t _buf_len):
+ cdef:
+ char* buf = self.buf
+ char* new_buf
+ size_t head = self.buf_head
+ size_t tail = self.buf_tail
+ size_t buf_size = self.buf_size
+ size_t new_size
+
+ if tail + _buf_len > buf_size:
+ if ((tail - head) + _buf_len) <= buf_size:
+ # move to front.
+ memmove(buf, buf + head, tail - head)
+ tail -= head
+ head = 0
+ else:
+ # expand buffer.
+ new_size = (tail-head) + _buf_len
+ if new_size > self.max_buffer_size:
+ raise BufferFull
+ new_size = min(new_size*2, self.max_buffer_size)
+ new_buf = <char*>malloc(new_size)
+ if new_buf == NULL:
+ # self.buf still holds old buffer and will be freed during
+ # obj destruction
+ raise MemoryError("Unable to enlarge internal buffer.")
+ memcpy(new_buf, buf + head, tail - head)
+ free(buf)
+
+ buf = new_buf
+ buf_size = new_size
+ tail -= head
+ head = 0
+
+ memcpy(buf + tail, <char*>(_buf), _buf_len)
+ self.buf = buf
+ self.buf_head = head
+ self.buf_size = buf_size
+ self.buf_tail = tail + _buf_len
+
+ cdef read_from_file(self):
+ next_bytes = self.file_like_read(
+ min(self.read_size,
+ self.max_buffer_size - (self.buf_tail - self.buf_head)
+ ))
+ if next_bytes:
+ self.append_buffer(PyBytes_AsString(next_bytes), PyBytes_Size(next_bytes))
+ else:
+ self.file_like = None
+
+ cdef object _unpack(self, execute_fn execute, object write_bytes, bint iter=0):
+ cdef int ret
+ cdef object obj
+ cdef size_t prev_head
+ while 1:
+ prev_head = self.buf_head
+ ret = execute(&self.ctx, self.buf, self.buf_tail, &self.buf_head)
+ if write_bytes is not None:
+ write_bytes(PyBytes_FromStringAndSize(self.buf + prev_head, self.buf_head - prev_head))
+
+ if ret == 1:
+ obj = template_data(&self.ctx)
+ template_init(&self.ctx)
+ return obj
+ elif ret == 0:
+ if self.file_like is not None:
+ self.read_from_file()
+ continue
+ if iter:
+ raise StopIteration("No more data to unpack.")
+ else:
+ raise OutOfData("No more data to unpack.")
+ else:
+ raise ValueError("Unpack failed: error = %d" % (ret,))
+
+ def read_bytes(self, Py_ssize_t nbytes):
+ """read a specified number of raw bytes from the stream"""
+ cdef size_t nread
+ nread = min(self.buf_tail - self.buf_head, nbytes)
+ ret = PyBytes_FromStringAndSize(self.buf + self.buf_head, nread)
+ self.buf_head += nread
+ if len(ret) < nbytes and self.file_like is not None:
+ ret += self.file_like.read(nbytes - len(ret))
+ return ret
+
+ def unpack(self, object write_bytes=None):
+ """
+ unpack one object
+
+ If write_bytes is not None, it will be called with parts of the raw
+ message as it is unpacked.
+
+ Raises `OutOfData` when there are no more bytes to unpack.
+ """
+ return self._unpack(template_construct, write_bytes)
+
+ def skip(self, object write_bytes=None):
+ """
+ read and ignore one object, returning None
+
+ If write_bytes is not None, it will be called with parts of the raw
+ message as it is unpacked.
+
+ Raises `OutOfData` when there are no more bytes to unpack.
+ """
+ return self._unpack(template_skip, write_bytes)
+
+ def read_array_header(self, object write_bytes=None):
+ """assuming the next object is an array, return its size n, such that
+ the next n unpack() calls will iterate over its contents.
+
+ Raises `OutOfData` when there are no more bytes to unpack.
+ """
+ return self._unpack(read_array_header, write_bytes)
+
+ def read_map_header(self, object write_bytes=None):
+ """assuming the next object is a map, return its size n, such that the
+ next n * 2 unpack() calls will iterate over its key-value pairs.
+
+ Raises `OutOfData` when there are no more bytes to unpack.
+ """
+ return self._unpack(read_map_header, write_bytes)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return self._unpack(template_construct, None, 1)
+
+ # for debug.
+ #def _buf(self):
+ # return PyString_FromStringAndSize(self.buf, self.buf_tail)
+
+ #def _off(self):
+ # return self.buf_head
diff --git a/pandas/src/msgpack/pack.h b/pandas/src/msgpack/pack.h
new file mode 100644
index 0000000000000..bb939d93ebeca
--- /dev/null
+++ b/pandas/src/msgpack/pack.h
@@ -0,0 +1,108 @@
+/*
+ * MessagePack for Python packing routine
+ *
+ * Copyright (C) 2009 Naoki INADA
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stddef.h>
+#include <stdlib.h>
+#include "sysdep.h"
+#include <limits.h>
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _MSC_VER
+#define inline __inline
+#endif
+
+typedef struct msgpack_packer {
+ char *buf;
+ size_t length;
+ size_t buf_size;
+} msgpack_packer;
+
+typedef struct Packer Packer;
+
+static inline int msgpack_pack_short(msgpack_packer* pk, short d);
+static inline int msgpack_pack_int(msgpack_packer* pk, int d);
+static inline int msgpack_pack_long(msgpack_packer* pk, long d);
+static inline int msgpack_pack_long_long(msgpack_packer* pk, long long d);
+static inline int msgpack_pack_unsigned_short(msgpack_packer* pk, unsigned short d);
+static inline int msgpack_pack_unsigned_int(msgpack_packer* pk, unsigned int d);
+static inline int msgpack_pack_unsigned_long(msgpack_packer* pk, unsigned long d);
+static inline int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d);
+
+static inline int msgpack_pack_uint8(msgpack_packer* pk, uint8_t d);
+static inline int msgpack_pack_uint16(msgpack_packer* pk, uint16_t d);
+static inline int msgpack_pack_uint32(msgpack_packer* pk, uint32_t d);
+static inline int msgpack_pack_uint64(msgpack_packer* pk, uint64_t d);
+static inline int msgpack_pack_int8(msgpack_packer* pk, int8_t d);
+static inline int msgpack_pack_int16(msgpack_packer* pk, int16_t d);
+static inline int msgpack_pack_int32(msgpack_packer* pk, int32_t d);
+static inline int msgpack_pack_int64(msgpack_packer* pk, int64_t d);
+
+static inline int msgpack_pack_float(msgpack_packer* pk, float d);
+static inline int msgpack_pack_double(msgpack_packer* pk, double d);
+
+static inline int msgpack_pack_nil(msgpack_packer* pk);
+static inline int msgpack_pack_true(msgpack_packer* pk);
+static inline int msgpack_pack_false(msgpack_packer* pk);
+
+static inline int msgpack_pack_array(msgpack_packer* pk, unsigned int n);
+
+static inline int msgpack_pack_map(msgpack_packer* pk, unsigned int n);
+
+static inline int msgpack_pack_raw(msgpack_packer* pk, size_t l);
+static inline int msgpack_pack_raw_body(msgpack_packer* pk, const void* b, size_t l);
+
+static inline int msgpack_pack_write(msgpack_packer* pk, const char *data, size_t l)
+{
+ char* buf = pk->buf;
+ size_t bs = pk->buf_size;
+ size_t len = pk->length;
+
+ if (len + l > bs) {
+ bs = (len + l) * 2;
+ buf = (char*)realloc(buf, bs);
+ if (!buf) return -1;
+ }
+ memcpy(buf + len, data, l);
+ len += l;
+
+ pk->buf = buf;
+ pk->buf_size = bs;
+ pk->length = len;
+ return 0;
+}
+
+#define msgpack_pack_inline_func(name) \
+ static inline int msgpack_pack ## name
+
+#define msgpack_pack_inline_func_cint(name) \
+ static inline int msgpack_pack ## name
+
+#define msgpack_pack_user msgpack_packer*
+
+#define msgpack_pack_append_buffer(user, buf, len) \
+ return msgpack_pack_write(user, (const char*)buf, len)
+
+#include "pack_template.h"
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/pandas/src/msgpack/pack_template.h b/pandas/src/msgpack/pack_template.h
new file mode 100644
index 0000000000000..65c959dd8ce63
--- /dev/null
+++ b/pandas/src/msgpack/pack_template.h
@@ -0,0 +1,771 @@
+/*
+ * MessagePack packing routine template
+ *
+ * Copyright (C) 2008-2010 FURUHASHI Sadayuki
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(__LITTLE_ENDIAN__)
+#define TAKE8_8(d) ((uint8_t*)&d)[0]
+#define TAKE8_16(d) ((uint8_t*)&d)[0]
+#define TAKE8_32(d) ((uint8_t*)&d)[0]
+#define TAKE8_64(d) ((uint8_t*)&d)[0]
+#elif defined(__BIG_ENDIAN__)
+#define TAKE8_8(d) ((uint8_t*)&d)[0]
+#define TAKE8_16(d) ((uint8_t*)&d)[1]
+#define TAKE8_32(d) ((uint8_t*)&d)[3]
+#define TAKE8_64(d) ((uint8_t*)&d)[7]
+#endif
+
+#ifndef msgpack_pack_inline_func
+#error msgpack_pack_inline_func template is not defined
+#endif
+
+#ifndef msgpack_pack_user
+#error msgpack_pack_user type is not defined
+#endif
+
+#ifndef msgpack_pack_append_buffer
+#error msgpack_pack_append_buffer callback is not defined
+#endif
+
+
+/*
+ * Integer
+ */
+
+#define msgpack_pack_real_uint8(x, d) \
+do { \
+ if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \
+ } else { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_8(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
+} while(0)
+
+#define msgpack_pack_real_uint16(x, d) \
+do { \
+ if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \
+ } else if(d < (1<<8)) { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } else { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } \
+} while(0)
+
+#define msgpack_pack_real_uint32(x, d) \
+do { \
+ if(d < (1<<8)) { \
+ if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \
+ } else { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
+ } else { \
+ if(d < (1<<16)) { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else { \
+ /* unsigned 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } \
+ } \
+} while(0)
+
+#define msgpack_pack_real_uint64(x, d) \
+do { \
+ if(d < (1ULL<<8)) { \
+ if(d < (1ULL<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \
+ } else { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_64(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
+ } else { \
+ if(d < (1ULL<<16)) { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else if(d < (1ULL<<32)) { \
+ /* unsigned 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } else { \
+ /* unsigned 64 */ \
+ unsigned char buf[9]; \
+ buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \
+ msgpack_pack_append_buffer(x, buf, 9); \
+ } \
+ } \
+} while(0)
+
+#define msgpack_pack_real_int8(x, d) \
+do { \
+ if(d < -(1<<5)) { \
+ /* signed 8 */ \
+ unsigned char buf[2] = {0xd0, TAKE8_8(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } else { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \
+ } \
+} while(0)
+
+#define msgpack_pack_real_int16(x, d) \
+do { \
+ if(d < -(1<<5)) { \
+ if(d < -(1<<7)) { \
+ /* signed 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else { \
+ /* signed 8 */ \
+ unsigned char buf[2] = {0xd0, TAKE8_16(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
+ } else if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \
+ } else { \
+ if(d < (1<<8)) { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } else { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } \
+ } \
+} while(0)
+
+#define msgpack_pack_real_int32(x, d) \
+do { \
+ if(d < -(1<<5)) { \
+ if(d < -(1<<15)) { \
+ /* signed 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } else if(d < -(1<<7)) { \
+ /* signed 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else { \
+ /* signed 8 */ \
+ unsigned char buf[2] = {0xd0, TAKE8_32(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
+ } else if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \
+ } else { \
+ if(d < (1<<8)) { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } else if(d < (1<<16)) { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else { \
+ /* unsigned 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } \
+ } \
+} while(0)
+
+#define msgpack_pack_real_int64(x, d) \
+do { \
+ if(d < -(1LL<<5)) { \
+ if(d < -(1LL<<15)) { \
+ if(d < -(1LL<<31)) { \
+ /* signed 64 */ \
+ unsigned char buf[9]; \
+ buf[0] = 0xd3; _msgpack_store64(&buf[1], d); \
+ msgpack_pack_append_buffer(x, buf, 9); \
+ } else { \
+ /* signed 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } \
+ } else { \
+ if(d < -(1<<7)) { \
+ /* signed 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } else { \
+ /* signed 8 */ \
+ unsigned char buf[2] = {0xd0, TAKE8_64(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } \
+ } \
+ } else if(d < (1<<7)) { \
+ /* fixnum */ \
+ msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \
+ } else { \
+ if(d < (1LL<<16)) { \
+ if(d < (1<<8)) { \
+ /* unsigned 8 */ \
+ unsigned char buf[2] = {0xcc, TAKE8_64(d)}; \
+ msgpack_pack_append_buffer(x, buf, 2); \
+ } else { \
+ /* unsigned 16 */ \
+ unsigned char buf[3]; \
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \
+ msgpack_pack_append_buffer(x, buf, 3); \
+ } \
+ } else { \
+ if(d < (1LL<<32)) { \
+ /* unsigned 32 */ \
+ unsigned char buf[5]; \
+ buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \
+ msgpack_pack_append_buffer(x, buf, 5); \
+ } else { \
+ /* unsigned 64 */ \
+ unsigned char buf[9]; \
+ buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \
+ msgpack_pack_append_buffer(x, buf, 9); \
+ } \
+ } \
+ } \
+} while(0)
+
+
+#ifdef msgpack_pack_inline_func_fixint
+
+msgpack_pack_inline_func_fixint(_uint8)(msgpack_pack_user x, uint8_t d)
+{
+ unsigned char buf[2] = {0xcc, TAKE8_8(d)};
+ msgpack_pack_append_buffer(x, buf, 2);
+}
+
+msgpack_pack_inline_func_fixint(_uint16)(msgpack_pack_user x, uint16_t d)
+{
+ unsigned char buf[3];
+ buf[0] = 0xcd; _msgpack_store16(&buf[1], d);
+ msgpack_pack_append_buffer(x, buf, 3);
+}
+
+msgpack_pack_inline_func_fixint(_uint32)(msgpack_pack_user x, uint32_t d)
+{
+ unsigned char buf[5];
+ buf[0] = 0xce; _msgpack_store32(&buf[1], d);
+ msgpack_pack_append_buffer(x, buf, 5);
+}
+
+msgpack_pack_inline_func_fixint(_uint64)(msgpack_pack_user x, uint64_t d)
+{
+ unsigned char buf[9];
+ buf[0] = 0xcf; _msgpack_store64(&buf[1], d);
+ msgpack_pack_append_buffer(x, buf, 9);
+}
+
+msgpack_pack_inline_func_fixint(_int8)(msgpack_pack_user x, int8_t d)
+{
+ unsigned char buf[2] = {0xd0, TAKE8_8(d)};
+ msgpack_pack_append_buffer(x, buf, 2);
+}
+
+msgpack_pack_inline_func_fixint(_int16)(msgpack_pack_user x, int16_t d)
+{
+ unsigned char buf[3];
+ buf[0] = 0xd1; _msgpack_store16(&buf[1], d);
+ msgpack_pack_append_buffer(x, buf, 3);
+}
+
+msgpack_pack_inline_func_fixint(_int32)(msgpack_pack_user x, int32_t d)
+{
+ unsigned char buf[5];
+ buf[0] = 0xd2; _msgpack_store32(&buf[1], d);
+ msgpack_pack_append_buffer(x, buf, 5);
+}
+
+msgpack_pack_inline_func_fixint(_int64)(msgpack_pack_user x, int64_t d)
+{
+ unsigned char buf[9];
+ buf[0] = 0xd3; _msgpack_store64(&buf[1], d);
+ msgpack_pack_append_buffer(x, buf, 9);
+}
+
+#undef msgpack_pack_inline_func_fixint
+#endif
+
+
+msgpack_pack_inline_func(_uint8)(msgpack_pack_user x, uint8_t d)
+{
+ msgpack_pack_real_uint8(x, d);
+}
+
+msgpack_pack_inline_func(_uint16)(msgpack_pack_user x, uint16_t d)
+{
+ msgpack_pack_real_uint16(x, d);
+}
+
+msgpack_pack_inline_func(_uint32)(msgpack_pack_user x, uint32_t d)
+{
+ msgpack_pack_real_uint32(x, d);
+}
+
+msgpack_pack_inline_func(_uint64)(msgpack_pack_user x, uint64_t d)
+{
+ msgpack_pack_real_uint64(x, d);
+}
+
+msgpack_pack_inline_func(_int8)(msgpack_pack_user x, int8_t d)
+{
+ msgpack_pack_real_int8(x, d);
+}
+
+msgpack_pack_inline_func(_int16)(msgpack_pack_user x, int16_t d)
+{
+ msgpack_pack_real_int16(x, d);
+}
+
+msgpack_pack_inline_func(_int32)(msgpack_pack_user x, int32_t d)
+{
+ msgpack_pack_real_int32(x, d);
+}
+
+msgpack_pack_inline_func(_int64)(msgpack_pack_user x, int64_t d)
+{
+ msgpack_pack_real_int64(x, d);
+}
+
+
+#ifdef msgpack_pack_inline_func_cint
+
+msgpack_pack_inline_func_cint(_short)(msgpack_pack_user x, short d)
+{
+#if defined(SIZEOF_SHORT)
+#if SIZEOF_SHORT == 2
+ msgpack_pack_real_int16(x, d);
+#elif SIZEOF_SHORT == 4
+ msgpack_pack_real_int32(x, d);
+#else
+ msgpack_pack_real_int64(x, d);
+#endif
+
+#elif defined(SHRT_MAX)
+#if SHRT_MAX == 0x7fff
+ msgpack_pack_real_int16(x, d);
+#elif SHRT_MAX == 0x7fffffff
+ msgpack_pack_real_int32(x, d);
+#else
+ msgpack_pack_real_int64(x, d);
+#endif
+
+#else
+if(sizeof(short) == 2) {
+ msgpack_pack_real_int16(x, d);
+} else if(sizeof(short) == 4) {
+ msgpack_pack_real_int32(x, d);
+} else {
+ msgpack_pack_real_int64(x, d);
+}
+#endif
+}
+
+msgpack_pack_inline_func_cint(_int)(msgpack_pack_user x, int d)
+{
+#if defined(SIZEOF_INT)
+#if SIZEOF_INT == 2
+ msgpack_pack_real_int16(x, d);
+#elif SIZEOF_INT == 4
+ msgpack_pack_real_int32(x, d);
+#else
+ msgpack_pack_real_int64(x, d);
+#endif
+
+#elif defined(INT_MAX)
+#if INT_MAX == 0x7fff
+ msgpack_pack_real_int16(x, d);
+#elif INT_MAX == 0x7fffffff
+ msgpack_pack_real_int32(x, d);
+#else
+ msgpack_pack_real_int64(x, d);
+#endif
+
+#else
+if(sizeof(int) == 2) {
+ msgpack_pack_real_int16(x, d);
+} else if(sizeof(int) == 4) {
+ msgpack_pack_real_int32(x, d);
+} else {
+ msgpack_pack_real_int64(x, d);
+}
+#endif
+}
+
+msgpack_pack_inline_func_cint(_long)(msgpack_pack_user x, long d)
+{
+#if defined(SIZEOF_LONG)
+#if SIZEOF_LONG == 2
+ msgpack_pack_real_int16(x, d);
+#elif SIZEOF_LONG == 4
+ msgpack_pack_real_int32(x, d);
+#else
+ msgpack_pack_real_int64(x, d);
+#endif
+
+#elif defined(LONG_MAX)
+#if LONG_MAX == 0x7fffL
+ msgpack_pack_real_int16(x, d);
+#elif LONG_MAX == 0x7fffffffL
+ msgpack_pack_real_int32(x, d);
+#else
+ msgpack_pack_real_int64(x, d);
+#endif
+
+#else
+if(sizeof(long) == 2) {
+ msgpack_pack_real_int16(x, d);
+} else if(sizeof(long) == 4) {
+ msgpack_pack_real_int32(x, d);
+} else {
+ msgpack_pack_real_int64(x, d);
+}
+#endif
+}
+
+msgpack_pack_inline_func_cint(_long_long)(msgpack_pack_user x, long long d)
+{
+#if defined(SIZEOF_LONG_LONG)
+#if SIZEOF_LONG_LONG == 2
+ msgpack_pack_real_int16(x, d);
+#elif SIZEOF_LONG_LONG == 4
+ msgpack_pack_real_int32(x, d);
+#else
+ msgpack_pack_real_int64(x, d);
+#endif
+
+#elif defined(LLONG_MAX)
+#if LLONG_MAX == 0x7fffL
+ msgpack_pack_real_int16(x, d);
+#elif LLONG_MAX == 0x7fffffffL
+ msgpack_pack_real_int32(x, d);
+#else
+ msgpack_pack_real_int64(x, d);
+#endif
+
+#else
+if(sizeof(long long) == 2) {
+ msgpack_pack_real_int16(x, d);
+} else if(sizeof(long long) == 4) {
+ msgpack_pack_real_int32(x, d);
+} else {
+ msgpack_pack_real_int64(x, d);
+}
+#endif
+}
+
+msgpack_pack_inline_func_cint(_unsigned_short)(msgpack_pack_user x, unsigned short d)
+{
+#if defined(SIZEOF_SHORT)
+#if SIZEOF_SHORT == 2
+ msgpack_pack_real_uint16(x, d);
+#elif SIZEOF_SHORT == 4
+ msgpack_pack_real_uint32(x, d);
+#else
+ msgpack_pack_real_uint64(x, d);
+#endif
+
+#elif defined(USHRT_MAX)
+#if USHRT_MAX == 0xffffU
+ msgpack_pack_real_uint16(x, d);
+#elif USHRT_MAX == 0xffffffffU
+ msgpack_pack_real_uint32(x, d);
+#else
+ msgpack_pack_real_uint64(x, d);
+#endif
+
+#else
+if(sizeof(unsigned short) == 2) {
+ msgpack_pack_real_uint16(x, d);
+} else if(sizeof(unsigned short) == 4) {
+ msgpack_pack_real_uint32(x, d);
+} else {
+ msgpack_pack_real_uint64(x, d);
+}
+#endif
+}
+
+msgpack_pack_inline_func_cint(_unsigned_int)(msgpack_pack_user x, unsigned int d)
+{
+#if defined(SIZEOF_INT)
+#if SIZEOF_INT == 2
+ msgpack_pack_real_uint16(x, d);
+#elif SIZEOF_INT == 4
+ msgpack_pack_real_uint32(x, d);
+#else
+ msgpack_pack_real_uint64(x, d);
+#endif
+
+#elif defined(UINT_MAX)
+#if UINT_MAX == 0xffffU
+ msgpack_pack_real_uint16(x, d);
+#elif UINT_MAX == 0xffffffffU
+ msgpack_pack_real_uint32(x, d);
+#else
+ msgpack_pack_real_uint64(x, d);
+#endif
+
+#else
+if(sizeof(unsigned int) == 2) {
+ msgpack_pack_real_uint16(x, d);
+} else if(sizeof(unsigned int) == 4) {
+ msgpack_pack_real_uint32(x, d);
+} else {
+ msgpack_pack_real_uint64(x, d);
+}
+#endif
+}
+
+msgpack_pack_inline_func_cint(_unsigned_long)(msgpack_pack_user x, unsigned long d)
+{
+#if defined(SIZEOF_LONG)
+#if SIZEOF_LONG == 2
+ msgpack_pack_real_uint16(x, d);
+#elif SIZEOF_LONG == 4
+ msgpack_pack_real_uint32(x, d);
+#else
+ msgpack_pack_real_uint64(x, d);
+#endif
+
+#elif defined(ULONG_MAX)
+#if ULONG_MAX == 0xffffUL
+ msgpack_pack_real_uint16(x, d);
+#elif ULONG_MAX == 0xffffffffUL
+ msgpack_pack_real_uint32(x, d);
+#else
+ msgpack_pack_real_uint64(x, d);
+#endif
+
+#else
+if(sizeof(unsigned long) == 2) {
+ msgpack_pack_real_uint16(x, d);
+} else if(sizeof(unsigned long) == 4) {
+ msgpack_pack_real_uint32(x, d);
+} else {
+ msgpack_pack_real_uint64(x, d);
+}
+#endif
+}
+
+msgpack_pack_inline_func_cint(_unsigned_long_long)(msgpack_pack_user x, unsigned long long d)
+{
+#if defined(SIZEOF_LONG_LONG)
+#if SIZEOF_LONG_LONG == 2
+ msgpack_pack_real_uint16(x, d);
+#elif SIZEOF_LONG_LONG == 4
+ msgpack_pack_real_uint32(x, d);
+#else
+ msgpack_pack_real_uint64(x, d);
+#endif
+
+#elif defined(ULLONG_MAX)
+#if ULLONG_MAX == 0xffffUL
+ msgpack_pack_real_uint16(x, d);
+#elif ULLONG_MAX == 0xffffffffUL
+ msgpack_pack_real_uint32(x, d);
+#else
+ msgpack_pack_real_uint64(x, d);
+#endif
+
+#else
+if(sizeof(unsigned long long) == 2) {
+ msgpack_pack_real_uint16(x, d);
+} else if(sizeof(unsigned long long) == 4) {
+ msgpack_pack_real_uint32(x, d);
+} else {
+ msgpack_pack_real_uint64(x, d);
+}
+#endif
+}
+
+#undef msgpack_pack_inline_func_cint
+#endif
+
+
+
+/*
+ * Float
+ */
+
+msgpack_pack_inline_func(_float)(msgpack_pack_user x, float d)
+{
+ union { float f; uint32_t i; } mem;
+ mem.f = d;
+ unsigned char buf[5];
+ buf[0] = 0xca; _msgpack_store32(&buf[1], mem.i);
+ msgpack_pack_append_buffer(x, buf, 5);
+}
+
+msgpack_pack_inline_func(_double)(msgpack_pack_user x, double d)
+{
+ union { double f; uint64_t i; } mem;
+ mem.f = d;
+ unsigned char buf[9];
+ buf[0] = 0xcb;
+#if defined(__arm__) && !(__ARM_EABI__) // arm-oabi
+ // https://github.com/msgpack/msgpack-perl/pull/1
+ mem.i = (mem.i & 0xFFFFFFFFUL) << 32UL | (mem.i >> 32UL);
+#endif
+ _msgpack_store64(&buf[1], mem.i);
+ msgpack_pack_append_buffer(x, buf, 9);
+}
+
+
+/*
+ * Nil
+ */
+
+msgpack_pack_inline_func(_nil)(msgpack_pack_user x)
+{
+ static const unsigned char d = 0xc0;
+ msgpack_pack_append_buffer(x, &d, 1);
+}
+
+
+/*
+ * Boolean
+ */
+
+msgpack_pack_inline_func(_true)(msgpack_pack_user x)
+{
+ static const unsigned char d = 0xc3;
+ msgpack_pack_append_buffer(x, &d, 1);
+}
+
+msgpack_pack_inline_func(_false)(msgpack_pack_user x)
+{
+ static const unsigned char d = 0xc2;
+ msgpack_pack_append_buffer(x, &d, 1);
+}
+
+
+/*
+ * Array
+ */
+
+msgpack_pack_inline_func(_array)(msgpack_pack_user x, unsigned int n)
+{
+ if(n < 16) {
+ unsigned char d = 0x90 | n;
+ msgpack_pack_append_buffer(x, &d, 1);
+ } else if(n < 65536) {
+ unsigned char buf[3];
+ buf[0] = 0xdc; _msgpack_store16(&buf[1], (uint16_t)n);
+ msgpack_pack_append_buffer(x, buf, 3);
+ } else {
+ unsigned char buf[5];
+ buf[0] = 0xdd; _msgpack_store32(&buf[1], (uint32_t)n);
+ msgpack_pack_append_buffer(x, buf, 5);
+ }
+}
+
+
+/*
+ * Map
+ */
+
+msgpack_pack_inline_func(_map)(msgpack_pack_user x, unsigned int n)
+{
+ if(n < 16) {
+ unsigned char d = 0x80 | n;
+ msgpack_pack_append_buffer(x, &TAKE8_8(d), 1);
+ } else if(n < 65536) {
+ unsigned char buf[3];
+ buf[0] = 0xde; _msgpack_store16(&buf[1], (uint16_t)n);
+ msgpack_pack_append_buffer(x, buf, 3);
+ } else {
+ unsigned char buf[5];
+ buf[0] = 0xdf; _msgpack_store32(&buf[1], (uint32_t)n);
+ msgpack_pack_append_buffer(x, buf, 5);
+ }
+}
+
+
+/*
+ * Raw
+ */
+
+msgpack_pack_inline_func(_raw)(msgpack_pack_user x, size_t l)
+{
+ if(l < 32) {
+ unsigned char d = 0xa0 | (uint8_t)l;
+ msgpack_pack_append_buffer(x, &TAKE8_8(d), 1);
+ } else if(l < 65536) {
+ unsigned char buf[3];
+ buf[0] = 0xda; _msgpack_store16(&buf[1], (uint16_t)l);
+ msgpack_pack_append_buffer(x, buf, 3);
+ } else {
+ unsigned char buf[5];
+ buf[0] = 0xdb; _msgpack_store32(&buf[1], (uint32_t)l);
+ msgpack_pack_append_buffer(x, buf, 5);
+ }
+}
+
+msgpack_pack_inline_func(_raw_body)(msgpack_pack_user x, const void* b, size_t l)
+{
+ msgpack_pack_append_buffer(x, (const unsigned char*)b, l);
+}
+
+#undef msgpack_pack_inline_func
+#undef msgpack_pack_user
+#undef msgpack_pack_append_buffer
+
+#undef TAKE8_8
+#undef TAKE8_16
+#undef TAKE8_32
+#undef TAKE8_64
+
+#undef msgpack_pack_real_uint8
+#undef msgpack_pack_real_uint16
+#undef msgpack_pack_real_uint32
+#undef msgpack_pack_real_uint64
+#undef msgpack_pack_real_int8
+#undef msgpack_pack_real_int16
+#undef msgpack_pack_real_int32
+#undef msgpack_pack_real_int64
+
diff --git a/pandas/src/msgpack/sysdep.h b/pandas/src/msgpack/sysdep.h
new file mode 100644
index 0000000000000..4fedbd8ba472f
--- /dev/null
+++ b/pandas/src/msgpack/sysdep.h
@@ -0,0 +1,195 @@
+/*
+ * MessagePack system dependencies
+ *
+ * Copyright (C) 2008-2010 FURUHASHI Sadayuki
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MSGPACK_SYSDEP_H__
+#define MSGPACK_SYSDEP_H__
+
+#include <stdlib.h>
+#include <stddef.h>
+#if defined(_MSC_VER) && _MSC_VER < 1600
+typedef __int8 int8_t;
+typedef unsigned __int8 uint8_t;
+typedef __int16 int16_t;
+typedef unsigned __int16 uint16_t;
+typedef __int32 int32_t;
+typedef unsigned __int32 uint32_t;
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+#elif defined(_MSC_VER) // && _MSC_VER >= 1600
+#include <stdint.h>
+#else
+#include <stdint.h>
+#include <stdbool.h>
+#endif
+
+#ifdef _WIN32
+#define _msgpack_atomic_counter_header <windows.h>
+typedef long _msgpack_atomic_counter_t;
+#define _msgpack_sync_decr_and_fetch(ptr) InterlockedDecrement(ptr)
+#define _msgpack_sync_incr_and_fetch(ptr) InterlockedIncrement(ptr)
+#elif defined(__GNUC__) && ((__GNUC__*10 + __GNUC_MINOR__) < 41)
+#define _msgpack_atomic_counter_header "gcc_atomic.h"
+#else
+typedef unsigned int _msgpack_atomic_counter_t;
+#define _msgpack_sync_decr_and_fetch(ptr) __sync_sub_and_fetch(ptr, 1)
+#define _msgpack_sync_incr_and_fetch(ptr) __sync_add_and_fetch(ptr, 1)
+#endif
+
+#ifdef _WIN32
+
+#ifdef __cplusplus
+/* numeric_limits<T>::min,max */
+#ifdef max
+#undef max
+#endif
+#ifdef min
+#undef min
+#endif
+#endif
+
+#else
+#include <arpa/inet.h> /* __BYTE_ORDER */
+#endif
+
+#if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN__
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#define __BIG_ENDIAN__
+#elif _WIN32
+#define __LITTLE_ENDIAN__
+#endif
+#endif
+
+
+#ifdef __LITTLE_ENDIAN__
+
+#ifdef _WIN32
+# if defined(ntohs)
+# define _msgpack_be16(x) ntohs(x)
+# elif defined(_byteswap_ushort) || (defined(_MSC_VER) && _MSC_VER >= 1400)
+# define _msgpack_be16(x) ((uint16_t)_byteswap_ushort((unsigned short)x))
+# else
+# define _msgpack_be16(x) ( \
+ ((((uint16_t)x) << 8) ) | \
+ ((((uint16_t)x) >> 8) ) )
+# endif
+#else
+# define _msgpack_be16(x) ntohs(x)
+#endif
+
+#ifdef _WIN32
+# if defined(ntohl)
+# define _msgpack_be32(x) ntohl(x)
+# elif defined(_byteswap_ulong) || (defined(_MSC_VER) && _MSC_VER >= 1400)
+# define _msgpack_be32(x) ((uint32_t)_byteswap_ulong((unsigned long)x))
+# else
+# define _msgpack_be32(x) \
+ ( ((((uint32_t)x) << 24) ) | \
+ ((((uint32_t)x) << 8) & 0x00ff0000U ) | \
+ ((((uint32_t)x) >> 8) & 0x0000ff00U ) | \
+ ((((uint32_t)x) >> 24) ) )
+# endif
+#else
+# define _msgpack_be32(x) ntohl(x)
+#endif
+
+#if defined(_byteswap_uint64) || (defined(_MSC_VER) && _MSC_VER >= 1400)
+# define _msgpack_be64(x) (_byteswap_uint64(x))
+#elif defined(bswap_64)
+# define _msgpack_be64(x) bswap_64(x)
+#elif defined(__DARWIN_OSSwapInt64)
+# define _msgpack_be64(x) __DARWIN_OSSwapInt64(x)
+#else
+#define _msgpack_be64(x) \
+ ( ((((uint64_t)x) << 56) ) | \
+ ((((uint64_t)x) << 40) & 0x00ff000000000000ULL ) | \
+ ((((uint64_t)x) << 24) & 0x0000ff0000000000ULL ) | \
+ ((((uint64_t)x) << 8) & 0x000000ff00000000ULL ) | \
+ ((((uint64_t)x) >> 8) & 0x00000000ff000000ULL ) | \
+ ((((uint64_t)x) >> 24) & 0x0000000000ff0000ULL ) | \
+ ((((uint64_t)x) >> 40) & 0x000000000000ff00ULL ) | \
+ ((((uint64_t)x) >> 56) ) )
+#endif
+
+#define _msgpack_load16(cast, from) ((cast)( \
+ (((uint16_t)((uint8_t*)(from))[0]) << 8) | \
+ (((uint16_t)((uint8_t*)(from))[1]) ) ))
+
+#define _msgpack_load32(cast, from) ((cast)( \
+ (((uint32_t)((uint8_t*)(from))[0]) << 24) | \
+ (((uint32_t)((uint8_t*)(from))[1]) << 16) | \
+ (((uint32_t)((uint8_t*)(from))[2]) << 8) | \
+ (((uint32_t)((uint8_t*)(from))[3]) ) ))
+
+#define _msgpack_load64(cast, from) ((cast)( \
+ (((uint64_t)((uint8_t*)(from))[0]) << 56) | \
+ (((uint64_t)((uint8_t*)(from))[1]) << 48) | \
+ (((uint64_t)((uint8_t*)(from))[2]) << 40) | \
+ (((uint64_t)((uint8_t*)(from))[3]) << 32) | \
+ (((uint64_t)((uint8_t*)(from))[4]) << 24) | \
+ (((uint64_t)((uint8_t*)(from))[5]) << 16) | \
+ (((uint64_t)((uint8_t*)(from))[6]) << 8) | \
+ (((uint64_t)((uint8_t*)(from))[7]) ) ))
+
+#else
+
+#define _msgpack_be16(x) (x)
+#define _msgpack_be32(x) (x)
+#define _msgpack_be64(x) (x)
+
+#define _msgpack_load16(cast, from) ((cast)( \
+ (((uint16_t)((uint8_t*)from)[0]) << 8) | \
+ (((uint16_t)((uint8_t*)from)[1]) ) ))
+
+#define _msgpack_load32(cast, from) ((cast)( \
+ (((uint32_t)((uint8_t*)from)[0]) << 24) | \
+ (((uint32_t)((uint8_t*)from)[1]) << 16) | \
+ (((uint32_t)((uint8_t*)from)[2]) << 8) | \
+ (((uint32_t)((uint8_t*)from)[3]) ) ))
+
+#define _msgpack_load64(cast, from) ((cast)( \
+ (((uint64_t)((uint8_t*)from)[0]) << 56) | \
+ (((uint64_t)((uint8_t*)from)[1]) << 48) | \
+ (((uint64_t)((uint8_t*)from)[2]) << 40) | \
+ (((uint64_t)((uint8_t*)from)[3]) << 32) | \
+ (((uint64_t)((uint8_t*)from)[4]) << 24) | \
+ (((uint64_t)((uint8_t*)from)[5]) << 16) | \
+ (((uint64_t)((uint8_t*)from)[6]) << 8) | \
+ (((uint64_t)((uint8_t*)from)[7]) ) ))
+#endif
+
+
+#define _msgpack_store16(to, num) \
+ do { uint16_t val = _msgpack_be16(num); memcpy(to, &val, 2); } while(0)
+#define _msgpack_store32(to, num) \
+ do { uint32_t val = _msgpack_be32(num); memcpy(to, &val, 4); } while(0)
+#define _msgpack_store64(to, num) \
+ do { uint64_t val = _msgpack_be64(num); memcpy(to, &val, 8); } while(0)
+
+/*
+#define _msgpack_load16(cast, from) \
+ ({ cast val; memcpy(&val, (char*)from, 2); _msgpack_be16(val); })
+#define _msgpack_load32(cast, from) \
+ ({ cast val; memcpy(&val, (char*)from, 4); _msgpack_be32(val); })
+#define _msgpack_load64(cast, from) \
+ ({ cast val; memcpy(&val, (char*)from, 8); _msgpack_be64(val); })
+*/
+
+
+#endif /* msgpack/sysdep.h */
+
diff --git a/pandas/src/msgpack/unpack.h b/pandas/src/msgpack/unpack.h
new file mode 100644
index 0000000000000..3dc88e5fbded0
--- /dev/null
+++ b/pandas/src/msgpack/unpack.h
@@ -0,0 +1,235 @@
+/*
+ * MessagePack for Python unpacking routine
+ *
+ * Copyright (C) 2009 Naoki INADA
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define MSGPACK_EMBED_STACK_SIZE (1024)
+#include "unpack_define.h"
+
+typedef struct unpack_user {
+ int use_list;
+ PyObject *object_hook;
+ bool has_pairs_hook;
+ PyObject *list_hook;
+ const char *encoding;
+ const char *unicode_errors;
+} unpack_user;
+
+
+#define msgpack_unpack_struct(name) \
+ struct template ## name
+
+#define msgpack_unpack_func(ret, name) \
+ static inline ret template ## name
+
+#define msgpack_unpack_callback(name) \
+ template_callback ## name
+
+#define msgpack_unpack_object PyObject*
+
+#define msgpack_unpack_user unpack_user
+
+typedef int (*execute_fn)(msgpack_unpack_struct(_context)* ctx, const char* data, size_t len, size_t* off);
+
+struct template_context;
+typedef struct template_context template_context;
+
+static inline msgpack_unpack_object template_callback_root(unpack_user* u)
+{
+ return NULL;
+}
+
+static inline int template_callback_uint16(unpack_user* u, uint16_t d, msgpack_unpack_object* o)
+{
+ PyObject *p = PyInt_FromLong((long)d);
+ if (!p)
+ return -1;
+ *o = p;
+ return 0;
+}
+static inline int template_callback_uint8(unpack_user* u, uint8_t d, msgpack_unpack_object* o)
+{
+ return template_callback_uint16(u, d, o);
+}
+
+
+static inline int template_callback_uint32(unpack_user* u, uint32_t d, msgpack_unpack_object* o)
+{
+ PyObject *p;
+ if (d > LONG_MAX) {
+ p = PyLong_FromUnsignedLong((unsigned long)d);
+ } else {
+ p = PyInt_FromLong((long)d);
+ }
+ if (!p)
+ return -1;
+ *o = p;
+ return 0;
+}
+
+static inline int template_callback_uint64(unpack_user* u, uint64_t d, msgpack_unpack_object* o)
+{
+ PyObject *p = PyLong_FromUnsignedLongLong(d);
+ if (!p)
+ return -1;
+ *o = p;
+ return 0;
+}
+
+static inline int template_callback_int32(unpack_user* u, int32_t d, msgpack_unpack_object* o)
+{
+ PyObject *p = PyInt_FromLong(d);
+ if (!p)
+ return -1;
+ *o = p;
+ return 0;
+}
+
+static inline int template_callback_int16(unpack_user* u, int16_t d, msgpack_unpack_object* o)
+{
+ return template_callback_int32(u, d, o);
+}
+
+static inline int template_callback_int8(unpack_user* u, int8_t d, msgpack_unpack_object* o)
+{
+ return template_callback_int32(u, d, o);
+}
+
+static inline int template_callback_int64(unpack_user* u, int64_t d, msgpack_unpack_object* o)
+{
+ PyObject *p = PyLong_FromLongLong(d);
+ if (!p)
+ return -1;
+ *o = p;
+ return 0;
+}
+
+static inline int template_callback_double(unpack_user* u, double d, msgpack_unpack_object* o)
+{
+ PyObject *p = PyFloat_FromDouble(d);
+ if (!p)
+ return -1;
+ *o = p;
+ return 0;
+}
+
+static inline int template_callback_float(unpack_user* u, float d, msgpack_unpack_object* o)
+{
+ return template_callback_double(u, d, o);
+}
+
+static inline int template_callback_nil(unpack_user* u, msgpack_unpack_object* o)
+{ Py_INCREF(Py_None); *o = Py_None; return 0; }
+
+static inline int template_callback_true(unpack_user* u, msgpack_unpack_object* o)
+{ Py_INCREF(Py_True); *o = Py_True; return 0; }
+
+static inline int template_callback_false(unpack_user* u, msgpack_unpack_object* o)
+{ Py_INCREF(Py_False); *o = Py_False; return 0; }
+
+static inline int template_callback_array(unpack_user* u, unsigned int n, msgpack_unpack_object* o)
+{
+ PyObject *p = u->use_list ? PyList_New(n) : PyTuple_New(n);
+
+ if (!p)
+ return -1;
+ *o = p;
+ return 0;
+}
+
+static inline int template_callback_array_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object o)
+{
+ if (u->use_list)
+ PyList_SET_ITEM(*c, current, o);
+ else
+ PyTuple_SET_ITEM(*c, current, o);
+ return 0;
+}
+
+static inline int template_callback_array_end(unpack_user* u, msgpack_unpack_object* c)
+{
+ if (u->list_hook) {
+ PyObject *new_c = PyEval_CallFunction(u->list_hook, "(O)", *c);
+ if (!new_c)
+ return -1;
+ Py_DECREF(*c);
+ *c = new_c;
+ }
+ return 0;
+}
+
+static inline int template_callback_map(unpack_user* u, unsigned int n, msgpack_unpack_object* o)
+{
+ PyObject *p;
+ if (u->has_pairs_hook) {
+ p = PyList_New(n); // Or use tuple?
+ }
+ else {
+ p = PyDict_New();
+ }
+ if (!p)
+ return -1;
+ *o = p;
+ return 0;
+}
+
+static inline int template_callback_map_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object k, msgpack_unpack_object v)
+{
+ if (u->has_pairs_hook) {
+ msgpack_unpack_object item = PyTuple_Pack(2, k, v);
+ if (!item)
+ return -1;
+ Py_DECREF(k);
+ Py_DECREF(v);
+ PyList_SET_ITEM(*c, current, item);
+ return 0;
+ }
+ else if (PyDict_SetItem(*c, k, v) == 0) {
+ Py_DECREF(k);
+ Py_DECREF(v);
+ return 0;
+ }
+ return -1;
+}
+
+static inline int template_callback_map_end(unpack_user* u, msgpack_unpack_object* c)
+{
+ if (u->object_hook) {
+ PyObject *new_c = PyEval_CallFunction(u->object_hook, "(O)", *c);
+ if (!new_c)
+ return -1;
+
+ Py_DECREF(*c);
+ *c = new_c;
+ }
+ return 0;
+}
+
+static inline int template_callback_raw(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_unpack_object* o)
+{
+ PyObject *py;
+ if(u->encoding) {
+ py = PyUnicode_Decode(p, l, u->encoding, u->unicode_errors);
+ } else {
+ py = PyBytes_FromStringAndSize(p, l);
+ }
+ if (!py)
+ return -1;
+ *o = py;
+ return 0;
+}
+
+#include "unpack_template.h"
diff --git a/pandas/src/msgpack/unpack_define.h b/pandas/src/msgpack/unpack_define.h
new file mode 100644
index 0000000000000..959d3519e7b5c
--- /dev/null
+++ b/pandas/src/msgpack/unpack_define.h
@@ -0,0 +1,93 @@
+/*
+ * MessagePack unpacking routine template
+ *
+ * Copyright (C) 2008-2010 FURUHASHI Sadayuki
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MSGPACK_UNPACK_DEFINE_H__
+#define MSGPACK_UNPACK_DEFINE_H__
+
+#include "msgpack/sysdep.h"
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#ifndef MSGPACK_EMBED_STACK_SIZE
+#define MSGPACK_EMBED_STACK_SIZE 32
+#endif
+
+
+typedef enum {
+ CS_HEADER = 0x00, // nil
+
+ //CS_ = 0x01,
+ //CS_ = 0x02, // false
+ //CS_ = 0x03, // true
+
+ //CS_ = 0x04,
+ //CS_ = 0x05,
+ //CS_ = 0x06,
+ //CS_ = 0x07,
+
+ //CS_ = 0x08,
+ //CS_ = 0x09,
+ CS_FLOAT = 0x0a,
+ CS_DOUBLE = 0x0b,
+ CS_UINT_8 = 0x0c,
+ CS_UINT_16 = 0x0d,
+ CS_UINT_32 = 0x0e,
+ CS_UINT_64 = 0x0f,
+ CS_INT_8 = 0x10,
+ CS_INT_16 = 0x11,
+ CS_INT_32 = 0x12,
+ CS_INT_64 = 0x13,
+
+ //CS_ = 0x14,
+ //CS_ = 0x15,
+ //CS_BIG_INT_16 = 0x16,
+ //CS_BIG_INT_32 = 0x17,
+ //CS_BIG_FLOAT_16 = 0x18,
+ //CS_BIG_FLOAT_32 = 0x19,
+ CS_RAW_16 = 0x1a,
+ CS_RAW_32 = 0x1b,
+ CS_ARRAY_16 = 0x1c,
+ CS_ARRAY_32 = 0x1d,
+ CS_MAP_16 = 0x1e,
+ CS_MAP_32 = 0x1f,
+
+ //ACS_BIG_INT_VALUE,
+ //ACS_BIG_FLOAT_VALUE,
+ ACS_RAW_VALUE,
+} msgpack_unpack_state;
+
+
+typedef enum {
+ CT_ARRAY_ITEM,
+ CT_MAP_KEY,
+ CT_MAP_VALUE,
+} msgpack_container_type;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* msgpack/unpack_define.h */
+
diff --git a/pandas/src/msgpack/unpack_template.h b/pandas/src/msgpack/unpack_template.h
new file mode 100644
index 0000000000000..83b6918dc6686
--- /dev/null
+++ b/pandas/src/msgpack/unpack_template.h
@@ -0,0 +1,492 @@
+/*
+ * MessagePack unpacking routine template
+ *
+ * Copyright (C) 2008-2010 FURUHASHI Sadayuki
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef msgpack_unpack_func
+#error msgpack_unpack_func template is not defined
+#endif
+
+#ifndef msgpack_unpack_callback
+#error msgpack_unpack_callback template is not defined
+#endif
+
+#ifndef msgpack_unpack_struct
+#error msgpack_unpack_struct template is not defined
+#endif
+
+#ifndef msgpack_unpack_struct_decl
+#define msgpack_unpack_struct_decl(name) msgpack_unpack_struct(name)
+#endif
+
+#ifndef msgpack_unpack_object
+#error msgpack_unpack_object type is not defined
+#endif
+
+#ifndef msgpack_unpack_user
+#error msgpack_unpack_user type is not defined
+#endif
+
+#ifndef USE_CASE_RANGE
+#if !defined(_MSC_VER)
+#define USE_CASE_RANGE
+#endif
+#endif
+
+msgpack_unpack_struct_decl(_stack) {
+ msgpack_unpack_object obj;
+ size_t size;
+ size_t count;
+ unsigned int ct;
+ msgpack_unpack_object map_key;
+};
+
+msgpack_unpack_struct_decl(_context) {
+ msgpack_unpack_user user;
+ unsigned int cs;
+ unsigned int trail;
+ unsigned int top;
+ /*
+ msgpack_unpack_struct(_stack)* stack;
+ unsigned int stack_size;
+ msgpack_unpack_struct(_stack) embed_stack[MSGPACK_EMBED_STACK_SIZE];
+ */
+ msgpack_unpack_struct(_stack) stack[MSGPACK_EMBED_STACK_SIZE];
+};
+
+
+msgpack_unpack_func(void, _init)(msgpack_unpack_struct(_context)* ctx)
+{
+ ctx->cs = CS_HEADER;
+ ctx->trail = 0;
+ ctx->top = 0;
+ /*
+ ctx->stack = ctx->embed_stack;
+ ctx->stack_size = MSGPACK_EMBED_STACK_SIZE;
+ */
+ ctx->stack[0].obj = msgpack_unpack_callback(_root)(&ctx->user);
+}
+
+/*
+msgpack_unpack_func(void, _destroy)(msgpack_unpack_struct(_context)* ctx)
+{
+ if(ctx->stack_size != MSGPACK_EMBED_STACK_SIZE) {
+ free(ctx->stack);
+ }
+}
+*/
+
+msgpack_unpack_func(msgpack_unpack_object, _data)(msgpack_unpack_struct(_context)* ctx)
+{
+ return (ctx)->stack[0].obj;
+}
+
+
+template <bool construct>
+msgpack_unpack_func(int, _execute)(msgpack_unpack_struct(_context)* ctx, const char* data, size_t len, size_t* off)
+{
+ assert(len >= *off);
+
+ const unsigned char* p = (unsigned char*)data + *off;
+ const unsigned char* const pe = (unsigned char*)data + len;
+ const void* n = NULL;
+
+ unsigned int trail = ctx->trail;
+ unsigned int cs = ctx->cs;
+ unsigned int top = ctx->top;
+ msgpack_unpack_struct(_stack)* stack = ctx->stack;
+ /*
+ unsigned int stack_size = ctx->stack_size;
+ */
+ msgpack_unpack_user* user = &ctx->user;
+
+ msgpack_unpack_object obj;
+ msgpack_unpack_struct(_stack)* c = NULL;
+
+ int ret;
+
+#define construct_cb(name) \
+ construct && msgpack_unpack_callback(name)
+
+#define push_simple_value(func) \
+ if(construct_cb(func)(user, &obj) < 0) { goto _failed; } \
+ goto _push
+#define push_fixed_value(func, arg) \
+ if(construct_cb(func)(user, arg, &obj) < 0) { goto _failed; } \
+ goto _push
+#define push_variable_value(func, base, pos, len) \
+ if(construct_cb(func)(user, \
+ (const char*)base, (const char*)pos, len, &obj) < 0) { goto _failed; } \
+ goto _push
+
+#define again_fixed_trail(_cs, trail_len) \
+ trail = trail_len; \
+ cs = _cs; \
+ goto _fixed_trail_again
+#define again_fixed_trail_if_zero(_cs, trail_len, ifzero) \
+ trail = trail_len; \
+ if(trail == 0) { goto ifzero; } \
+ cs = _cs; \
+ goto _fixed_trail_again
+
+#define start_container(func, count_, ct_) \
+ if(top >= MSGPACK_EMBED_STACK_SIZE) { goto _failed; } /* FIXME */ \
+ if(construct_cb(func)(user, count_, &stack[top].obj) < 0) { goto _failed; } \
+ if((count_) == 0) { obj = stack[top].obj; \
+ if (construct_cb(func##_end)(user, &obj) < 0) { goto _failed; } \
+ goto _push; } \
+ stack[top].ct = ct_; \
+ stack[top].size = count_; \
+ stack[top].count = 0; \
+ ++top; \
+ /*printf("container %d count %d stack %d\n",stack[top].obj,count_,top);*/ \
+ /*printf("stack push %d\n", top);*/ \
+ /* FIXME \
+ if(top >= stack_size) { \
+ if(stack_size == MSGPACK_EMBED_STACK_SIZE) { \
+ size_t csize = sizeof(msgpack_unpack_struct(_stack)) * MSGPACK_EMBED_STACK_SIZE; \
+ size_t nsize = csize * 2; \
+ msgpack_unpack_struct(_stack)* tmp = (msgpack_unpack_struct(_stack)*)malloc(nsize); \
+ if(tmp == NULL) { goto _failed; } \
+ memcpy(tmp, ctx->stack, csize); \
+ ctx->stack = stack = tmp; \
+ ctx->stack_size = stack_size = MSGPACK_EMBED_STACK_SIZE * 2; \
+ } else { \
+ size_t nsize = sizeof(msgpack_unpack_struct(_stack)) * ctx->stack_size * 2; \
+ msgpack_unpack_struct(_stack)* tmp = (msgpack_unpack_struct(_stack)*)realloc(ctx->stack, nsize); \
+ if(tmp == NULL) { goto _failed; } \
+ ctx->stack = stack = tmp; \
+ ctx->stack_size = stack_size = stack_size * 2; \
+ } \
+ } \
+ */ \
+ goto _header_again
+
+#define NEXT_CS(p) \
+ ((unsigned int)*p & 0x1f)
+
+#ifdef USE_CASE_RANGE
+#define SWITCH_RANGE_BEGIN switch(*p) {
+#define SWITCH_RANGE(FROM, TO) case FROM ... TO:
+#define SWITCH_RANGE_DEFAULT default:
+#define SWITCH_RANGE_END }
+#else
+#define SWITCH_RANGE_BEGIN { if(0) {
+#define SWITCH_RANGE(FROM, TO) } else if(FROM <= *p && *p <= TO) {
+#define SWITCH_RANGE_DEFAULT } else {
+#define SWITCH_RANGE_END } }
+#endif
+
+ if(p == pe) { goto _out; }
+ do {
+ switch(cs) {
+ case CS_HEADER:
+ SWITCH_RANGE_BEGIN
+ SWITCH_RANGE(0x00, 0x7f) // Positive Fixnum
+ push_fixed_value(_uint8, *(uint8_t*)p);
+ SWITCH_RANGE(0xe0, 0xff) // Negative Fixnum
+ push_fixed_value(_int8, *(int8_t*)p);
+ SWITCH_RANGE(0xc0, 0xdf) // Variable
+ switch(*p) {
+ case 0xc0: // nil
+ push_simple_value(_nil);
+ //case 0xc1: // string
+ // again_terminal_trail(NEXT_CS(p), p+1);
+ case 0xc2: // false
+ push_simple_value(_false);
+ case 0xc3: // true
+ push_simple_value(_true);
+ //case 0xc4:
+ //case 0xc5:
+ //case 0xc6:
+ //case 0xc7:
+ //case 0xc8:
+ //case 0xc9:
+ case 0xca: // float
+ case 0xcb: // double
+ case 0xcc: // unsigned int 8
+ case 0xcd: // unsigned int 16
+ case 0xce: // unsigned int 32
+ case 0xcf: // unsigned int 64
+ case 0xd0: // signed int 8
+ case 0xd1: // signed int 16
+ case 0xd2: // signed int 32
+ case 0xd3: // signed int 64
+ again_fixed_trail(NEXT_CS(p), 1 << (((unsigned int)*p) & 0x03));
+ //case 0xd4:
+ //case 0xd5:
+ //case 0xd6: // big integer 16
+ //case 0xd7: // big integer 32
+ //case 0xd8: // big float 16
+ //case 0xd9: // big float 32
+ case 0xda: // raw 16
+ case 0xdb: // raw 32
+ case 0xdc: // array 16
+ case 0xdd: // array 32
+ case 0xde: // map 16
+ case 0xdf: // map 32
+ again_fixed_trail(NEXT_CS(p), 2 << (((unsigned int)*p) & 0x01));
+ default:
+ goto _failed;
+ }
+ SWITCH_RANGE(0xa0, 0xbf) // FixRaw
+ again_fixed_trail_if_zero(ACS_RAW_VALUE, ((unsigned int)*p & 0x1f), _raw_zero);
+ SWITCH_RANGE(0x90, 0x9f) // FixArray
+ start_container(_array, ((unsigned int)*p) & 0x0f, CT_ARRAY_ITEM);
+ SWITCH_RANGE(0x80, 0x8f) // FixMap
+ start_container(_map, ((unsigned int)*p) & 0x0f, CT_MAP_KEY);
+
+ SWITCH_RANGE_DEFAULT
+ goto _failed;
+ SWITCH_RANGE_END
+ // end CS_HEADER
+
+
+ _fixed_trail_again:
+ ++p;
+
+ default:
+ if((size_t)(pe - p) < trail) { goto _out; }
+ n = p; p += trail - 1;
+ switch(cs) {
+ //case CS_
+ //case CS_
+ case CS_FLOAT: {
+ union { uint32_t i; float f; } mem;
+ mem.i = _msgpack_load32(uint32_t,n);
+ push_fixed_value(_float, mem.f); }
+ case CS_DOUBLE: {
+ union { uint64_t i; double f; } mem;
+ mem.i = _msgpack_load64(uint64_t,n);
+#if defined(__arm__) && !(__ARM_EABI__) // arm-oabi
+ // https://github.com/msgpack/msgpack-perl/pull/1
+ mem.i = (mem.i & 0xFFFFFFFFUL) << 32UL | (mem.i >> 32UL);
+#endif
+ push_fixed_value(_double, mem.f); }
+ case CS_UINT_8:
+ push_fixed_value(_uint8, *(uint8_t*)n);
+ case CS_UINT_16:
+ push_fixed_value(_uint16, _msgpack_load16(uint16_t,n));
+ case CS_UINT_32:
+ push_fixed_value(_uint32, _msgpack_load32(uint32_t,n));
+ case CS_UINT_64:
+ push_fixed_value(_uint64, _msgpack_load64(uint64_t,n));
+
+ case CS_INT_8:
+ push_fixed_value(_int8, *(int8_t*)n);
+ case CS_INT_16:
+ push_fixed_value(_int16, _msgpack_load16(int16_t,n));
+ case CS_INT_32:
+ push_fixed_value(_int32, _msgpack_load32(int32_t,n));
+ case CS_INT_64:
+ push_fixed_value(_int64, _msgpack_load64(int64_t,n));
+
+ //case CS_
+ //case CS_
+ //case CS_BIG_INT_16:
+ // again_fixed_trail_if_zero(ACS_BIG_INT_VALUE, _msgpack_load16(uint16_t,n), _big_int_zero);
+ //case CS_BIG_INT_32:
+ // again_fixed_trail_if_zero(ACS_BIG_INT_VALUE, _msgpack_load32(uint32_t,n), _big_int_zero);
+ //case ACS_BIG_INT_VALUE:
+ //_big_int_zero:
+ // // FIXME
+ // push_variable_value(_big_int, data, n, trail);
+
+ //case CS_BIG_FLOAT_16:
+ // again_fixed_trail_if_zero(ACS_BIG_FLOAT_VALUE, _msgpack_load16(uint16_t,n), _big_float_zero);
+ //case CS_BIG_FLOAT_32:
+ // again_fixed_trail_if_zero(ACS_BIG_FLOAT_VALUE, _msgpack_load32(uint32_t,n), _big_float_zero);
+ //case ACS_BIG_FLOAT_VALUE:
+ //_big_float_zero:
+ // // FIXME
+ // push_variable_value(_big_float, data, n, trail);
+
+ case CS_RAW_16:
+ again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load16(uint16_t,n), _raw_zero);
+ case CS_RAW_32:
+ again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load32(uint32_t,n), _raw_zero);
+ case ACS_RAW_VALUE:
+ _raw_zero:
+ push_variable_value(_raw, data, n, trail);
+
+ case CS_ARRAY_16:
+ start_container(_array, _msgpack_load16(uint16_t,n), CT_ARRAY_ITEM);
+ case CS_ARRAY_32:
+ /* FIXME security guard */
+ start_container(_array, _msgpack_load32(uint32_t,n), CT_ARRAY_ITEM);
+
+ case CS_MAP_16:
+ start_container(_map, _msgpack_load16(uint16_t,n), CT_MAP_KEY);
+ case CS_MAP_32:
+ /* FIXME security guard */
+ start_container(_map, _msgpack_load32(uint32_t,n), CT_MAP_KEY);
+
+ default:
+ goto _failed;
+ }
+ }
+
+_push:
+ if(top == 0) { goto _finish; }
+ c = &stack[top-1];
+ switch(c->ct) {
+ case CT_ARRAY_ITEM:
+ if(construct_cb(_array_item)(user, c->count, &c->obj, obj) < 0) { goto _failed; }
+ if(++c->count == c->size) {
+ obj = c->obj;
+ if (construct_cb(_array_end)(user, &obj) < 0) { goto _failed; }
+ --top;
+ /*printf("stack pop %d\n", top);*/
+ goto _push;
+ }
+ goto _header_again;
+ case CT_MAP_KEY:
+ c->map_key = obj;
+ c->ct = CT_MAP_VALUE;
+ goto _header_again;
+ case CT_MAP_VALUE:
+ if(construct_cb(_map_item)(user, c->count, &c->obj, c->map_key, obj) < 0) { goto _failed; }
+ if(++c->count == c->size) {
+ obj = c->obj;
+ if (construct_cb(_map_end)(user, &obj) < 0) { goto _failed; }
+ --top;
+ /*printf("stack pop %d\n", top);*/
+ goto _push;
+ }
+ c->ct = CT_MAP_KEY;
+ goto _header_again;
+
+ default:
+ goto _failed;
+ }
+
+_header_again:
+ cs = CS_HEADER;
+ ++p;
+ } while(p != pe);
+ goto _out;
+
+
+_finish:
+ if (!construct)
+ msgpack_unpack_callback(_nil)(user, &obj);
+ stack[0].obj = obj;
+ ++p;
+ ret = 1;
+ /*printf("-- finish --\n"); */
+ goto _end;
+
+_failed:
+ /*printf("** FAILED **\n"); */
+ ret = -1;
+ goto _end;
+
+_out:
+ ret = 0;
+ goto _end;
+
+_end:
+ ctx->cs = cs;
+ ctx->trail = trail;
+ ctx->top = top;
+ *off = p - (const unsigned char*)data;
+
+ return ret;
+#undef construct_cb
+}
+
+#undef SWITCH_RANGE_BEGIN
+#undef SWITCH_RANGE
+#undef SWITCH_RANGE_DEFAULT
+#undef SWITCH_RANGE_END
+#undef push_simple_value
+#undef push_fixed_value
+#undef push_variable_value
+#undef again_fixed_trail
+#undef again_fixed_trail_if_zero
+#undef start_container
+
+template <unsigned int fixed_offset, unsigned int var_offset>
+msgpack_unpack_func(int, _container_header)(msgpack_unpack_struct(_context)* ctx, const char* data, size_t len, size_t* off)
+{
+ assert(len >= *off);
+ uint32_t size;
+ const unsigned char *const p = (unsigned char*)data + *off;
+
+#define inc_offset(inc) \
+ if (len - *off < inc) \
+ return 0; \
+ *off += inc;
+
+ switch (*p) {
+ case var_offset:
+ inc_offset(3);
+ size = _msgpack_load16(uint16_t, p + 1);
+ break;
+ case var_offset + 1:
+ inc_offset(5);
+ size = _msgpack_load32(uint32_t, p + 1);
+ break;
+#ifdef USE_CASE_RANGE
+ case fixed_offset + 0x0 ... fixed_offset + 0xf:
+#else
+ case fixed_offset + 0x0:
+ case fixed_offset + 0x1:
+ case fixed_offset + 0x2:
+ case fixed_offset + 0x3:
+ case fixed_offset + 0x4:
+ case fixed_offset + 0x5:
+ case fixed_offset + 0x6:
+ case fixed_offset + 0x7:
+ case fixed_offset + 0x8:
+ case fixed_offset + 0x9:
+ case fixed_offset + 0xa:
+ case fixed_offset + 0xb:
+ case fixed_offset + 0xc:
+ case fixed_offset + 0xd:
+ case fixed_offset + 0xe:
+ case fixed_offset + 0xf:
+#endif
+ ++*off;
+ size = ((unsigned int)*p) & 0x0f;
+ break;
+ default:
+ PyErr_SetString(PyExc_ValueError, "Unexpected type header on stream");
+ return -1;
+ }
+ msgpack_unpack_callback(_uint32)(&ctx->user, size, &ctx->stack[0].obj);
+ return 1;
+}
+
+#undef SWITCH_RANGE_BEGIN
+#undef SWITCH_RANGE
+#undef SWITCH_RANGE_DEFAULT
+#undef SWITCH_RANGE_END
+
+static const execute_fn template_construct = &template_execute<true>;
+static const execute_fn template_skip = &template_execute<false>;
+static const execute_fn read_array_header = &template_container_header<0x90, 0xdc>;
+static const execute_fn read_map_header = &template_container_header<0x80, 0xde>;
+
+#undef msgpack_unpack_func
+#undef msgpack_unpack_callback
+#undef msgpack_unpack_struct
+#undef msgpack_unpack_object
+#undef msgpack_unpack_user
+
+#undef NEXT_CS
+
+/* vim: set ts=4 sw=4 noexpandtab */
diff --git a/pandas/tests/test_msgpack/test_buffer.py b/pandas/tests/test_msgpack/test_buffer.py
new file mode 100644
index 0000000000000..940b65406103e
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_buffer.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from pandas.msgpack import packb, unpackb
+
+
+def test_unpack_buffer():
+ from array import array
+ buf = array('b')
+ buf.fromstring(packb(('foo', 'bar')))
+ obj = unpackb(buf, use_list=1)
+ assert [b'foo', b'bar'] == obj
diff --git a/pandas/tests/test_msgpack/test_case.py b/pandas/tests/test_msgpack/test_case.py
new file mode 100644
index 0000000000000..e78456b2ddb62
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_case.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from pandas.msgpack import packb, unpackb
+
+
+def check(length, obj):
+ v = packb(obj)
+ assert len(v) == length, \
+ "%r length should be %r but get %r" % (obj, length, len(v))
+ assert unpackb(v, use_list=0) == obj
+
+def test_1():
+ for o in [None, True, False, 0, 1, (1 << 6), (1 << 7) - 1, -1,
+ -((1<<5)-1), -(1<<5)]:
+ check(1, o)
+
+def test_2():
+ for o in [1 << 7, (1 << 8) - 1,
+ -((1<<5)+1), -(1<<7)
+ ]:
+ check(2, o)
+
+def test_3():
+ for o in [1 << 8, (1 << 16) - 1,
+ -((1<<7)+1), -(1<<15)]:
+ check(3, o)
+
+def test_5():
+ for o in [1 << 16, (1 << 32) - 1,
+ -((1<<15)+1), -(1<<31)]:
+ check(5, o)
+
+def test_9():
+ for o in [1 << 32, (1 << 64) - 1,
+ -((1<<31)+1), -(1<<63),
+ 1.0, 0.1, -0.1, -1.0]:
+ check(9, o)
+
+
+def check_raw(overhead, num):
+ check(num + overhead, b" " * num)
+
+def test_fixraw():
+ check_raw(1, 0)
+ check_raw(1, (1<<5) - 1)
+
+def test_raw16():
+ check_raw(3, 1<<5)
+ check_raw(3, (1<<16) - 1)
+
+def test_raw32():
+ check_raw(5, 1<<16)
+
+
+def check_array(overhead, num):
+ check(num + overhead, (None,) * num)
+
+def test_fixarray():
+ check_array(1, 0)
+ check_array(1, (1 << 4) - 1)
+
+def test_array16():
+ check_array(3, 1 << 4)
+ check_array(3, (1<<16)-1)
+
+def test_array32():
+ check_array(5, (1<<16))
+
+
+def match(obj, buf):
+ assert packb(obj) == buf
+ assert unpackb(buf, use_list=0) == obj
+
+def test_match():
+ cases = [
+ (None, b'\xc0'),
+ (False, b'\xc2'),
+ (True, b'\xc3'),
+ (0, b'\x00'),
+ (127, b'\x7f'),
+ (128, b'\xcc\x80'),
+ (256, b'\xcd\x01\x00'),
+ (-1, b'\xff'),
+ (-33, b'\xd0\xdf'),
+ (-129, b'\xd1\xff\x7f'),
+ ({1:1}, b'\x81\x01\x01'),
+ (1.0, b"\xcb\x3f\xf0\x00\x00\x00\x00\x00\x00"),
+ ((), b'\x90'),
+ (tuple(range(15)),b"\x9f\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e"),
+ (tuple(range(16)),b"\xdc\x00\x10\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"),
+ ({}, b'\x80'),
+ (dict([(x,x) for x in range(15)]), b'\x8f\x00\x00\x01\x01\x02\x02\x03\x03\x04\x04\x05\x05\x06\x06\x07\x07\x08\x08\t\t\n\n\x0b\x0b\x0c\x0c\r\r\x0e\x0e'),
+ (dict([(x,x) for x in range(16)]), b'\xde\x00\x10\x00\x00\x01\x01\x02\x02\x03\x03\x04\x04\x05\x05\x06\x06\x07\x07\x08\x08\t\t\n\n\x0b\x0b\x0c\x0c\r\r\x0e\x0e\x0f\x0f'),
+ ]
+
+ for v, p in cases:
+ match(v, p)
+
+def test_unicode():
+ assert unpackb(packb('foobar'), use_list=1) == b'foobar'
diff --git a/pandas/tests/test_msgpack/test_except.py b/pandas/tests/test_msgpack/test_except.py
new file mode 100644
index 0000000000000..2e1652a6b8761
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_except.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from pytest import raises
+from pandas.msgpack import packb, unpackb
+
+import datetime
+
+
+class DummyException(Exception):
+ pass
+
+
+def test_raise_on_find_unsupported_value():
+ with raises(TypeError):
+ packb(datetime.datetime.now())
+
+
+def test_raise_from_object_hook():
+ def hook(obj):
+ raise DummyException
+ raises(DummyException, unpackb, packb({}), object_hook=hook)
+ raises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_hook=hook)
+ raises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_pairs_hook=hook)
+ raises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_hook=hook)
+ raises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_pairs_hook=hook)
+
+
+def test_invalidvalue():
+ with raises(ValueError):
+ unpackb(b'\xd9\x97#DL_')
diff --git a/pandas/tests/test_msgpack/test_format.py b/pandas/tests/test_msgpack/test_format.py
new file mode 100644
index 0000000000000..a3a3afd046ce2
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_format.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from pandas.msgpack import unpackb
+
+def check(src, should, use_list=0):
+ assert unpackb(src, use_list=use_list) == should
+
+def testSimpleValue():
+ check(b"\x93\xc0\xc2\xc3",
+ (None, False, True,))
+
+def testFixnum():
+ check(b"\x92\x93\x00\x40\x7f\x93\xe0\xf0\xff",
+ ((0,64,127,), (-32,-16,-1,),)
+ )
+
+def testFixArray():
+ check(b"\x92\x90\x91\x91\xc0",
+ ((),((None,),),),
+ )
+
+def testFixRaw():
+ check(b"\x94\xa0\xa1a\xa2bc\xa3def",
+ (b"", b"a", b"bc", b"def",),
+ )
+
+def testFixMap():
+ check(
+ b"\x82\xc2\x81\xc0\xc0\xc3\x81\xc0\x80",
+ {False: {None: None}, True:{None:{}}},
+ )
+
+def testUnsignedInt():
+ check(
+ b"\x99\xcc\x00\xcc\x80\xcc\xff\xcd\x00\x00\xcd\x80\x00"
+ b"\xcd\xff\xff\xce\x00\x00\x00\x00\xce\x80\x00\x00\x00"
+ b"\xce\xff\xff\xff\xff",
+ (0, 128, 255, 0, 32768, 65535, 0, 2147483648, 4294967295,),
+ )
+
+def testSignedInt():
+ check(b"\x99\xd0\x00\xd0\x80\xd0\xff\xd1\x00\x00\xd1\x80\x00"
+ b"\xd1\xff\xff\xd2\x00\x00\x00\x00\xd2\x80\x00\x00\x00"
+ b"\xd2\xff\xff\xff\xff",
+ (0, -128, -1, 0, -32768, -1, 0, -2147483648, -1,))
+
+def testRaw():
+ check(b"\x96\xda\x00\x00\xda\x00\x01a\xda\x00\x02ab\xdb\x00\x00"
+ b"\x00\x00\xdb\x00\x00\x00\x01a\xdb\x00\x00\x00\x02ab",
+ (b"", b"a", b"ab", b"", b"a", b"ab"))
+
+def testArray():
+ check(b"\x96\xdc\x00\x00\xdc\x00\x01\xc0\xdc\x00\x02\xc2\xc3\xdd\x00"
+ b"\x00\x00\x00\xdd\x00\x00\x00\x01\xc0\xdd\x00\x00\x00\x02"
+ b"\xc2\xc3",
+ ((), (None,), (False,True), (), (None,), (False,True))
+ )
+
+def testMap():
+ check(
+ b"\x96"
+ b"\xde\x00\x00"
+ b"\xde\x00\x01\xc0\xc2"
+ b"\xde\x00\x02\xc0\xc2\xc3\xc2"
+ b"\xdf\x00\x00\x00\x00"
+ b"\xdf\x00\x00\x00\x01\xc0\xc2"
+ b"\xdf\x00\x00\x00\x02\xc0\xc2\xc3\xc2",
+ ({}, {None: False}, {True: False, None: False}, {},
+ {None: False}, {True: False, None: False}))
diff --git a/pandas/tests/test_msgpack/test_obj.py b/pandas/tests/test_msgpack/test_obj.py
new file mode 100644
index 0000000000000..967baa2443ed3
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_obj.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from pytest import raises
+from pandas.msgpack import packb, unpackb
+
+def _decode_complex(obj):
+ if b'__complex__' in obj:
+ return complex(obj[b'real'], obj[b'imag'])
+ return obj
+
+def _encode_complex(obj):
+ if isinstance(obj, complex):
+ return {b'__complex__': True, b'real': 1, b'imag': 2}
+ return obj
+
+def test_encode_hook():
+ packed = packb([3, 1+2j], default=_encode_complex)
+ unpacked = unpackb(packed, use_list=1)
+ assert unpacked[1] == {b'__complex__': True, b'real': 1, b'imag': 2}
+
+def test_decode_hook():
+ packed = packb([3, {b'__complex__': True, b'real': 1, b'imag': 2}])
+ unpacked = unpackb(packed, object_hook=_decode_complex, use_list=1)
+ assert unpacked[1] == 1+2j
+
+def test_decode_pairs_hook():
+ packed = packb([3, {1: 2, 3: 4}])
+ prod_sum = 1 * 2 + 3 * 4
+ unpacked = unpackb(packed, object_pairs_hook=lambda l: sum(k * v for k, v in l), use_list=1)
+ assert unpacked[1] == prod_sum
+
+def test_only_one_obj_hook():
+ with raises(ValueError):
+ unpackb(b'', object_hook=lambda x: x, object_pairs_hook=lambda x: x)
+
+def test_bad_hook():
+ with raises(ValueError):
+ packed = packb([3, 1+2j], default=lambda o: o)
+ unpacked = unpackb(packed, use_list=1)
+
+def _arr_to_str(arr):
+ return ''.join(str(c) for c in arr)
+
+def test_array_hook():
+ packed = packb([1,2,3])
+ unpacked = unpackb(packed, list_hook=_arr_to_str, use_list=1)
+ assert unpacked == '123'
+
+
+class DecodeError(Exception):
+ pass
+
+def bad_complex_decoder(o):
+ raise DecodeError("Ooops!")
+
+
+def test_an_exception_in_objecthook1():
+ with raises(DecodeError):
+ packed = packb({1: {'__complex__': True, 'real': 1, 'imag': 2}})
+ unpackb(packed, object_hook=bad_complex_decoder)
+
+
+def test_an_exception_in_objecthook2():
+ with raises(DecodeError):
+ packed = packb({1: [{'__complex__': True, 'real': 1, 'imag': 2}]})
+ unpackb(packed, list_hook=bad_complex_decoder, use_list=1)
diff --git a/pandas/tests/test_msgpack/test_pack.py b/pandas/tests/test_msgpack/test_pack.py
new file mode 100644
index 0000000000000..90979db60b8b8
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_pack.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+import six
+import struct
+from pytest import raises, xfail
+
+from pandas.msgpack import packb, unpackb, Unpacker, Packer
+
+from io import BytesIO
+
+def check(data, use_list=False):
+ re = unpackb(packb(data), use_list=use_list)
+ assert re == data
+
+def testPack():
+ test_data = [
+ 0, 1, 127, 128, 255, 256, 65535, 65536,
+ -1, -32, -33, -128, -129, -32768, -32769,
+ 1.0,
+ b"", b"a", b"a"*31, b"a"*32,
+ None, True, False,
+ (), ((),), ((), None,),
+ {None: 0},
+ (1<<23),
+ ]
+ for td in test_data:
+ check(td)
+
+def testPackUnicode():
+ test_data = [
+ six.u(""), six.u("abcd"), [six.u("defgh")], six.u("Русский текст"),
+ ]
+ for td in test_data:
+ re = unpackb(packb(td, encoding='utf-8'), use_list=1, encoding='utf-8')
+ assert re == td
+ packer = Packer(encoding='utf-8')
+ data = packer.pack(td)
+ re = Unpacker(BytesIO(data), encoding='utf-8', use_list=1).unpack()
+ assert re == td
+
+def testPackUTF32():
+ try:
+ test_data = [
+ six.u(""),
+ six.u("abcd"),
+ [six.u("defgh")],
+ six.u("Русский текст"),
+ ]
+ for td in test_data:
+ re = unpackb(packb(td, encoding='utf-32'), use_list=1, encoding='utf-32')
+ assert re == td
+ except LookupError as e:
+ xfail(e)
+
+def testPackBytes():
+ test_data = [
+ b"", b"abcd", (b"defgh",),
+ ]
+ for td in test_data:
+ check(td)
+
+def testIgnoreUnicodeErrors():
+ re = unpackb(packb(b'abc\xeddef'), encoding='utf-8', unicode_errors='ignore', use_list=1)
+ assert re == "abcdef"
+
+def testStrictUnicodeUnpack():
+ with raises(UnicodeDecodeError):
+ unpackb(packb(b'abc\xeddef'), encoding='utf-8', use_list=1)
+
+def testStrictUnicodePack():
+ with raises(UnicodeEncodeError):
+ packb(six.u("abc\xeddef"), encoding='ascii', unicode_errors='strict')
+
+def testIgnoreErrorsPack():
+ re = unpackb(packb(six.u("abcФФФdef"), encoding='ascii', unicode_errors='ignore'), encoding='utf-8', use_list=1)
+ assert re == six.u("abcdef")
+
+def testNoEncoding():
+ with raises(TypeError):
+ packb(six.u("abc"), encoding=None)
+
+def testDecodeBinary():
+ re = unpackb(packb("abc"), encoding=None, use_list=1)
+ assert re == b"abc"
+
+def testPackFloat():
+ assert packb(1.0, use_single_float=True) == b'\xca' + struct.pack('>f', 1.0)
+ assert packb(1.0, use_single_float=False) == b'\xcb' + struct.pack('>d', 1.0)
+
+def testArraySize(sizes=[0, 5, 50, 1000]):
+ bio = six.BytesIO()
+ packer = Packer()
+ for size in sizes:
+ bio.write(packer.pack_array_header(size))
+ for i in range(size):
+ bio.write(packer.pack(i))
+
+ bio.seek(0)
+ unpacker = Unpacker(bio, use_list=1)
+ for size in sizes:
+ assert unpacker.unpack() == list(range(size))
+
+def test_manualreset(sizes=[0, 5, 50, 1000]):
+ packer = Packer(autoreset=False)
+ for size in sizes:
+ packer.pack_array_header(size)
+ for i in range(size):
+ packer.pack(i)
+
+ bio = six.BytesIO(packer.bytes())
+ unpacker = Unpacker(bio, use_list=1)
+ for size in sizes:
+ assert unpacker.unpack() == list(range(size))
+
+ packer.reset()
+ assert packer.bytes() == b''
+
+def testMapSize(sizes=[0, 5, 50, 1000]):
+ bio = six.BytesIO()
+ packer = Packer()
+ for size in sizes:
+ bio.write(packer.pack_map_header(size))
+ for i in range(size):
+ bio.write(packer.pack(i)) # key
+ bio.write(packer.pack(i * 2)) # value
+
+ bio.seek(0)
+ unpacker = Unpacker(bio)
+ for size in sizes:
+ assert unpacker.unpack() == dict((i, i * 2) for i in range(size))
+
+
+class odict(dict):
+ '''Reimplement OrderedDict to run test on Python 2.6'''
+ def __init__(self, seq):
+ self._seq = seq
+ dict.__init__(self, seq)
+
+ def items(self):
+ return self._seq[:]
+
+ def iteritems(self):
+ return iter(self._seq)
+
+ def keys(self):
+ return [x[0] for x in self._seq]
+
+def test_odict():
+ seq = [(b'one', 1), (b'two', 2), (b'three', 3), (b'four', 4)]
+ od = odict(seq)
+ assert unpackb(packb(od), use_list=1) == dict(seq)
+ def pair_hook(seq):
+ return list(seq)
+ assert unpackb(packb(od), object_pairs_hook=pair_hook, use_list=1) == seq
+
+
+def test_pairlist():
+ pairlist = [(b'a', 1), (2, b'b'), (b'foo', b'bar')]
+ packer = Packer()
+ packed = packer.pack_map_pairs(pairlist)
+ unpacked = unpackb(packed, object_pairs_hook=list)
+ assert pairlist == unpacked
diff --git a/pandas/tests/test_msgpack/test_read_size.py b/pandas/tests/test_msgpack/test_read_size.py
new file mode 100644
index 0000000000000..db3e1deb04f8f
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_read_size.py
@@ -0,0 +1,65 @@
+"""Test Unpacker's read_array_header and read_map_header methods"""
+from pandas.msgpack import packb, Unpacker, OutOfData
+UnexpectedTypeException = ValueError
+
+def test_read_array_header():
+ unpacker = Unpacker()
+ unpacker.feed(packb(['a', 'b', 'c']))
+ assert unpacker.read_array_header() == 3
+ assert unpacker.unpack() == b'a'
+ assert unpacker.unpack() == b'b'
+ assert unpacker.unpack() == b'c'
+ try:
+ unpacker.unpack()
+ assert 0, 'should raise exception'
+ except OutOfData:
+ assert 1, 'okay'
+
+
+def test_read_map_header():
+ unpacker = Unpacker()
+ unpacker.feed(packb({'a': 'A'}))
+ assert unpacker.read_map_header() == 1
+ assert unpacker.unpack() == B'a'
+ assert unpacker.unpack() == B'A'
+ try:
+ unpacker.unpack()
+ assert 0, 'should raise exception'
+ except OutOfData:
+ assert 1, 'okay'
+
+def test_incorrect_type_array():
+ unpacker = Unpacker()
+ unpacker.feed(packb(1))
+ try:
+ unpacker.read_array_header()
+ assert 0, 'should raise exception'
+ except UnexpectedTypeException:
+ assert 1, 'okay'
+
+def test_incorrect_type_map():
+ unpacker = Unpacker()
+ unpacker.feed(packb(1))
+ try:
+ unpacker.read_map_header()
+ assert 0, 'should raise exception'
+ except UnexpectedTypeException:
+ assert 1, 'okay'
+
+def test_correct_type_nested_array():
+ unpacker = Unpacker()
+ unpacker.feed(packb({'a': ['b', 'c', 'd']}))
+ try:
+ unpacker.read_array_header()
+ assert 0, 'should raise exception'
+ except UnexpectedTypeException:
+ assert 1, 'okay'
+
+def test_incorrect_type_nested_map():
+ unpacker = Unpacker()
+ unpacker.feed(packb([{'a': 'b'}]))
+ try:
+ unpacker.read_map_header()
+ assert 0, 'should raise exception'
+ except UnexpectedTypeException:
+ assert 1, 'okay'
diff --git a/pandas/tests/test_msgpack/test_seq.py b/pandas/tests/test_msgpack/test_seq.py
new file mode 100644
index 0000000000000..d1639bd51003b
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_seq.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+import six
+import io
+import pandas.msgpack as msgpack
+
+binarydata = [chr(i) for i in range(256)]
+binarydata = six.b("".join(binarydata))
+
+def gen_binary_data(idx):
+ data = binarydata[:idx % 300]
+ return data
+
+def test_exceeding_unpacker_read_size():
+ dumpf = io.BytesIO()
+
+ packer = msgpack.Packer()
+
+ NUMBER_OF_STRINGS = 6
+ read_size = 16
+ # 5 ok for read_size=16, while 6 glibc detected *** python: double free or corruption (fasttop):
+ # 20 ok for read_size=256, while 25 segfaults / glibc detected *** python: double free or corruption (!prev)
+ # 40 ok for read_size=1024, while 50 introduces errors
+ # 7000 ok for read_size=1024*1024, while 8000 leads to glibc detected *** python: double free or corruption (!prev):
+
+ for idx in range(NUMBER_OF_STRINGS):
+ data = gen_binary_data(idx)
+ dumpf.write(packer.pack(data))
+
+ f = io.BytesIO(dumpf.getvalue())
+ dumpf.close()
+
+ unpacker = msgpack.Unpacker(f, read_size=read_size, use_list=1)
+
+ read_count = 0
+ for idx, o in enumerate(unpacker):
+ assert type(o) == bytes
+ assert o == gen_binary_data(idx)
+ read_count += 1
+
+ assert read_count == NUMBER_OF_STRINGS
diff --git a/pandas/tests/test_msgpack/test_sequnpack.py b/pandas/tests/test_msgpack/test_sequnpack.py
new file mode 100644
index 0000000000000..add7e211effed
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_sequnpack.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+import six
+from pandas.msgpack import Unpacker, BufferFull
+from pandas.msgpack import OutOfData
+from pytest import raises
+
+
+def test_partialdata():
+ unpacker = Unpacker()
+ unpacker.feed(b'\xa5')
+ with raises(StopIteration): next(iter(unpacker))
+ unpacker.feed(b'h')
+ with raises(StopIteration): next(iter(unpacker))
+ unpacker.feed(b'a')
+ with raises(StopIteration): next(iter(unpacker))
+ unpacker.feed(b'l')
+ with raises(StopIteration): next(iter(unpacker))
+ unpacker.feed(b'l')
+ with raises(StopIteration): next(iter(unpacker))
+ unpacker.feed(b'o')
+ assert next(iter(unpacker)) == b'hallo'
+
+def test_foobar():
+ unpacker = Unpacker(read_size=3, use_list=1)
+ unpacker.feed(b'foobar')
+ assert unpacker.unpack() == ord(b'f')
+ assert unpacker.unpack() == ord(b'o')
+ assert unpacker.unpack() == ord(b'o')
+ assert unpacker.unpack() == ord(b'b')
+ assert unpacker.unpack() == ord(b'a')
+ assert unpacker.unpack() == ord(b'r')
+ with raises(OutOfData):
+ unpacker.unpack()
+
+ unpacker.feed(b'foo')
+ unpacker.feed(b'bar')
+
+ k = 0
+ for o, e in zip(unpacker, 'foobarbaz'):
+ assert o == ord(e)
+ k += 1
+ assert k == len(b'foobar')
+
+def test_foobar_skip():
+ unpacker = Unpacker(read_size=3, use_list=1)
+ unpacker.feed(b'foobar')
+ assert unpacker.unpack() == ord(b'f')
+ unpacker.skip()
+ assert unpacker.unpack() == ord(b'o')
+ unpacker.skip()
+ assert unpacker.unpack() == ord(b'a')
+ unpacker.skip()
+ with raises(OutOfData):
+ unpacker.unpack()
+
+def test_maxbuffersize():
+ with raises(ValueError):
+ Unpacker(read_size=5, max_buffer_size=3)
+ unpacker = Unpacker(read_size=3, max_buffer_size=3, use_list=1)
+ unpacker.feed(b'fo')
+ with raises(BufferFull):
+ unpacker.feed(b'ob')
+ unpacker.feed(b'o')
+ assert ord('f') == next(unpacker)
+ unpacker.feed(b'b')
+ assert ord('o') == next(unpacker)
+ assert ord('o') == next(unpacker)
+ assert ord('b') == next(unpacker)
+
+
+def test_readbytes():
+ unpacker = Unpacker(read_size=3)
+ unpacker.feed(b'foobar')
+ assert unpacker.unpack() == ord(b'f')
+ assert unpacker.read_bytes(3) == b'oob'
+ assert unpacker.unpack() == ord(b'a')
+ assert unpacker.unpack() == ord(b'r')
+
+ # Test buffer refill
+ unpacker = Unpacker(six.BytesIO(b'foobar'), read_size=3)
+ assert unpacker.unpack() == ord(b'f')
+ assert unpacker.read_bytes(3) == b'oob'
+ assert unpacker.unpack() == ord(b'a')
+ assert unpacker.unpack() == ord(b'r')
diff --git a/pandas/tests/test_msgpack/test_subtype.py b/pandas/tests/test_msgpack/test_subtype.py
new file mode 100644
index 0000000000000..0934b31cebeda
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_subtype.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from pandas.msgpack import packb, unpackb
+from collections import namedtuple
+
+class MyList(list):
+ pass
+
+class MyDict(dict):
+ pass
+
+class MyTuple(tuple):
+ pass
+
+MyNamedTuple = namedtuple('MyNamedTuple', 'x y')
+
+def test_types():
+ assert packb(MyDict()) == packb(dict())
+ assert packb(MyList()) == packb(list())
+ assert packb(MyNamedTuple(1, 2)) == packb((1, 2))
diff --git a/pandas/tests/test_msgpack/test_unpack_raw.py b/pandas/tests/test_msgpack/test_unpack_raw.py
new file mode 100644
index 0000000000000..fc95f36f4dd6f
--- /dev/null
+++ b/pandas/tests/test_msgpack/test_unpack_raw.py
@@ -0,0 +1,29 @@
+"""Tests for cases where the user seeks to obtain packed msgpack objects"""
+
+import six
+from pandas.msgpack import Unpacker, packb
+
+
+def test_write_bytes():
+ unpacker = Unpacker()
+ unpacker.feed(b'abc')
+ f = six.BytesIO()
+ assert unpacker.unpack(f.write) == ord('a')
+ assert f.getvalue() == b'a'
+ f = six.BytesIO()
+ assert unpacker.skip(f.write) is None
+ assert f.getvalue() == b'b'
+ f = six.BytesIO()
+ assert unpacker.skip() is None
+ assert f.getvalue() == b''
+
+
+def test_write_bytes_multi_buffer():
+ long_val = (5) * 100
+ expected = packb(long_val)
+ unpacker = Unpacker(six.BytesIO(expected), read_size=3, max_buffer_size=3)
+
+ f = six.BytesIO()
+ unpacked = unpacker.unpack(f.write)
+ assert unpacked == long_val
+ assert f.getvalue() == expected
diff --git a/setup.py b/setup.py
index 030584ba509d3..3001453127656 100755
--- a/setup.py
+++ b/setup.py
@@ -447,6 +447,23 @@ def pxd(name):
extensions.extend([sparse_ext])
+#----------------------------------------------------------------------
+# msgpack stuff here
+
+if sys.byteorder == 'big':
+ macros = [('__BIG_ENDIAN__', '1')]
+else:
+ macros = [('__LITTLE_ENDIAN__', '1')]
+
+msgpack_ext = Extension('pandas.msgpack',
+ sources = [srcpath('msgpack',
+ suffix=suffix, subdir='')],
+ language='c++',
+ include_dirs=common_include,
+ define_macros=macros)
+
+extensions.append(msgpack_ext)
+
# if not ISRELEASED:
# extensions.extend([sandbox_ext])
| Let's get the ball rolling on better high perf serialization. Note this is based on the 0.3.0 release of msgpack-python, should be updated whenever another stable release of the library is made.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3828 | 2013-06-10T02:38:03Z | 2013-09-22T16:27:29Z | null | 2014-07-13T14:07:50Z |
CLN: remove relative imports | diff --git a/pandas/__init__.py b/pandas/__init__.py
index da4c146da3cfd..62de9a10e729b 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -3,17 +3,10 @@
__docformat__ = 'restructuredtext'
try:
- from . import hashtable, tslib, lib
-except Exception: # pragma: no cover
- import sys
- e = sys.exc_info()[1] # Py25 and Py3 current exception syntax conflict
- print e
- if 'No module named lib' in str(e):
- raise ImportError('C extensions not built: if you installed already '
- 'verify that you are not importing from the source '
- 'directory')
- else:
- raise
+ from pandas import hashtable, tslib, lib
+except ImportError as e: # pragma: no cover
+ module = str(e).lstrip('cannot import name ') # hack but overkill to use re
+ raise ImportError("C extensions: {0} not built".format(module))
from datetime import datetime
import numpy as np
diff --git a/pandas/algos.pyx b/pandas/algos.pyx
index cac9c5ccc7a6d..836101ecafa2d 100644
--- a/pandas/algos.pyx
+++ b/pandas/algos.pyx
@@ -57,7 +57,7 @@ cdef extern from "src/headers/math.h":
double fabs(double)
int signbit(double)
-from . import lib
+from pandas import lib
include "skiplist.pyx"
diff --git a/pandas/index.pyx b/pandas/index.pyx
index 7d33d6083d0eb..85a83b745510f 100644
--- a/pandas/index.pyx
+++ b/pandas/index.pyx
@@ -15,8 +15,8 @@ import numpy as np
cimport tslib
from hashtable cimport *
-from . import algos, tslib, hashtable as _hash
-from .tslib import Timestamp
+from pandas import algos, tslib, hashtable as _hash
+from pandas.tslib import Timestamp
from datetime cimport (get_datetime64_value, _pydatetime_to_dts,
pandas_datetimestruct)
@@ -34,7 +34,7 @@ try:
import pytz
UTC = pytz.utc
have_pytz = True
-except:
+except ImportError:
have_pytz = False
PyDateTime_IMPORT
@@ -42,8 +42,6 @@ PyDateTime_IMPORT
cdef extern from "Python.h":
int PySlice_Check(object)
-# int PyList_Check(object)
-# int PyTuple_Check(object)
cdef inline is_definitely_invalid_key(object val):
if PyTuple_Check(val):
| https://api.github.com/repos/pandas-dev/pandas/pulls/3827 | 2013-06-10T00:58:47Z | 2013-06-10T22:21:11Z | 2013-06-10T22:21:11Z | 2014-06-12T09:03:19Z | |
DOC: speedup io.rst doc build | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 9d923d2d0e0cf..ac5d49e036669 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -953,10 +953,15 @@ Reading HTML Content
.. versionadded:: 0.11.1
-The toplevel :func:`~pandas.io.html.read_html` function can accept an HTML
+The top-level :func:`~pandas.io.html.read_html` function can accept an HTML
string/file/url and will parse HTML tables into list of pandas DataFrames.
Let's look at a few examples.
+.. note::
+
+ ``read_html`` returns a ``list`` of ``DataFrame`` objects, even if there is
+ only a single table contained in the HTML content
+
Read a URL with no options
.. ipython:: python
@@ -967,107 +972,129 @@ Read a URL with no options
.. note::
- ``read_html`` returns a ``list`` of ``DataFrame`` objects, even if there is
- only a single table contained in the HTML content
+ The data from the above URL changes every Monday so the resulting data above
+ and the data below may be slightly different.
-Read a URL and match a table that contains specific text
+Read in the content of the file from the above URL and pass it to ``read_html``
+as a string
+
+.. ipython:: python
+ :suppress:
+
+ import os
+ file_path = os.path.abspath(os.path.join('source', '_static', 'banklist.html'))
+
+.. ipython:: python
+
+ with open(file_path, 'r') as f:
+ dfs = read_html(f.read())
+ dfs
+
+You can even pass in an instance of ``StringIO`` if you so desire
.. ipython:: python
+ from cStringIO import StringIO
+
+ with open(file_path, 'r') as f:
+ sio = StringIO(f.read())
+
+ dfs = read_html(sio)
+ dfs
+
+.. note::
+
+ The following examples are not run by the IPython evaluator due to the fact
+ that having so many network-accessing functions slows down the documentation
+ build. If you spot an error or an example that doesn't run, please do not
+ hesitate to report it over on `pandas GitHub issues page
+ <http://www.github.com/pydata/pandas/issues>`__.
+
+
+Read a URL and match a table that contains specific text
+
+.. code-block:: python
+
match = 'Metcalf Bank'
df_list = read_html(url, match=match)
- len(dfs)
- dfs[0]
Specify a header row (by default ``<th>`` elements are used to form the column
index); if specified, the header row is taken from the data minus the parsed
header elements (``<th>`` elements).
-.. ipython:: python
+.. code-block:: python
dfs = read_html(url, header=0)
- len(dfs)
- dfs[0]
Specify an index column
-.. ipython:: python
+.. code-block:: python
dfs = read_html(url, index_col=0)
- len(dfs)
- dfs[0]
- dfs[0].index.name
Specify a number of rows to skip
-.. ipython:: python
+.. code-block:: python
dfs = read_html(url, skiprows=0)
- len(dfs)
- dfs[0]
Specify a number of rows to skip using a list (``xrange`` (Python 2 only) works
as well)
-.. ipython:: python
+.. code-block:: python
dfs = read_html(url, skiprows=range(2))
- len(dfs)
- dfs[0]
Don't infer numeric and date types
-.. ipython:: python
+.. code-block:: python
dfs = read_html(url, infer_types=False)
- len(dfs)
- dfs[0]
Specify an HTML attribute
-.. ipython:: python
+.. code-block:: python
dfs1 = read_html(url, attrs={'id': 'table'})
dfs2 = read_html(url, attrs={'class': 'sortable'})
- np.array_equal(dfs1[0], dfs2[0])
+ print np.array_equal(dfs1[0], dfs2[0]) # Should be True
Use some combination of the above
-.. ipython:: python
+.. code-block:: python
dfs = read_html(url, match='Metcalf Bank', index_col=0)
- len(dfs)
- dfs[0]
Read in pandas ``to_html`` output (with some loss of floating point precision)
-.. ipython:: python
+.. code-block:: python
df = DataFrame(randn(2, 2))
s = df.to_html(float_format='{0:.40g}'.format)
dfin = read_html(s, index_col=0)
- df
- dfin[0]
- df.index
- df.columns
- dfin[0].index
- dfin[0].columns
- np.allclose(df, dfin[0])
-``lxml`` will raise an error on a failed parse if that is the only parser you
-provide
+The ``lxml`` backend will raise an error on a failed parse if that is the only
+parser you provide (if you only have a single parser you can provide just a
+string, but it is considered good practice to pass a list with one string if,
+for example, the function expects a sequence of strings)
-.. ipython:: python
+.. code-block:: python
+
+ dfs = read_html(url, 'Metcalf Bank', index_col=0, flavor=['lxml'])
- dfs = read_html(url, match='Metcalf Bank', index_col=0, flavor=['lxml'])
+or
+
+.. code-block:: python
+
+ dfs = read_html(url, 'Metcalf Bank', index_col=0, flavor='lxml')
However, if you have bs4 and html5lib installed and pass ``None`` or ``['lxml',
'bs4']`` then the parse will most likely succeed. Note that *as soon as a parse
succeeds, the function will return*.
-.. ipython:: python
+.. code-block:: python
- dfs = read_html(url, match='Metcalf Bank', index_col=0, flavor=['lxml', 'bs4'])
+ dfs = read_html(url, 'Metcalf Bank', index_col=0, flavor=['lxml', 'bs4'])
Writing to HTML files
@@ -1082,8 +1109,8 @@ in the method ``to_string`` described above.
.. note::
Not all of the possible options for ``DataFrame.to_html`` are shown here for
- brevity's sake. See :func:`~pandas.core.frame.DataFrame.to_html` for the full set of
- options.
+ brevity's sake. See :func:`~pandas.core.frame.DataFrame.to_html` for the
+ full set of options.
.. ipython:: python
:suppress:
| @jreback can u check this out and see if the speedup is acceptable?
| https://api.github.com/repos/pandas-dev/pandas/pulls/3826 | 2013-06-10T00:34:59Z | 2013-06-11T11:50:08Z | 2013-06-11T11:50:08Z | 2013-06-27T15:39:34Z |
CLN: conform read_clipboard / to_clipboard to new io standards | diff --git a/RELEASE.rst b/RELEASE.rst
index 4d85834706e80..eca69d824d377 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -127,6 +127,7 @@ pandas 0.11.1
- added ``pandas.io.api`` for i/o imports
- removed ``Excel`` support to ``pandas.io.excel``
- added top-level ``pd.read_sql`` and ``to_sql`` DataFrame methods
+ - removed ``clipboard`` support to ``pandas.io.clipboard``
- the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
deprecated
- Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 7dd0315d7d90e..5533584745167 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -492,8 +492,8 @@ def to_hdf(self, path_or_buf, key, **kwargs):
return pytables.to_hdf(path_or_buf, key, self, **kwargs)
def to_clipboard(self):
- from pandas.io import parsers
- parsers.to_clipboard(self)
+ from pandas.io import clipboard
+ clipboard.to_clipboard(self)
# install the indexerse
for _name, _indexer in indexing.get_indexers_list():
diff --git a/pandas/io/api.py b/pandas/io/api.py
index e4c0c8c0c77f0..f17351921f83f 100644
--- a/pandas/io/api.py
+++ b/pandas/io/api.py
@@ -2,8 +2,8 @@
Data IO api
"""
-from pandas.io.parsers import (read_csv, read_table, read_clipboard,
- read_fwf, to_clipboard)
+from pandas.io.parsers import read_csv, read_table, read_fwf
+from pandas.io.clipboard import read_clipboard
from pandas.io.excel import ExcelFile, ExcelWriter, read_excel
from pandas.io.pytables import HDFStore, Term, get_store, read_hdf
from pandas.io.html import read_html
diff --git a/pandas/io/clipboard.py b/pandas/io/clipboard.py
new file mode 100644
index 0000000000000..4aa8db414386b
--- /dev/null
+++ b/pandas/io/clipboard.py
@@ -0,0 +1,31 @@
+""" io on the clipboard """
+
+def read_clipboard(**kwargs): # pragma: no cover
+ """
+ Read text from clipboard and pass to read_table. See read_table for the
+ full argument list
+
+ Returns
+ -------
+ parsed : DataFrame
+ """
+ from pandas.util.clipboard import clipboard_get
+ text = clipboard_get()
+ return read_table(StringIO(text), **kwargs)
+
+
+def to_clipboard(obj): # pragma: no cover
+ """
+ Attempt to write text representation of object to the system clipboard
+
+ Notes
+ -----
+ Requirements for your platform
+ - Linux: xsel command line tool
+ - Windows: Python win32 extensions
+ - OS X:
+ """
+ from pandas.util.clipboard import clipboard_set
+ clipboard_set(str(obj))
+
+
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 54ba7536afaee..6e937ba696e39 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -5,6 +5,7 @@
import re
from itertools import izip
import csv
+from warnings import warn
import numpy as np
@@ -427,35 +428,6 @@ def read_fwf(filepath_or_buffer, colspecs=None, widths=None, **kwds):
return _read(filepath_or_buffer, kwds)
-def read_clipboard(**kwargs): # pragma: no cover
- """
- Read text from clipboard and pass to read_table. See read_table for the
- full argument list
-
- Returns
- -------
- parsed : DataFrame
- """
- from pandas.util.clipboard import clipboard_get
- text = clipboard_get()
- return read_table(StringIO(text), **kwargs)
-
-
-def to_clipboard(obj): # pragma: no cover
- """
- Attempt to write text representation of object to the system clipboard
-
- Notes
- -----
- Requirements for your platform
- - Linux: xsel command line tool
- - Windows: Python win32 extensions
- - OS X:
- """
- from pandas.util.clipboard import clipboard_set
- clipboard_set(str(obj))
-
-
# common NA values
# no longer excluding inf representations
# '1.#INF','-1.#INF', '1.#INF000000',
@@ -1940,15 +1912,25 @@ def _make_reader(self, f):
self.data = FixedWidthReader(f, self.colspecs, self.delimiter)
+##### deprecations in 0.11.1 #####
+##### remove in 0.12 #####
+
+from pandas.io import clipboard
+def read_clipboard(**kwargs):
+ warn("read_clipboard is now a top-level accessible via pandas.read_clipboard", FutureWarning)
+ clipboard.read_clipboard(**kwargs)
+
+def to_clipboard(obj):
+ warn("to_clipboard is now an object level method accessible via obj.to_clipboard()", FutureWarning)
+ clipboard.to_clipboard(obj)
+
from pandas.io import excel
class ExcelWriter(excel.ExcelWriter):
def __init__(self, path):
- from warnings import warn
warn("ExcelWriter can now be imported from: pandas.io.excel", FutureWarning)
super(ExcelWriter, self).__init__(path)
class ExcelFile(excel.ExcelFile):
def __init__(self, path_or_buf, kind=None, **kwds):
- from warnings import warn
warn("ExcelFile can now be imported from: pandas.io.excel", FutureWarning)
super(ExcelFile, self).__init__(path_or_buf, kind=kind, **kwds)
| removed to io.clipboard (from io.parsers)
| https://api.github.com/repos/pandas-dev/pandas/pulls/3824 | 2013-06-09T23:15:25Z | 2013-06-09T23:41:23Z | 2013-06-09T23:41:23Z | 2014-07-16T08:13:04Z |
ENH: DataFrame.corr(method='spearman') is cythonized. | diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 76ae85a53102b..8dbf2e86e972e 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -295,6 +295,8 @@ Enhancements
- DatetimeIndexes no longer try to convert mixed-integer indexes during join
operations (GH3877_)
+ - DataFrame corr method (spearman) is now cythonized.
+
Bug Fixes
~~~~~~~~~
diff --git a/pandas/algos.pyx b/pandas/algos.pyx
index 836101ecafa2d..08ec707b0d96d 100644
--- a/pandas/algos.pyx
+++ b/pandas/algos.pyx
@@ -997,6 +997,69 @@ def nancorr(ndarray[float64_t, ndim=2] mat, cov=False, minp=None):
return result
+#----------------------------------------------------------------------
+# Pairwise Spearman correlation
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def nancorr_spearman(ndarray[float64_t, ndim=2] mat, Py_ssize_t minp=1):
+ cdef:
+ Py_ssize_t i, j, xi, yi, N, K
+ ndarray[float64_t, ndim=2] result
+ ndarray[float64_t, ndim=1] maskedx
+ ndarray[float64_t, ndim=1] maskedy
+ ndarray[uint8_t, ndim=2] mask
+ int64_t nobs = 0
+ float64_t vx, vy, sumx, sumxx, sumyy, mean, divisor
+
+ N, K = (<object> mat).shape
+
+ result = np.empty((K, K), dtype=np.float64)
+ mask = np.isfinite(mat).view(np.uint8)
+
+ for xi in range(K):
+ for yi in range(xi + 1):
+ nobs = 0
+ for i in range(N):
+ if mask[i, xi] and mask[i, yi]:
+ nobs += 1
+
+ if nobs < minp:
+ result[xi, yi] = result[yi, xi] = np.NaN
+ else:
+ maskedx = np.empty(nobs, dtype=np.float64)
+ maskedy = np.empty(nobs, dtype=np.float64)
+ j = 0
+ for i in range(N):
+ if mask[i, xi] and mask[i, yi]:
+ maskedx[j] = mat[i, xi]
+ maskedy[j] = mat[i, yi]
+ j += 1
+ maskedx = rank_1d_float64(maskedx)
+ maskedy = rank_1d_float64(maskedy)
+
+ mean = (nobs + 1) / 2.
+
+ # now the cov numerator
+ sumx = sumxx = sumyy = 0
+
+ for i in range(nobs):
+ vx = maskedx[i] - mean
+ vy = maskedy[i] - mean
+
+ sumx += vx * vy
+ sumxx += vx * vx
+ sumyy += vy * vy
+
+ divisor = sqrt(sumxx * sumyy)
+
+ if divisor != 0:
+ result[xi, yi] = result[yi, xi] = sumx / divisor
+ else:
+ result[xi, yi] = result[yi, xi] = np.NaN
+
+ return result
+
#----------------------------------------------------------------------
# Rolling variance
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5e3d3e95d8e56..f0145364363ac 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1528,7 +1528,7 @@ def to_stata(self, fname, convert_dates=None, write_index=True, encoding="latin-
from pandas.io.stata import StataWriter
writer = StataWriter(fname,self,convert_dates=convert_dates, encoding=encoding, byteorder=byteorder)
writer.write_file()
-
+
def to_sql(self, name, con, flavor='sqlite', if_exists='fail', **kwargs):
"""
Write records stored in a DataFrame to a SQL database.
@@ -4711,7 +4711,7 @@ def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
#----------------------------------------------------------------------
# Statistical methods, etc.
- def corr(self, method='pearson', min_periods=None):
+ def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values
@@ -4724,7 +4724,7 @@ def corr(self, method='pearson', min_periods=None):
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for pearson
- correlation
+ and spearman correlation
Returns
-------
@@ -4737,6 +4737,9 @@ def corr(self, method='pearson', min_periods=None):
if method == 'pearson':
correl = _algos.nancorr(com._ensure_float64(mat),
minp=min_periods)
+ elif method == 'spearman':
+ correl = _algos.nancorr_spearman(com._ensure_float64(mat),
+ minp=min_periods)
else:
if min_periods is None:
min_periods = 1
diff --git a/vb_suite/stat_ops.py b/vb_suite/stat_ops.py
index 86e879d0be523..f01a867ea2893 100644
--- a/vb_suite/stat_ops.py
+++ b/vb_suite/stat_ops.py
@@ -82,3 +82,12 @@
stats_rolling_mean = Benchmark('rolling_mean(arr, 100)', setup,
start_date=datetime(2011, 6, 1))
+
+# spearman correlation
+
+setup = common_setup + """
+df = DataFrame(np.random.randn(1000, 300))
+"""
+
+stats_corr_spearman = Benchmark("df.corr(method='spearman')", setup,
+ start_date=datetime(2011, 12, 4))
| It should be more than 10 times faster than the old version.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3823 | 2013-06-09T18:47:50Z | 2013-06-17T13:29:07Z | 2013-06-17T13:29:07Z | 2014-06-20T06:53:44Z |
Change Finance Options signatures and deprecate year/month parameters | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 07489a140c018..df09d2f5a50ba 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -156,6 +156,9 @@ pandas 0.11.1
``load`` will give deprecation warning.
- the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
deprecated
+ - set FutureWarning to require data_source, and to replace year/month with
+ expiry date in pandas.io options. This is in preparation to add options
+ data from google (:issue:`3822`)
- the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
deprecated
- Implement ``__nonzero__`` for ``NDFrame`` objects (:issue:`3691`, :issue:`3696`)
diff --git a/pandas/io/data.py b/pandas/io/data.py
index a97c77c207a4c..03ccde6a2fcc1 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -10,6 +10,7 @@
import urllib
import urllib2
import time
+import warnings
from zipfile import ZipFile
from pandas.util.py3compat import StringIO, BytesIO, bytes_to_str
@@ -111,12 +112,7 @@ def get_quote_yahoo(symbols):
urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (
sym_list, request)
- try:
- lines = urllib2.urlopen(urlStr).readlines()
- except Exception, e:
- s = "Failed to download:\n{0}".format(e)
- print (s)
- return None
+ lines = urllib2.urlopen(urlStr).readlines()
for line in lines:
fields = line.decode('utf-8').strip().split(',')
@@ -539,7 +535,7 @@ def _parse_options_data(table):
class Options(object):
"""
- This class fetches call/put data for a given stock/exipry month.
+ This class fetches call/put data for a given stock/expiry month.
It is instantiated with a string representing the ticker symbol.
@@ -553,7 +549,7 @@ class Options(object):
Examples
--------
# Instantiate object with ticker
- >>> aapl = Options('aapl')
+ >>> aapl = Options('aapl', 'yahoo')
# Fetch September 2012 call data
>>> calls = aapl.get_call_data(9, 2012)
@@ -576,24 +572,25 @@ class Options(object):
"""
- def __init__(self, symbol):
+ def __init__(self, symbol, data_source=None):
""" Instantiates options_data with a ticker saved as symbol """
self.symbol = str(symbol).upper()
+ if (data_source is None):
+ warnings.warn("Options(symbol) is deprecated, use Options(symbol, data_source) instead",
+ FutureWarning)
+ data_source = "yahoo"
+ if (data_source != "yahoo"):
+ raise NotImplementedError("currently only yahoo supported")
- def get_options_data(self, month=None, year=None):
+ def get_options_data(self, month=None, year=None, expiry=None):
"""
Gets call/put data for the stock with the expiration data in the
given month and year
Parameters
----------
- month: number, int, optional(default=None)
- The month the options expire. This should be either 1 or 2
- digits.
-
- year: number, int, optional(default=None)
- The year the options expire. This sould be a 4 digit int.
-
+ expiry: datetime.date, optional(default=None)
+ The date when options expire (defaults to current month)
Returns
-------
@@ -609,7 +606,7 @@ def get_options_data(self, month=None, year=None):
When called, this function will add instance variables named
calls and puts. See the following example:
- >>> aapl = Options('aapl') # Create object
+ >>> aapl = Options('aapl', 'yahoo') # Create object
>>> aapl.calls # will give an AttributeError
>>> aapl.get_options_data() # Get data and set ivars
>>> aapl.calls # Doesn't throw AttributeError
@@ -621,6 +618,8 @@ def get_options_data(self, month=None, year=None):
representations of the month and year for the expiry of the
options.
"""
+ year, month = self._try_parse_dates(year,month,expiry)
+
from lxml.html import parse
if month and year: # try to get specified month from yahoo finance
@@ -659,19 +658,15 @@ def get_options_data(self, month=None, year=None):
return [call_data, put_data]
- def get_call_data(self, month=None, year=None):
+ def get_call_data(self, month=None, year=None, expiry=None):
"""
Gets call/put data for the stock with the expiration data in the
given month and year
Parameters
----------
- month: number, int, optional(default=None)
- The month the options expire. This should be either 1 or 2
- digits.
-
- year: number, int, optional(default=None)
- The year the options expire. This sould be a 4 digit int.
+ expiry: datetime.date, optional(default=None)
+ The date when options expire (defaults to current month)
Returns
-------
@@ -683,7 +678,7 @@ def get_call_data(self, month=None, year=None):
When called, this function will add instance variables named
calls and puts. See the following example:
- >>> aapl = Options('aapl') # Create object
+ >>> aapl = Options('aapl', 'yahoo') # Create object
>>> aapl.calls # will give an AttributeError
>>> aapl.get_call_data() # Get data and set ivars
>>> aapl.calls # Doesn't throw AttributeError
@@ -694,6 +689,8 @@ def get_call_data(self, month=None, year=None):
repsectively, two digit representations of the month and year
for the expiry of the options.
"""
+ year, month = self._try_parse_dates(year,month,expiry)
+
from lxml.html import parse
if month and year: # try to get specified month from yahoo finance
@@ -727,19 +724,15 @@ def get_call_data(self, month=None, year=None):
return call_data
- def get_put_data(self, month=None, year=None):
+ def get_put_data(self, month=None, year=None, expiry=None):
"""
Gets put data for the stock with the expiration data in the
given month and year
Parameters
----------
- month: number, int, optional(default=None)
- The month the options expire. This should be either 1 or 2
- digits.
-
- year: number, int, optional(default=None)
- The year the options expire. This sould be a 4 digit int.
+ expiry: datetime.date, optional(default=None)
+ The date when options expire (defaults to current month)
Returns
-------
@@ -764,6 +757,8 @@ def get_put_data(self, month=None, year=None):
repsectively, two digit representations of the month and year
for the expiry of the options.
"""
+ year, month = self._try_parse_dates(year,month,expiry)
+
from lxml.html import parse
if month and year: # try to get specified month from yahoo finance
@@ -798,7 +793,7 @@ def get_put_data(self, month=None, year=None):
return put_data
def get_near_stock_price(self, above_below=2, call=True, put=False,
- month=None, year=None):
+ month=None, year=None, expiry=None):
"""
Cuts the data frame opt_df that is passed in to only take
options that are near the current stock price.
@@ -810,19 +805,15 @@ def get_near_stock_price(self, above_below=2, call=True, put=False,
should be taken
call: bool
- Tells the function weather or not it should be using
+ Tells the function whether or not it should be using
self.calls
put: bool
Tells the function weather or not it should be using
self.puts
- month: number, int, optional(default=None)
- The month the options expire. This should be either 1 or 2
- digits.
-
- year: number, int, optional(default=None)
- The year the options expire. This sould be a 4 digit int.
+ expiry: datetime.date, optional(default=None)
+ The date when options expire (defaults to current month)
Returns
-------
@@ -831,6 +822,8 @@ def get_near_stock_price(self, above_below=2, call=True, put=False,
desired. If there isn't data as far out as the user has asked for
then
"""
+ year, month = self._try_parse_dates(year,month,expiry)
+
price = float(get_quote_yahoo([self.symbol])['last'])
if call:
@@ -844,13 +837,6 @@ def get_near_stock_price(self, above_below=2, call=True, put=False,
except AttributeError:
df_c = self.get_call_data(month, year)
- # NOTE: For some reason the put commas in all values >1000. We remove
- # them here
- df_c.Strike = df_c.Strike.astype(str).apply(lambda x: \
- x.replace(',', ''))
- # Now make sure Strike column has dtype float
- df_c.Strike = df_c.Strike.astype(float)
-
start_index = np.where(df_c['Strike'] > price)[0][0]
get_range = range(start_index - above_below,
@@ -872,13 +858,6 @@ def get_near_stock_price(self, above_below=2, call=True, put=False,
except AttributeError:
df_p = self.get_put_data(month, year)
- # NOTE: For some reason the put commas in all values >1000. We remove
- # them here
- df_p.Strike = df_p.Strike.astype(str).apply(lambda x: \
- x.replace(',', ''))
- # Now make sure Strike column has dtype float
- df_p.Strike = df_p.Strike.astype(float)
-
start_index = np.where(df_p.Strike > price)[0][0]
get_range = range(start_index - above_below,
@@ -897,11 +876,21 @@ def get_near_stock_price(self, above_below=2, call=True, put=False,
else:
return chop_put
+ def _try_parse_dates(self, year, month, expiry):
+ if year is not None or month is not None:
+ warnings.warn("month, year arguments are deprecated, use expiry instead",
+ FutureWarning)
+
+ if expiry is not None:
+ year=expiry.year
+ month=expiry.month
+ return year, month
+
def get_forward_data(self, months, call=True, put=False, near=False,
above_below=2):
"""
Gets either call, put, or both data for months starting in the current
- month and going out in the future a spcified amount of time.
+ month and going out in the future a specified amount of time.
Parameters
----------
@@ -933,6 +922,7 @@ def get_forward_data(self, months, call=True, put=False, near=False,
If asked for, a DataFrame containing put data from the current
month to the current month plus months.
"""
+ warnings.warn("get_forward_data() is deprecated", FutureWarning)
in_months = range(cur_month, cur_month + months + 1)
in_years = [cur_year] * (months + 1)
diff --git a/pandas/io/tests/test_yahoo.py b/pandas/io/tests/test_yahoo.py
index 712475f76f5ed..1edb29efd00b9 100644
--- a/pandas/io/tests/test_yahoo.py
+++ b/pandas/io/tests/test_yahoo.py
@@ -1,6 +1,7 @@
import unittest
import nose
from datetime import datetime
+import warnings
import pandas as pd
import pandas.io.data as web
@@ -96,6 +97,61 @@ def test_get_data(self):
t= np.array(pan)
assert np.issubdtype(t.dtype, np.floating)
+ @network
+ def test_options(self):
+ try:
+ import lxml
+ except ImportError:
+ raise nose.SkipTest
+ # aapl has monthlies
+ aapl = web.Options('aapl', 'yahoo')
+ today = datetime.today()
+ year = today.year
+ month = today.month+1
+ if (month>12):
+ year = year +1
+ month = 1
+ expiry=datetime(year, month, 1)
+ (calls, puts) = aapl.get_options_data(expiry=expiry)
+ assert len(calls)>1
+ assert len(puts)>1
+ (calls, puts) = aapl.get_near_stock_price(call=True, put=True, expiry=expiry)
+ assert len(calls)==5
+ assert len(puts)==5
+ calls = aapl.get_call_data(expiry=expiry)
+ assert len(calls)>1
+ puts = aapl.get_put_data(expiry=expiry)
+ assert len(puts)>1
+
+ @network
+ def test_options_warnings(self):
+ try:
+ import lxml
+ except ImportError:
+ raise nose.SkipTest
+ with warnings.catch_warnings(record=True) as w:
+ warnings.resetwarnings()
+ # Cause all warnings to always be triggered.
+ warnings.simplefilter("always")
+ # aapl has monthlies
+ aapl = web.Options('aapl')
+ today = datetime.today()
+ year = today.year
+ month = today.month+1
+ if (month>12):
+ year = year +1
+ month = 1
+ (calls, puts) = aapl.get_options_data(month=month, year=year)
+ (calls, puts) = aapl.get_near_stock_price(call=True, put=True, month=month, year=year)
+ calls = aapl.get_call_data(month=month, year=year)
+ puts = aapl.get_put_data(month=month, year=year)
+ print(w)
+ assert len(w) == 5
+ assert "deprecated" in str(w[0].message)
+ assert "deprecated" in str(w[1].message)
+ assert "deprecated" in str(w[2].message)
+ assert "deprecated" in str(w[3].message)
+ assert "deprecated" in str(w[4].message)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
| #3817
| https://api.github.com/repos/pandas-dev/pandas/pulls/3822 | 2013-06-09T17:17:44Z | 2013-06-22T19:02:18Z | 2013-06-22T19:02:18Z | 2014-06-15T12:00:55Z |
FIX py3ing some print statements | diff --git a/examples/regressions.py b/examples/regressions.py
index 2d21a0ece58c3..2203165825ccb 100644
--- a/examples/regressions.py
+++ b/examples/regressions.py
@@ -31,7 +31,7 @@ def makeSeries():
model = ols(y=Y, x=X)
-print model
+print (model)
#-------------------------------------------------------------------------------
# Panel regression
@@ -48,4 +48,4 @@ def makeSeries():
model = ols(y=Y, x=data)
-print panelModel
+print (panelModel)
diff --git a/pandas/__init__.py b/pandas/__init__.py
index 62de9a10e729b..a0edb397c28c1 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -3,10 +3,17 @@
__docformat__ = 'restructuredtext'
try:
- from pandas import hashtable, tslib, lib
-except ImportError as e: # pragma: no cover
- module = str(e).lstrip('cannot import name ') # hack but overkill to use re
- raise ImportError("C extensions: {0} not built".format(module))
+ from . import hashtable, tslib, lib
+except Exception: # pragma: no cover
+ import sys
+ e = sys.exc_info()[1] # Py25 and Py3 current exception syntax conflict
+ print (e)
+ if 'No module named lib' in str(e):
+ raise ImportError('C extensions not built: if you installed already '
+ 'verify that you are not importing from the source '
+ 'directory')
+ else:
+ raise
from datetime import datetime
import numpy as np
diff --git a/pandas/core/config.py b/pandas/core/config.py
index e8403164ac1b9..ae7c71d082a89 100644
--- a/pandas/core/config.py
+++ b/pandas/core/config.py
@@ -154,7 +154,7 @@ def _describe_option(pat='', _print_desc=True):
s += _build_option_description(k)
if _print_desc:
- print s
+ print (s)
else:
return s
@@ -631,7 +631,7 @@ def pp(name, ks):
ls += pp(k, ks)
s = '\n'.join(ls)
if _print:
- print s
+ print (s)
else:
return s
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 40d80e91f0264..b1f7a2a8964b9 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -1899,4 +1899,4 @@ def _binify(cols, line_width):
1134250., 1219550., 855736.85, 1042615.4286,
722621.3043, 698167.1818, 803750.])
fmt = FloatArrayFormatter(arr, digits=7)
- print fmt.get_result()
+ print (fmt.get_result())
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index d15dcc1510577..43def5047197a 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1420,7 +1420,7 @@ def aggregate(self, func_or_funcs, *args, **kwargs):
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
- print 'Warning, ignoring as_index=True'
+ print ('Warning, ignoring as_index=True')
return ret
diff --git a/pandas/io/auth.py b/pandas/io/auth.py
index 5e64b795b8885..6da497687cf25 100644
--- a/pandas/io/auth.py
+++ b/pandas/io/auth.py
@@ -55,7 +55,7 @@ def process_flags(flags=[]):
try:
FLAGS(flags)
except gflags.FlagsError, e:
- print '%s\nUsage: %s ARGS\n%s' % (e, str(flags), FLAGS)
+ print ('%s\nUsage: %s ARGS\n%s' % (e, str(flags), FLAGS))
sys.exit(1)
# Set the logging according to the command-line flag.
diff --git a/pandas/io/data.py b/pandas/io/data.py
index 8bc3df561cadb..a97c77c207a4c 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -115,7 +115,7 @@ def get_quote_yahoo(symbols):
lines = urllib2.urlopen(urlStr).readlines()
except Exception, e:
s = "Failed to download:\n{0}".format(e)
- print s
+ print (s)
return None
for line in lines:
@@ -467,7 +467,7 @@ def get_data_fred(name=None, start=dt.datetime(2010, 1, 1),
start, end = _sanitize_dates(start, end)
if(name is None):
- print "Need to provide a name"
+ print ("Need to provide a name")
return None
fred_URL = "http://research.stlouisfed.org/fred2/series/"
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 054363d8cda06..e9088d68d73fa 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -508,7 +508,7 @@ def _clean_options(self, options, engine):
sep = options['delimiter']
if (sep is None and not options['delim_whitespace']):
if engine == 'c':
- print 'Using Python parser to sniff delimiter'
+ print ('Using Python parser to sniff delimiter')
engine = 'python'
elif sep is not None and len(sep) > 1:
# wait until regex engine integrated
@@ -867,7 +867,7 @@ def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
coerce_type)
result[c] = cvals
if verbose and na_count:
- print 'Filled %d NA values in column %s' % (na_count, str(c))
+ print ('Filled %d NA values in column %s' % (na_count, str(c)))
return result
def _convert_types(self, values, na_values, try_num_bool=True):
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 83e46fc949a4d..6cfbfd0f2d60a 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -386,7 +386,7 @@ def open(self, mode='a', warn=True):
self._handle = h5_open(self._path, self._mode)
except IOError, e: # pragma: no cover
if 'can not be written' in str(e):
- print 'Opening %s in read-only mode' % self._path
+ print ('Opening %s in read-only mode' % self._path)
self._handle = h5_open(self._path, 'r')
else:
raise
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 4a1cac8a60e30..68dff479a5015 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -51,7 +51,7 @@ def execute(sql, con, retry=True, cur=None, params=None):
except Exception: # pragma: no cover
pass
- print 'Error on sql %s' % sql
+ print ('Error on sql %s' % sql)
raise
@@ -94,7 +94,7 @@ def tquery(sql, con=None, cur=None, retry=True):
except Exception, e:
excName = e.__class__.__name__
if excName == 'OperationalError': # pragma: no cover
- print 'Failed to commit, may need to restart interpreter'
+ print ('Failed to commit, may need to restart interpreter')
else:
raise
@@ -128,7 +128,7 @@ def uquery(sql, con=None, cur=None, retry=True, params=None):
traceback.print_exc()
if retry:
- print 'Looks like your connection failed, reconnecting...'
+ print ('Looks like your connection failed, reconnecting...')
return uquery(sql, con, retry=False)
return result
diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py
index d6086d822ee02..0157729044782 100644
--- a/pandas/io/tests/test_html.py
+++ b/pandas/io/tests/test_html.py
@@ -86,8 +86,8 @@ def test_to_html_compat(self):
out = df.to_html()
res = self.run_read_html(out, attrs={'class': 'dataframe'},
index_col=0)[0]
- print df.dtypes
- print res.dtypes
+ print (df.dtypes)
+ print (res.dtypes)
assert_frame_equal(res, df)
@network
@@ -125,7 +125,7 @@ def test_spam(self):
df2 = self.run_read_html(self.spam_data, 'Unit', infer_types=False)
assert_framelist_equal(df1, df2)
- print df1[0]
+ print (df1[0])
self.assertEqual(df1[0].ix[0, 0], 'Proximates')
self.assertEqual(df1[0].columns[0], 'Nutrient')
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index f7f77698f51f5..f348e1ddce461 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -987,7 +987,7 @@ def test_big_table_frame(self):
rows = store.root.df.table.nrows
recons = store.select('df')
- print "\nbig_table frame [%s] -> %5.2f" % (rows, time.time() - x)
+ print ("\nbig_table frame [%s] -> %5.2f" % (rows, time.time() - x))
def test_big_table2_frame(self):
# this is a really big table: 1m rows x 60 float columns, 20 string, 20 datetime
@@ -995,7 +995,7 @@ def test_big_table2_frame(self):
raise nose.SkipTest('no big table2 frame')
# create and write a big table
- print "\nbig_table2 start"
+ print ("\nbig_table2 start")
import time
start_time = time.time()
df = DataFrame(np.random.randn(1000 * 1000, 60), index=xrange(int(
@@ -1005,7 +1005,8 @@ def test_big_table2_frame(self):
for x in xrange(20):
df['datetime%03d' % x] = datetime.datetime(2001, 1, 2, 0, 0)
- print "\nbig_table2 frame (creation of df) [rows->%s] -> %5.2f" % (len(df.index), time.time() - start_time)
+ print ("\nbig_table2 frame (creation of df) [rows->%s] -> %5.2f"
+ % (len(df.index), time.time() - start_time))
def f(chunksize):
with ensure_clean(self.path,mode='w') as store:
@@ -1015,14 +1016,15 @@ def f(chunksize):
for c in [10000, 50000, 250000]:
start_time = time.time()
- print "big_table2 frame [chunk->%s]" % c
+ print ("big_table2 frame [chunk->%s]" % c)
rows = f(c)
- print "big_table2 frame [rows->%s,chunk->%s] -> %5.2f" % (rows, c, time.time() - start_time)
+ print ("big_table2 frame [rows->%s,chunk->%s] -> %5.2f"
+ % (rows, c, time.time() - start_time))
def test_big_put_frame(self):
raise nose.SkipTest('no big put frame')
- print "\nbig_put start"
+ print ("\nbig_put start")
import time
start_time = time.time()
df = DataFrame(np.random.randn(1000 * 1000, 60), index=xrange(int(
@@ -1032,15 +1034,17 @@ def test_big_put_frame(self):
for x in xrange(20):
df['datetime%03d' % x] = datetime.datetime(2001, 1, 2, 0, 0)
- print "\nbig_put frame (creation of df) [rows->%s] -> %5.2f" % (len(df.index), time.time() - start_time)
+ print ("\nbig_put frame (creation of df) [rows->%s] -> %5.2f"
+ % (len(df.index), time.time() - start_time))
with ensure_clean(self.path, mode='w') as store:
start_time = time.time()
store = HDFStore(fn, mode='w')
store.put('df', df)
- print df.get_dtype_counts()
- print "big_put frame [shape->%s] -> %5.2f" % (df.shape, time.time() - start_time)
+ print (df.get_dtype_counts())
+ print ("big_put frame [shape->%s] -> %5.2f"
+ % (df.shape, time.time() - start_time))
def test_big_table_panel(self):
raise nose.SkipTest('no big table panel')
@@ -1064,7 +1068,7 @@ def test_big_table_panel(self):
rows = store.root.wp.table.nrows
recons = store.select('wp')
- print "\nbig_table panel [%s] -> %5.2f" % (rows, time.time() - x)
+ print ("\nbig_table panel [%s] -> %5.2f" % (rows, time.time() - x))
def test_append_diff_item_order(self):
@@ -2461,10 +2465,10 @@ def test_select_as_multiple(self):
expected = expected[5:]
tm.assert_frame_equal(result, expected)
except (Exception), detail:
- print "error in select_as_multiple %s" % str(detail)
- print "store: ", store
- print "df1: ", df1
- print "df2: ", df2
+ print ("error in select_as_multiple %s" % str(detail))
+ print ("store: %s" % store)
+ print ("df1: %s" % df1)
+ print ("df2: %s" % df2)
# test excpection for diff rows
diff --git a/pandas/io/wb.py b/pandas/io/wb.py
index 1a2108d069589..579da6bbc4e45 100644
--- a/pandas/io/wb.py
+++ b/pandas/io/wb.py
@@ -65,10 +65,10 @@ def download(country=['MX', 'CA', 'US'], indicator=['GDPPCKD', 'GDPPCKN'],
bad_indicators.append(ind)
# Warn
if len(bad_indicators) > 0:
- print 'Failed to obtain indicator(s): ' + '; '.join(bad_indicators)
- print 'The data may still be available for download at http://data.worldbank.org'
+ print ('Failed to obtain indicator(s): %s' % '; '.join(bad_indicators))
+ print ('The data may still be available for download at http://data.worldbank.org')
if len(bad_countries) > 0:
- print 'Invalid ISO-2 codes: ' + ' '.join(bad_countries)
+ print ('Invalid ISO-2 codes: %s' % ' '.join(bad_countries))
# Merge WDI series
if len(data) > 0:
out = reduce(lambda x, y: x.merge(y, how='outer'), data)
diff --git a/pandas/rpy/common.py b/pandas/rpy/common.py
index acc562925c925..92adee5bdae57 100644
--- a/pandas/rpy/common.py
+++ b/pandas/rpy/common.py
@@ -73,7 +73,7 @@ def _convert_array(obj):
major_axis=name_list[0],
minor_axis=name_list[1])
else:
- print 'Cannot handle dim=%d' % len(dim)
+ print ('Cannot handle dim=%d' % len(dim))
else:
return arr
diff --git a/pandas/stats/plm.py b/pandas/stats/plm.py
index 467ce6a05e1f0..e8c413ec4739c 100644
--- a/pandas/stats/plm.py
+++ b/pandas/stats/plm.py
@@ -56,7 +56,7 @@ def __init__(self, y, x, weights=None, intercept=True, nw_lags=None,
def log(self, msg):
if self._verbose: # pragma: no cover
- print msg
+ print (msg)
def _prepare_data(self):
"""Cleans and stacks input data into DataFrame objects
diff --git a/pandas/stats/tests/test_var.py b/pandas/stats/tests/test_var.py
index 282a794980979..cbaacd0e89b6e 100644
--- a/pandas/stats/tests/test_var.py
+++ b/pandas/stats/tests/test_var.py
@@ -124,10 +124,10 @@ def beta(self):
return rpy.convert_robj(r.coef(self._estimate))
def summary(self, equation=None):
- print r.summary(self._estimate, equation=equation)
+ print (r.summary(self._estimate, equation=equation))
def output(self):
- print self._estimate
+ print (self._estimate)
def estimate(self):
self._estimate = r.VAR(self.rdata, p=self.p, type=self.type)
@@ -144,7 +144,7 @@ def serial_test(self, lags_pt=16, type='PT.asymptotic'):
return test
def data_summary(self):
- print r.summary(self.rdata)
+ print (r.summary(self.rdata))
class TestVAR(TestCase):
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 8b32b3a641ebb..dd2fd88945f19 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -9079,7 +9079,7 @@ def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
if not ('max' in name or 'min' in name or 'count' in name):
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
_f = getattr(df, name)
- print df
+ print (df)
self.assertFalse(len(_f()))
df['a'] = range(len(df))
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 6989d3bcae42b..8f60cb8fc6a63 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -499,8 +499,8 @@ def test_agg_item_by_item_raise_typeerror(self):
df = DataFrame(randint(10, size=(20, 10)))
def raiseException(df):
- print '----------------------------------------'
- print df.to_string()
+ print ('----------------------------------------')
+ print (df.to_string())
raise TypeError
self.assertRaises(TypeError, df.groupby(0).agg,
diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py
index 4bf0a5bf3182c..9c22ad66d4f2b 100644
--- a/pandas/tseries/resample.py
+++ b/pandas/tseries/resample.py
@@ -85,7 +85,7 @@ def resample(self, obj):
offset = to_offset(self.freq)
if offset.n > 1:
if self.kind == 'period': # pragma: no cover
- print 'Warning: multiple of frequency -> timestamps'
+ print ('Warning: multiple of frequency -> timestamps')
# Cannot have multiple of periods, convert to timestamp
self.kind = 'timestamp'
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 43d44702d2d5e..c39f65f95d99f 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -20,7 +20,7 @@
raise Exception('dateutil 2.0 incompatible with Python 2.x, you must '
'install version 1.5 or 2.1+!')
except ImportError: # pragma: no cover
- print 'Please install python-dateutil via easy_install or some method!'
+ print ('Please install python-dateutil via easy_install or some method!')
raise # otherwise a 2nd import won't show the message
diff --git a/pandas/util/terminal.py b/pandas/util/terminal.py
index 7b9ddfbcfc8e6..3b5f893d1a0b3 100644
--- a/pandas/util/terminal.py
+++ b/pandas/util/terminal.py
@@ -117,4 +117,4 @@ def ioctl_GWINSZ(fd):
if __name__ == "__main__":
sizex, sizey = get_terminal_size()
- print 'width =', sizex, 'height =', sizey
+ print ('width = %s height = %s' % (sizex, sizey))
| I just grepped where there was python 2 print statement. There are still some in vbench/scripts/ez_setup and sphinxext/src (as well as in the rst docs), but these are all all from the main codebase.
```
grep "print [^(]" . -r
```
I don't know how to deal with the `print >>buf, empty` etc. (or whether we need to, ~~I've only labelled~~ there's only the two of these).... ?
_I found one by trying to use py3 before building, where I got a `print e` syntax error._
| https://api.github.com/repos/pandas-dev/pandas/pulls/3821 | 2013-06-09T14:18:36Z | 2013-06-21T10:22:58Z | 2013-06-21T10:22:58Z | 2014-07-16T08:12:55Z |
ENH: Add unit keyword to Timestamp and to_datetime | diff --git a/RELEASE.rst b/RELEASE.rst
index 161047c478d88..0d94337ffea78 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -82,6 +82,9 @@ pandas 0.11.1
- Series and DataFrame hist methods now take a ``figsize`` argument (GH3834_)
- DatetimeIndexes no longer try to convert mixed-integer indexes during join
operations (GH3877_)
+ - Add ``unit`` keyword to ``Timestamp`` and ``to_datetime`` to enable passing of
+ integers or floats that are in an epoch unit of ``s, ms, us, ns``
+ (e.g. unix timestamps or epoch ``s``, with fracional seconds allowed) (GH3540_)
**API Changes**
@@ -264,6 +267,7 @@ pandas 0.11.1
.. _GH3499: https://github.com/pydata/pandas/issues/3499
.. _GH3495: https://github.com/pydata/pandas/issues/3495
.. _GH3492: https://github.com/pydata/pandas/issues/3492
+.. _GH3540: https://github.com/pydata/pandas/issues/3540
.. _GH3552: https://github.com/pydata/pandas/issues/3552
.. _GH3562: https://github.com/pydata/pandas/issues/3562
.. _GH3586: https://github.com/pydata/pandas/issues/3586
diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx
index 5343819b9fbfe..270fb01a42033 100644
--- a/pandas/src/inference.pyx
+++ b/pandas/src/inference.pyx
@@ -471,7 +471,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
seen_float = 1
elif util.is_datetime64_object(val):
if convert_datetime:
- idatetimes[i] = convert_to_tsobject(val, None).value
+ idatetimes[i] = convert_to_tsobject(val, None, None).value
seen_datetime = 1
else:
seen_object = 1
@@ -493,7 +493,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
elif PyDateTime_Check(val) or util.is_datetime64_object(val):
if convert_datetime:
seen_datetime = 1
- idatetimes[i] = convert_to_tsobject(val, None).value
+ idatetimes[i] = convert_to_tsobject(val, None, None).value
else:
seen_object = 1
break
diff --git a/pandas/src/offsets.pyx b/pandas/src/offsets.pyx
index 5868ca5210e33..1823edeb0a4d9 100644
--- a/pandas/src/offsets.pyx
+++ b/pandas/src/offsets.pyx
@@ -76,7 +76,7 @@ cdef class _Offset:
cpdef anchor(self, object start=None):
if start is not None:
self.start = start
- self.ts = convert_to_tsobject(self.start)
+ self.ts = convert_to_tsobject(self.start, None, None)
self._setup()
cdef _setup(self):
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 51e657d1723b2..1cb986ee6cd7c 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1204,6 +1204,9 @@ def slice_indexer(self, start=None, end=None, step=None):
if isinstance(start, time) or isinstance(end, time):
raise KeyError('Cannot mix time and non-time slice keys')
+ if isinstance(start, float) or isinstance(end, float):
+ raise TypeError('Cannot index datetime64 with float keys')
+
return Index.slice_indexer(self, start, end, step)
def slice_locs(self, start=None, end=None):
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index f5415a195db77..ac02dee335afc 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -38,6 +38,7 @@
import pandas.util.py3compat as py3compat
from pandas.core.datetools import BDay
import pandas.core.common as com
+from pandas import concat
from numpy.testing.decorators import slow
@@ -171,7 +172,6 @@ def test_indexing_over_size_cutoff(self):
def test_indexing_unordered(self):
# GH 2437
- from pandas import concat
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
@@ -593,6 +593,34 @@ def test_frame_add_datetime64_col_other_units(self):
self.assert_((tmp['dates'].values == ex_vals).all())
+ def test_to_datetime_unit(self):
+
+ epoch = 1370745748
+ s = Series([ epoch + t for t in range(20) ])
+ result = to_datetime(s,unit='s')
+ expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
+ assert_series_equal(result,expected)
+
+ s = Series([ epoch + t for t in range(20) ]).astype(float)
+ result = to_datetime(s,unit='s')
+ expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
+ assert_series_equal(result,expected)
+
+ s = Series([ epoch + t for t in range(20) ] + [iNaT])
+ result = to_datetime(s,unit='s')
+ expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
+ assert_series_equal(result,expected)
+
+ s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
+ result = to_datetime(s,unit='s')
+ expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
+ assert_series_equal(result,expected)
+
+ s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
+ result = to_datetime(s,unit='s')
+ expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
+ assert_series_equal(result,expected)
+
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
@@ -2691,6 +2719,61 @@ def test_basics_nanos(self):
self.assert_(stamp.microsecond == 0)
self.assert_(stamp.nanosecond == 500)
+ def test_unit(self):
+ def check(val,unit=None,s=1,us=0):
+ stamp = Timestamp(val, unit=unit)
+ self.assert_(stamp.year == 2000)
+ self.assert_(stamp.month == 1)
+ self.assert_(stamp.day == 1)
+ self.assert_(stamp.hour == 1)
+ self.assert_(stamp.minute == 1)
+ self.assert_(stamp.second == s)
+ self.assert_(stamp.microsecond == us)
+ self.assert_(stamp.nanosecond == 0)
+
+ val = Timestamp('20000101 01:01:01').value
+
+ check(val)
+ check(val/1000L,unit='us')
+ check(val/1000000L,unit='ms')
+ check(val/1000000000L,unit='s')
+
+ # using truediv, so these are like floats
+ if py3compat.PY3:
+ check((val+500000)/1000000000L,unit='s',us=500)
+ check((val+500000000)/1000000000L,unit='s',us=500000)
+ check((val+500000)/1000000L,unit='ms',us=500)
+
+ # get chopped in py2
+ else:
+ check((val+500000)/1000000000L,unit='s')
+ check((val+500000000)/1000000000L,unit='s')
+ check((val+500000)/1000000L,unit='ms')
+
+ # ok
+ check((val+500000)/1000L,unit='us',us=500)
+ check((val+500000000)/1000000L,unit='ms',us=500000)
+
+ # floats
+ check(val/1000.0 + 5,unit='us',us=5)
+ check(val/1000.0 + 5000,unit='us',us=5000)
+ check(val/1000000.0 + 0.5,unit='ms',us=500)
+ check(val/1000000.0 + 0.005,unit='ms',us=5)
+ check(val/1000000000.0 + 0.5,unit='s',us=500000)
+
+ # nan
+ result = Timestamp(np.nan)
+ self.assert_(result is NaT)
+
+ result = Timestamp(None)
+ self.assert_(result is NaT)
+
+ result = Timestamp(iNaT)
+ self.assert_(result is NaT)
+
+ result = Timestamp(NaT)
+ self.assert_(result is NaT)
+
def test_comparison(self):
# 5-18-2012 00:00:00.000
stamp = 1337299200000000000L
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 62ee19da6b845..90bc0beb8eb84 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -50,7 +50,7 @@ def _maybe_get_tz(tz):
def to_datetime(arg, errors='ignore', dayfirst=False, utc=None, box=True,
- format=None, coerce=False):
+ format=None, coerce=False, unit='ns'):
"""
Convert argument to datetime
@@ -69,6 +69,8 @@ def to_datetime(arg, errors='ignore', dayfirst=False, utc=None, box=True,
format : string, default None
strftime to parse time, eg "%d/%m/%Y"
coerce : force errors to NaT (False by default)
+ unit : unit of the arg (s,ms,us,ns) denote the unit in epoch
+ (e.g. a unix timestamp), which is an integer/float number
Returns
-------
@@ -86,7 +88,7 @@ def _convert_f(arg):
else:
result = tslib.array_to_datetime(arg, raise_=errors == 'raise',
utc=utc, dayfirst=dayfirst,
- coerce=coerce)
+ coerce=coerce, unit=unit)
if com.is_datetime64_dtype(result) and box:
result = DatetimeIndex(result, tz='utc' if utc else None)
return result
diff --git a/pandas/tslib.pxd b/pandas/tslib.pxd
index 3e7a6ef615e00..a70f9883c5bb1 100644
--- a/pandas/tslib.pxd
+++ b/pandas/tslib.pxd
@@ -1,3 +1,3 @@
from numpy cimport ndarray, int64_t
-cdef convert_to_tsobject(object, object)
+cdef convert_to_tsobject(object, object, object)
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index abec45b52a363..ec11de7392680 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -131,21 +131,17 @@ class Timestamp(_Timestamp):
note: by definition there cannot be any tz info on the ordinal itself """
return cls(datetime.fromordinal(ordinal),offset=offset,tz=tz)
- def __new__(cls, object ts_input, object offset=None, tz=None):
+ def __new__(cls, object ts_input, object offset=None, tz=None, unit=None):
cdef _TSObject ts
cdef _Timestamp ts_base
- if PyFloat_Check(ts_input):
- # to do, do we want to support this, ie with fractional seconds?
- raise TypeError("Cannot convert a float to datetime")
-
if util.is_string_object(ts_input):
try:
ts_input = parse_date(ts_input)
except Exception:
pass
- ts = convert_to_tsobject(ts_input, tz)
+ ts = convert_to_tsobject(ts_input, tz, unit)
if ts.value == NPY_NAT:
return NaT
@@ -311,7 +307,7 @@ class Timestamp(_Timestamp):
if self.nanosecond != 0 and warn:
print 'Warning: discarding nonzero nanoseconds'
- ts = convert_to_tsobject(self, self.tzinfo)
+ ts = convert_to_tsobject(self, self.tzinfo, None)
return datetime(ts.dts.year, ts.dts.month, ts.dts.day,
ts.dts.hour, ts.dts.min, ts.dts.sec,
@@ -530,7 +526,7 @@ cdef class _Timestamp(datetime):
cdef:
pandas_datetimestruct dts
_TSObject ts
- ts = convert_to_tsobject(self, self.tzinfo)
+ ts = convert_to_tsobject(self, self.tzinfo, None)
dts = ts.dts
return datetime(dts.year, dts.month, dts.day,
dts.hour, dts.min, dts.sec,
@@ -623,12 +619,13 @@ cpdef _get_utcoffset(tzinfo, obj):
return tzinfo.utcoffset(obj)
# helper to extract datetime and int64 from several different possibilities
-cdef convert_to_tsobject(object ts, object tz):
+cdef convert_to_tsobject(object ts, object tz, object unit):
"""
Extract datetime and int64 from any of:
- - np.int64
+ - np.int64 (with unit providing a possible modifier)
- np.datetime64
- - python int or long object
+ - a float (with unit providing a possible modifier)
+ - python int or long object (with unit providing a possible modifier)
- iso8601 string object
- python datetime object
- another timestamp object
@@ -643,12 +640,25 @@ cdef convert_to_tsobject(object ts, object tz):
obj = _TSObject()
- if is_datetime64_object(ts):
+ if ts is None or ts is NaT:
+ obj.value = NPY_NAT
+ elif is_datetime64_object(ts):
obj.value = _get_datetime64_nanos(ts)
pandas_datetime_to_datetimestruct(obj.value, PANDAS_FR_ns, &obj.dts)
elif is_integer_object(ts):
- obj.value = ts
- pandas_datetime_to_datetimestruct(ts, PANDAS_FR_ns, &obj.dts)
+ if ts == NPY_NAT:
+ obj.value = NPY_NAT
+ else:
+ ts = ts * cast_from_unit(unit,None)
+ obj.value = ts
+ pandas_datetime_to_datetimestruct(ts, PANDAS_FR_ns, &obj.dts)
+ elif util.is_float_object(ts):
+ if ts != ts or ts == NPY_NAT:
+ obj.value = NPY_NAT
+ else:
+ ts = cast_from_unit(unit,ts)
+ obj.value = ts
+ pandas_datetime_to_datetimestruct(ts, PANDAS_FR_ns, &obj.dts)
elif util.is_string_object(ts):
if ts in _nat_strings:
obj.value = NPY_NAT
@@ -699,7 +709,7 @@ cdef convert_to_tsobject(object ts, object tz):
elif PyDate_Check(ts):
# Keep the converter same as PyDateTime's
ts = datetime.combine(ts, datetime_time())
- return convert_to_tsobject(ts, tz)
+ return convert_to_tsobject(ts, tz, None)
else:
raise ValueError("Could not construct Timestamp from argument %s" %
type(ts))
@@ -804,7 +814,7 @@ def datetime_to_datetime64(ndarray[object] values):
else:
inferred_tz = _get_zone(val.tzinfo)
- _ts = convert_to_tsobject(val, None)
+ _ts = convert_to_tsobject(val, None, None)
iresult[i] = _ts.value
_check_dts_bounds(iresult[i], &_ts.dts)
else:
@@ -819,7 +829,7 @@ def datetime_to_datetime64(ndarray[object] values):
def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False,
- format=None, utc=None, coerce=False):
+ format=None, utc=None, coerce=False, unit=None):
cdef:
Py_ssize_t i, n = len(values)
object val
@@ -828,6 +838,7 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False,
pandas_datetimestruct dts
bint utc_convert = bool(utc)
_TSObject _ts
+ int64_t m = cast_from_unit(unit,None)
from dateutil.parser import parse
@@ -841,7 +852,7 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False,
elif PyDateTime_Check(val):
if val.tzinfo is not None:
if utc_convert:
- _ts = convert_to_tsobject(val, None)
+ _ts = convert_to_tsobject(val, None, unit)
iresult[i] = _ts.value
_check_dts_bounds(iresult[i], &_ts.dts)
else:
@@ -861,7 +872,15 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False,
# if we are coercing, dont' allow integers
elif util.is_integer_object(val) and not coerce:
- iresult[i] = val
+ if val == iNaT:
+ iresult[i] = iNaT
+ else:
+ iresult[i] = val*m
+ elif util.is_float_object(val) and not coerce:
+ if val != val or val == iNaT:
+ iresult[i] = iNaT
+ else:
+ iresult[i] = cast_from_unit(unit,val)
else:
try:
if len(val) == 0:
@@ -1246,6 +1265,31 @@ cdef inline _get_datetime64_nanos(object val):
else:
return ival
+cdef inline int64_t cast_from_unit(object unit, object ts):
+ """ return a casting of the unit represented to nanoseconds
+ round the fractional part of a float to our precision, p """
+ if unit == 's':
+ m = 1000000000L
+ p = 6
+ elif unit == 'ms':
+ m = 1000000L
+ p = 3
+ elif unit == 'us':
+ m = 1000L
+ p = 0
+ else:
+ m = 1L
+ p = 0
+
+ # just give me the unit back
+ if ts is None:
+ return m
+
+ # cast the unit, multiply base/frace separately
+ # to avoid precision issues from float -> int
+ base = <int64_t> ts
+ frac = ts-base
+ return <int64_t> (base*m) + <int64_t> (round(frac,p)*m)
def cast_to_nanoseconds(ndarray arr):
cdef:
@@ -1286,7 +1330,7 @@ def pydt_to_i8(object pydt):
cdef:
_TSObject ts
- ts = convert_to_tsobject(pydt, None)
+ ts = convert_to_tsobject(pydt, None, None)
return ts.value
@@ -1784,7 +1828,7 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
- ts = convert_to_tsobject(dtindex[i], None)
+ ts = convert_to_tsobject(dtindex[i], None, None)
out[i] = ts_dayofweek(ts)
return out
@@ -1793,7 +1837,7 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
if dtindex[i] == NPY_NAT: out[i] = -1; continue
pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- ts = convert_to_tsobject(dtindex[i], None)
+ ts = convert_to_tsobject(dtindex[i], None, None)
isleap = is_leapyear(dts.year)
isleap_prev = is_leapyear(dts.year - 1)
mo_off = _month_offset[isleap, dts.month - 1]
@@ -1831,7 +1875,7 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
cdef inline int m8_weekday(int64_t val):
- ts = convert_to_tsobject(val, None)
+ ts = convert_to_tsobject(val, None, None)
return ts_dayofweek(ts)
cdef int64_t DAY_NS = 86400000000000LL
| to enable passing of integers or floats that are in an epoch unit of s, ms, us, ns
(e.g. unix timestamps or epoch s, with fracional seconds allowed)
closes #3540
```
In [5]: pd.to_datetime(Series([ 1370745748 + t for t in range(5) ]),unit='s')
Out[5]:
0 2013-06-09 02:42:28
1 2013-06-09 02:42:29
2 2013-06-09 02:42:30
3 2013-06-09 02:42:31
4 2013-06-09 02:42:32
dtype: datetime64[ns]
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3818 | 2013-06-09T03:41:50Z | 2013-06-13T19:12:08Z | 2013-06-13T19:12:08Z | 2014-06-18T08:55:58Z |
Tag yahoo data tests as @network only | diff --git a/pandas/io/tests/test_yahoo.py b/pandas/io/tests/test_yahoo.py
index b79fdad2bff9d..0e2c2022af422 100644
--- a/pandas/io/tests/test_yahoo.py
+++ b/pandas/io/tests/test_yahoo.py
@@ -14,7 +14,6 @@
class TestYahoo(unittest.TestCase):
- @slow
@network
def test_yahoo(self):
# asserts that yahoo is minimally working and that it throws
@@ -41,14 +40,12 @@ def test_yahoo(self):
raise
- @slow
@network
def test_get_quote(self):
df = web.get_quote_yahoo(pd.Series(['GOOG', 'AAPL', 'GOOG']))
assert_series_equal(df.ix[0], df.ix[2])
- @slow
@network
def test_get_components(self):
@@ -69,7 +66,6 @@ def test_get_components(self):
assert 'GOOG' in df.index
assert 'AMZN' in df.index
- @slow
@network
def test_get_data(self):
import numpy as np
| https://api.github.com/repos/pandas-dev/pandas/pulls/3816 | 2013-06-09T00:21:06Z | 2013-06-10T02:25:21Z | 2013-06-10T02:25:21Z | 2014-07-16T08:12:50Z | |
DOC/BLD: Fix a bunch of doc build warnings that weren't being previously caught | diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index 7870bdbeb97d3..c1d034d0d8e58 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -482,19 +482,23 @@ column-wise:
.. ipython:: python
index = date_range('1/1/2000', periods=8)
- df = DataFrame(randn(8, 3), index=index,
- columns=['A', 'B', 'C'])
+ df = DataFrame(randn(8, 3), index=index, columns=list('ABC'))
df
type(df['A'])
df - df['A']
-Technical purity aside, this case is so common in practice that supporting the
-special case is preferable to the alternative of forcing the user to transpose
-and do column-based alignment like so:
+.. warning::
-.. ipython:: python
+ .. code-block:: python
+
+ df - df['A']
+
+ is now deprecated and will be removed in a future release. The preferred way
+ to replicate this behavior is
+
+ .. code-block:: python
- (df.T - df['A']).T
+ df.sub(df['A'], axis=0)
For explicit control over the matching and broadcasting behavior, see the
section on :ref:`flexible binary operations <basics.binop>`.
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 3f6a4b7c59067..7f572c8c8e191 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -930,89 +930,103 @@ They can be both positive and negative.
.. ipython:: python
- from datetime import datetime, timedelta
- s = Series(date_range('2012-1-1', periods=3, freq='D'))
- td = Series([ timedelta(days=i) for i in range(3) ])
- df = DataFrame(dict(A = s, B = td))
- df
- df['C'] = df['A'] + df['B']
- df
- df.dtypes
-
- s - s.max()
- s - datetime(2011,1,1,3,5)
- s + timedelta(minutes=5)
+ from datetime import datetime, timedelta
+ s = Series(date_range('2012-1-1', periods=3, freq='D'))
+ td = Series([ timedelta(days=i) for i in range(3) ])
+ df = DataFrame(dict(A = s, B = td))
+ df
+ df['C'] = df['A'] + df['B']
+ df
+ df.dtypes
+
+ s - s.max()
+ s - datetime(2011,1,1,3,5)
+ s + timedelta(minutes=5)
Getting scalar results from a ``timedelta64[ns]`` series
+.. ipython:: python
+ :suppress:
+
+ from distutils.version import LooseVersion
+
.. ipython:: python
y = s - s[0]
y
- y.apply(lambda x: x.item().total_seconds())
- y.apply(lambda x: x.item().days)
-
-.. note::
- These operations are different in numpy 1.6.2 and in numpy >= 1.7. The ``timedelta64[ns]`` scalar
- type in 1.6.2 is much like a ``datetime.timedelta``, while in 1.7 it is a nanosecond based integer.
- A future version of pandas will make this transparent.
+ if LooseVersion(np.__version__) <= '1.6.2':
+ y.apply(lambda x: x.item().total_seconds())
+ y.apply(lambda x: x.item().days)
+ else:
+ y.apply(lambda x: x / np.timedelta64(1, 's'))
+ y.apply(lambda x: x / np.timedelta64(1, 'D'))
+
+.. note::
- These are the equivalent operation to above in numpy >= 1.7
+ As you can see from the conditional statement above, these operations are
+ different in numpy 1.6.2 and in numpy >= 1.7. The ``timedelta64[ns]`` scalar
+ type in 1.6.2 is much like a ``datetime.timedelta``, while in 1.7 it is a
+ nanosecond based integer. A future version of pandas will make this
+ transparent.
- ``y.apply(lambda x: x.item()/np.timedelta64(1,'s'))``
+.. note::
- ``y.apply(lambda x: x.item()/np.timedelta64(1,'D'))``
+ In numpy >= 1.7 dividing a ``timedelta64`` array by another ``timedelta64``
+ array will yield an array with dtype ``np.float64``.
Series of timedeltas with ``NaT`` values are supported
.. ipython:: python
- y = s - s.shift()
- y
+ y = s - s.shift()
+ y
+
The can be set to ``NaT`` using ``np.nan`` analagously to datetimes
.. ipython:: python
- y[1] = np.nan
- y
+ y[1] = np.nan
+ y
Operands can also appear in a reversed order (a singluar object operated with a Series)
.. ipython:: python
- s.max() - s
- datetime(2011,1,1,3,5) - s
- timedelta(minutes=5) + s
+ s.max() - s
+ datetime(2011,1,1,3,5) - s
+ timedelta(minutes=5) + s
Some timedelta numeric like operations are supported.
.. ipython:: python
- td - timedelta(minutes=5,seconds=5,microseconds=5)
+ td - timedelta(minutes=5, seconds=5, microseconds=5)
``min, max`` and the corresponding ``idxmin, idxmax`` operations are support on frames
.. ipython:: python
- df = DataFrame(dict(A = s - Timestamp('20120101')-timedelta(minutes=5,seconds=5),
- B = s - Series(date_range('2012-1-2', periods=3, freq='D'))))
- df
+ A = s - Timestamp('20120101') - timedelta(minutes=5, seconds=5)
+ B = s - Series(date_range('2012-1-2', periods=3, freq='D'))
+ df = DataFrame(dict(A=A, B=B))
+ df
- df.min()
- df.min(axis=1)
+ df.min()
+ df.min(axis=1)
- df.idxmin()
- df.idxmax()
+ df.idxmin()
+ df.idxmax()
-``min, max`` operations are support on series, these return a single element ``timedelta64[ns]`` Series (this avoids
-having to deal with numpy timedelta64 issues). ``idxmin, idxmax`` are supported as well.
+``min, max`` operations are support on series, these return a single element
+``timedelta64[ns]`` Series (this avoids having to deal with numpy timedelta64
+issues). ``idxmin, idxmax`` are supported as well.
.. ipython:: python
- df.min().max()
- df.min(axis=1).min()
+ df.min().max()
+ df.min(axis=1).min()
- df.min().idxmax()
- df.min(axis=1).idxmin()
+ df.min().idxmax()
+ df.min(axis=1).idxmin()
diff --git a/doc/source/v0.10.1.txt b/doc/source/v0.10.1.txt
index 3c22e9552c3a2..dafa4300af0e3 100644
--- a/doc/source/v0.10.1.txt
+++ b/doc/source/v0.10.1.txt
@@ -69,7 +69,7 @@ Retrieving unique values in an indexable or data column.
import warnings
with warnings.catch_warnings():
- warnings.simplefilter('ignore', category=DeprecationWarning)
+ warnings.simplefilter('ignore', category=UserWarning)
store.unique('df','index')
store.unique('df','string')
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index 63b5920bb0146..f0790396a5c39 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -5,14 +5,14 @@
:suppress:
import numpy as np
+ from numpy.random import randn, rand, randint
np.random.seed(123456)
- from pandas import *
+ from pandas import DataFrame, Series, date_range, options
import pandas.util.testing as tm
- randn = np.random.randn
np.set_printoptions(precision=4, suppress=True)
import matplotlib.pyplot as plt
plt.close('all')
- options.display.mpl_style='default'
+ options.display.mpl_style = 'default'
************************
Plotting with matplotlib
@@ -60,8 +60,7 @@ On DataFrame, ``plot`` is a convenience to plot all of the columns with labels:
.. ipython:: python
- df = DataFrame(randn(1000, 4), index=ts.index,
- columns=['A', 'B', 'C', 'D'])
+ df = DataFrame(randn(1000, 4), index=ts.index, columns=list('ABCD'))
df = df.cumsum()
@savefig frame_plot_basic.png width=6in
@@ -101,7 +100,7 @@ You can plot one column versus another using the `x` and `y` keywords in
plt.figure()
- df3 = DataFrame(np.random.randn(1000, 2), columns=['B', 'C']).cumsum()
+ df3 = DataFrame(randn(1000, 2), columns=['B', 'C']).cumsum()
df3['A'] = Series(range(len(df)))
@savefig df_plot_xy.png width=6in
@@ -169,7 +168,7 @@ Here is the default behavior, notice how the x-axis tick labelling is performed:
df.A.plot()
-Using the ``x_compat`` parameter, you can suppress this bevahior:
+Using the ``x_compat`` parameter, you can suppress this behavior:
.. ipython:: python
@@ -200,6 +199,15 @@ Targeting different subplots
You can pass an ``ax`` argument to ``Series.plot`` to plot on a particular axis:
+.. ipython:: python
+ :suppress:
+
+ ts = Series(randn(1000), index=date_range('1/1/2000', periods=1000))
+ ts = ts.cumsum()
+
+ df = DataFrame(randn(1000, 4), index=ts.index, columns=list('ABCD'))
+ df = df.cumsum()
+
.. ipython:: python
fig, axes = plt.subplots(nrows=2, ncols=2)
@@ -210,6 +218,7 @@ You can pass an ``ax`` argument to ``Series.plot`` to plot on a particular axis:
@savefig series_plot_multi.png width=6in
df['D'].plot(ax=axes[1,1]); axes[1,1].set_title('D')
+
.. _visualization.other:
Other plotting features
@@ -239,7 +248,7 @@ bar plot:
.. ipython:: python
- df2 = DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
+ df2 = DataFrame(rand(10, 4), columns=['a', 'b', 'c', 'd'])
@savefig bar_plot_multi_ex.png width=5in
df2.plot(kind='bar');
@@ -298,10 +307,10 @@ New since 0.10.0, the ``by`` keyword can be specified to plot grouped histograms
.. ipython:: python
- data = Series(np.random.randn(1000))
+ data = Series(randn(1000))
@savefig grouped_hist.png width=6in
- data.hist(by=np.random.randint(0, 4, 1000))
+ data.hist(by=randint(0, 4, 1000))
.. _visualization.box:
@@ -317,7 +326,7 @@ a uniform random variable on [0,1).
.. ipython:: python
- df = DataFrame(np.random.rand(10,5))
+ df = DataFrame(rand(10,5))
plt.figure();
@savefig box_plot_ex.png width=6in
@@ -328,7 +337,7 @@ groupings. For instance,
.. ipython:: python
- df = DataFrame(np.random.rand(10,2), columns=['Col1', 'Col2'] )
+ df = DataFrame(rand(10,2), columns=['Col1', 'Col2'] )
df['X'] = Series(['A','A','A','A','A','B','B','B','B','B'])
plt.figure();
@@ -341,7 +350,7 @@ columns:
.. ipython:: python
- df = DataFrame(np.random.rand(10,3), columns=['Col1', 'Col2', 'Col3'])
+ df = DataFrame(rand(10,3), columns=['Col1', 'Col2', 'Col3'])
df['X'] = Series(['A','A','A','A','A','B','B','B','B','B'])
df['Y'] = Series(['A','B','A','B','A','B','A','B','A','B'])
@@ -361,7 +370,7 @@ Scatter plot matrix
.. ipython:: python
from pandas.tools.plotting import scatter_matrix
- df = DataFrame(np.random.randn(1000, 4), columns=['a', 'b', 'c', 'd'])
+ df = DataFrame(randn(1000, 4), columns=['a', 'b', 'c', 'd'])
@savefig scatter_matrix_kde.png width=6in
scatter_matrix(df, alpha=0.2, figsize=(6, 6), diagonal='kde')
@@ -378,7 +387,7 @@ setting `kind='kde'`:
.. ipython:: python
- ser = Series(np.random.randn(1000))
+ ser = Series(randn(1000))
@savefig kde_plot.png width=6in
ser.plot(kind='kde')
@@ -444,7 +453,7 @@ implies that the underlying data are not random.
plt.figure()
- data = Series(0.1 * np.random.random(1000) +
+ data = Series(0.1 * rand(1000) +
0.9 * np.sin(np.linspace(-99 * np.pi, 99 * np.pi, num=1000)))
@savefig lag_plot.png width=6in
@@ -467,7 +476,7 @@ confidence band.
plt.figure()
- data = Series(0.7 * np.random.random(1000) +
+ data = Series(0.7 * rand(1000) +
0.3 * np.sin(np.linspace(-9 * np.pi, 9 * np.pi, num=1000)))
@savefig autocorrelation_plot.png width=6in
@@ -488,7 +497,7 @@ are what constitutes the bootstrap plot.
from pandas.tools.plotting import bootstrap_plot
- data = Series(np.random.random(1000))
+ data = Series(rand(1000))
@savefig bootstrap_plot.png width=6in
bootstrap_plot(data, size=50, samples=500, color='grey')
| https://api.github.com/repos/pandas-dev/pandas/pulls/3815 | 2013-06-08T23:51:07Z | 2013-06-09T01:56:17Z | 2013-06-09T01:56:17Z | 2014-07-06T07:55:09Z | |
Implement historical finance data from Google Finance | diff --git a/pandas/io/data.py b/pandas/io/data.py
index 43178fdcfddf1..8bc3df561cadb 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -58,6 +58,10 @@ def DataReader(name, data_source=None, start=None, end=None,
return get_data_yahoo(symbols=name, start=start, end=end,
adjust_price=False, chunk=25,
retry_count=retry_count, pause=pause)
+ elif(data_source == "google"):
+ return get_data_google(symbols=name, start=start, end=end,
+ adjust_price=False, chunk=25,
+ retry_count=retry_count, pause=pause)
elif(data_source == "fred"):
return get_data_fred(name=name, start=start, end=end)
elif(data_source == "famafrench"):
@@ -132,6 +136,9 @@ def get_quote_yahoo(symbols):
return DataFrame(data, index=idx)
+def get_quote_google(symbols):
+ raise NotImplementedError("Google Finance doesn't have this functionality")
+
def _get_hist_yahoo(sym=None, start=None, end=None, retry_count=3,
pause=0, **kwargs):
"""
@@ -178,6 +185,41 @@ def _get_hist_yahoo(sym=None, start=None, end=None, retry_count=3,
"return a 200 for url %s" % (pause, url))
+def _get_hist_google(sym=None, start=None, end=None, retry_count=3,
+ pause=0, **kwargs):
+ """
+ Get historical data for the given name from google.
+ Date format is datetime
+
+ Returns a DataFrame.
+ """
+ if(sym is None):
+ warnings.warn("Need to provide a name.")
+ return None
+
+ start, end = _sanitize_dates(start, end)
+
+ google_URL = 'http://www.google.com/finance/historical?'
+
+ # www.google.com/finance/historical?q=GOOG&startdate=Jun+9%2C+2011&enddate=Jun+8%2C+2013&output=csv
+ url = google_URL + urllib.urlencode({"q": sym, \
+ "startdate": start.strftime('%b %d, %Y'), \
+ "enddate": end.strftime('%b %d, %Y'), "output": "csv" })
+ for _ in range(retry_count):
+ resp = urllib2.urlopen(url)
+ if resp.code == 200:
+ lines = resp.read()
+ rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0,
+ parse_dates=True)[::-1]
+
+ return rs
+
+ time.sleep(pause)
+
+ raise Exception("after %d tries, Google did not "
+ "return a 200 for url %s" % (pause, url))
+
+
def _adjust_prices(hist_data, price_list=['Open', 'High', 'Low', 'Close']):
"""
Return modifed DataFrame or Panel with adjusted prices based on
@@ -347,6 +389,72 @@ def dl_mult_symbols(symbols):
return hist_data
+def get_data_google(symbols=None, start=None, end=None, retry_count=3, pause=0,
+ chunksize=25, **kwargs):
+ """
+ Returns DataFrame/Panel of historical stock prices from symbols, over date
+ range, start to end. To avoid being penalized by Google Finance servers,
+ pauses between downloading 'chunks' of symbols can be specified.
+
+ Parameters
+ ----------
+ symbols : string, array-like object (list, tuple, Series), or DataFrame
+ Single stock symbol (ticker), array-like object of symbols or
+ DataFrame with index containing stock symbols.
+ start : string, (defaults to '1/1/2010')
+ Starting date, timestamp. Parses many different kind of date
+ representations (e.g., 'JAN-01-2010', '1/1/10', 'Jan, 1, 1980')
+ end : string, (defaults to today)
+ Ending date, timestamp. Same format as starting date.
+ retry_count : int, default 3
+ Number of times to retry query request.
+ pause : int, default 0
+ Time, in seconds, to pause between consecutive queries of chunks. If
+ single value given for symbol, represents the pause between retries.
+ chunksize : int, default 25
+ Number of symbols to download consecutively before intiating pause.
+
+ Returns
+ -------
+ hist_data : DataFrame (str) or Panel (array-like object, DataFrame)
+ """
+
+ def dl_mult_symbols(symbols):
+ stocks = {}
+ for sym_group in _in_chunks(symbols, chunksize):
+ for sym in sym_group:
+ try:
+ stocks[sym] = _get_hist_google(sym, start=start,
+ end=end, **kwargs)
+ except:
+ warnings.warn('Error with sym: ' + sym + '... skipping.')
+
+ time.sleep(pause)
+
+ return Panel(stocks).swapaxes('items', 'minor')
+
+ if 'name' in kwargs:
+ warnings.warn("Arg 'name' is deprecated, please use 'symbols' instead.",
+ FutureWarning)
+ symbols = kwargs['name']
+
+ #If a single symbol, (e.g., 'GOOG')
+ if isinstance(symbols, (str, int)):
+ sym = symbols
+ hist_data = _get_hist_google(sym, start=start, end=end)
+ #Or multiple symbols, (e.g., ['GOOG', 'AAPL', 'MSFT'])
+ elif isinstance(symbols, DataFrame):
+ try:
+ hist_data = dl_mult_symbols(Series(symbols.index))
+ except ValueError:
+ raise
+ else: #Guess a Series
+ try:
+ hist_data = dl_mult_symbols(symbols)
+ except TypeError:
+ hist_data = dl_mult_symbols(Series(symbols))
+
+ return hist_data
def get_data_fred(name=None, start=dt.datetime(2010, 1, 1),
end=dt.datetime.today()):
diff --git a/pandas/io/tests/test_google.py b/pandas/io/tests/test_google.py
new file mode 100644
index 0000000000000..7f4ca13c27e58
--- /dev/null
+++ b/pandas/io/tests/test_google.py
@@ -0,0 +1,82 @@
+import unittest
+import nose
+from datetime import datetime
+
+import pandas as pd
+import pandas.io.data as web
+from pandas.util.testing import (network, assert_series_equal)
+from numpy.testing.decorators import slow
+
+import urllib2
+
+
+class TestGoogle(unittest.TestCase):
+
+ @network
+ def test_google(self):
+ # asserts that google is minimally working and that it throws
+ # an excecption when DataReader can't get a 200 response from
+ # google
+ start = datetime(2010, 1, 1)
+ end = datetime(2013, 01, 27)
+
+ try:
+ self.assertEquals(
+ web.DataReader("F", 'google', start, end)['Close'][-1],
+ 13.68)
+
+ self.assertRaises(
+ Exception,
+ lambda: web.DataReader("NON EXISTENT TICKER", 'google',
+ start, end))
+ except urllib2.URLError:
+ try:
+ urllib2.urlopen('http://www.google.com')
+ except urllib2.URLError:
+ raise nose.SkipTest
+ else:
+ raise
+
+
+ @network
+ def test_get_quote(self):
+ self.assertRaises(NotImplementedError,
+ lambda: web.get_quote_google(pd.Series(['GOOG', 'AAPL', 'GOOG'])))
+
+ @network
+ def test_get_data(self):
+ import numpy as np
+ df = web.get_data_google('GOOG')
+ print(df.Volume.ix['OCT-08-2010'])
+ assert df.Volume.ix['OCT-08-2010'] == 2863473
+
+ sl = ['AAPL', 'AMZN', 'GOOG']
+ pan = web.get_data_google(sl, '2012')
+ ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG]
+ assert ts[0].dayofyear == 96
+
+ pan = web.get_data_google(['GE', 'MSFT', 'INTC'], 'JAN-01-12', 'JAN-31-12')
+ expected = [19.02, 28.23, 25.39]
+ result = pan.Close.ix['01-18-12'][['GE', 'MSFT', 'INTC']].tolist()
+ assert result == expected
+
+ # sanity checking
+ t= np.array(result)
+ assert np.issubdtype(t.dtype, np.floating)
+ assert t.shape == (3,)
+
+ expected = [[ 18.99, 28.4 , 25.18],
+ [ 18.58, 28.31, 25.13],
+ [ 19.03, 28.16, 25.52],
+ [ 18.81, 28.82, 25.87]]
+ result = pan.Open.ix['Jan-15-12':'Jan-20-12'][['GE', 'MSFT', 'INTC']].values
+ assert (result == expected).all()
+
+ # sanity checking
+ t= np.array(pan)
+ assert np.issubdtype(t.dtype, np.floating)
+
+
+if __name__ == '__main__':
+ nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
+ exit=False)
| https://api.github.com/repos/pandas-dev/pandas/pulls/3814 | 2013-06-08T23:31:54Z | 2013-06-10T02:23:17Z | 2013-06-10T02:23:17Z | 2014-06-17T08:58:27Z | |
remove unused import in test_yahoo | diff --git a/pandas/io/tests/test_yahoo.py b/pandas/io/tests/test_yahoo.py
index 1109d67278f73..b79fdad2bff9d 100644
--- a/pandas/io/tests/test_yahoo.py
+++ b/pandas/io/tests/test_yahoo.py
@@ -2,8 +2,6 @@
import nose
from datetime import datetime
-from pandas.util.py3compat import StringIO, BytesIO
-
import pandas as pd
import pandas.io.data as web
from pandas.util.testing import (network, assert_frame_equal,
| https://api.github.com/repos/pandas-dev/pandas/pulls/3813 | 2013-06-08T21:39:58Z | 2013-06-08T22:49:11Z | 2013-06-08T22:49:11Z | 2014-07-16T08:12:42Z | |
correct FRED test (GDP changed ...) | diff --git a/pandas/io/tests/test_fred.py b/pandas/io/tests/test_fred.py
index 3e951e5443bc3..00a90ec3da402 100644
--- a/pandas/io/tests/test_fred.py
+++ b/pandas/io/tests/test_fred.py
@@ -29,7 +29,7 @@ def test_fred(self):
try:
self.assertEquals(
web.DataReader("GDP", "fred", start, end)['GDP'].tail(1),
- 16010.2)
+ 16004.5)
self.assertRaises(
Exception,
| https://api.github.com/repos/pandas-dev/pandas/pulls/3812 | 2013-06-08T20:54:58Z | 2013-06-08T21:48:41Z | 2013-06-08T21:48:41Z | 2014-07-16T08:12:41Z | |
DOC: turn off the ipython cache | diff --git a/doc/sphinxext/ipython_directive.py b/doc/sphinxext/ipython_directive.py
index 3b19b443af327..bc3c46dd5cc93 100644
--- a/doc/sphinxext/ipython_directive.py
+++ b/doc/sphinxext/ipython_directive.py
@@ -64,15 +64,8 @@
import sys
import tempfile
-# To keep compatibility with various python versions
-try:
- from hashlib import md5
-except ImportError:
- from md5 import md5
-
# Third-party
import matplotlib
-import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
@@ -84,7 +77,6 @@
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
-from pdb import set_trace
#-----------------------------------------------------------------------------
# Globals
@@ -205,6 +197,7 @@ def __init__(self):
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
+ config.InteractiveShell.cache_size = 0
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
| Also clean up some unused import there
closes #3807.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3809 | 2013-06-08T13:22:39Z | 2013-06-08T14:30:15Z | 2013-06-08T14:30:15Z | 2014-07-16T08:12:40Z |
CLN: refactored url accessing and filepath conversion from urls to io.common | diff --git a/pandas/io/common.py b/pandas/io/common.py
new file mode 100644
index 0000000000000..46b47c06f7f5d
--- /dev/null
+++ b/pandas/io/common.py
@@ -0,0 +1,79 @@
+""" Common api utilities """
+
+import urlparse
+from pandas.util import py3compat
+
+_VALID_URLS = set(urlparse.uses_relative + urlparse.uses_netloc +
+ urlparse.uses_params)
+_VALID_URLS.discard('')
+
+
+def _is_url(url):
+ """Check to see if a URL has a valid protocol.
+
+ Parameters
+ ----------
+ url : str or unicode
+
+ Returns
+ -------
+ isurl : bool
+ If `url` has a valid protocol return True otherwise False.
+ """
+ try:
+ return urlparse.urlparse(url).scheme in _VALID_URLS
+ except:
+ return False
+
+def _is_s3_url(url):
+ """ Check for an s3 url """
+ try:
+ return urlparse.urlparse(url).scheme == 's3'
+ except:
+ return False
+
+def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
+ """ if the filepath_or_buffer is a url, translate and return the buffer
+ passthru otherwise
+
+ Parameters
+ ----------
+ filepath_or_buffer : a url, filepath, or buffer
+ encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
+
+ Returns
+ -------
+ a filepath_or_buffer, the encoding
+
+ """
+
+ if _is_url(filepath_or_buffer):
+ from urllib2 import urlopen
+ filepath_or_buffer = urlopen(filepath_or_buffer)
+ if py3compat.PY3: # pragma: no cover
+ if encoding:
+ errors = 'strict'
+ else:
+ errors = 'replace'
+ encoding = 'utf-8'
+ bytes = filepath_or_buffer.read()
+ filepath_or_buffer = StringIO(bytes.decode(encoding, errors))
+ return filepath_or_buffer, encoding
+ return filepath_or_buffer, None
+
+ if _is_s3_url(filepath_or_buffer):
+ try:
+ import boto
+ except:
+ raise ImportError("boto is required to handle s3 files")
+ # Assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
+ # are environment variables
+ parsed_url = urlparse.urlparse(filepath_or_buffer)
+ conn = boto.connect_s3()
+ b = conn.get_bucket(parsed_url.netloc)
+ k = boto.s3.key.Key(b)
+ k.key = parsed_url.path
+ filepath_or_buffer = StringIO(k.get_contents_as_string())
+ return filepath_or_buffer, None
+
+ return filepath_or_buffer, None
diff --git a/pandas/io/html.py b/pandas/io/html.py
index a5798b3493732..08a9403cd18a7 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -20,7 +20,7 @@
import numpy as np
from pandas import DataFrame, MultiIndex, isnull
-from pandas.io.parsers import _is_url
+from pandas.io.common import _is_url
try:
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 556d1ab1976b4..54ba7536afaee 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -4,7 +4,6 @@
from StringIO import StringIO
import re
from itertools import izip
-import urlparse
import csv
import numpy as np
@@ -15,6 +14,7 @@
import pandas.core.common as com
from pandas.util import py3compat
from pandas.io.date_converters import generic_parser
+from pandas.io.common import get_filepath_or_buffer
from pandas.util.decorators import Appender
@@ -176,35 +176,6 @@ class DateConversionError(Exception):
""" % (_parser_params % _fwf_widths)
-_VALID_URLS = set(urlparse.uses_relative + urlparse.uses_netloc +
- urlparse.uses_params)
-_VALID_URLS.discard('')
-
-
-def _is_url(url):
- """Check to see if a URL has a valid protocol.
-
- Parameters
- ----------
- url : str or unicode
-
- Returns
- -------
- isurl : bool
- If `url` has a valid protocol return True otherwise False.
- """
- try:
- return urlparse.urlparse(url).scheme in _VALID_URLS
- except:
- return False
-
-def _is_s3_url(url):
- """ Check for an s3 url """
- try:
- return urlparse.urlparse(url).scheme == 's3'
- except:
- return False
-
def _read(filepath_or_buffer, kwds):
"Generic reader of line files."
encoding = kwds.get('encoding', None)
@@ -212,32 +183,7 @@ def _read(filepath_or_buffer, kwds):
if skipfooter is not None:
kwds['skip_footer'] = skipfooter
- if isinstance(filepath_or_buffer, basestring):
- if _is_url(filepath_or_buffer):
- from urllib2 import urlopen
- filepath_or_buffer = urlopen(filepath_or_buffer)
- if py3compat.PY3: # pragma: no cover
- if encoding:
- errors = 'strict'
- else:
- errors = 'replace'
- encoding = 'utf-8'
- bytes = filepath_or_buffer.read()
- filepath_or_buffer = StringIO(bytes.decode(encoding, errors))
-
- if _is_s3_url(filepath_or_buffer):
- try:
- import boto
- except:
- raise ImportError("boto is required to handle s3 files")
- # Assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
- # are environment variables
- parsed_url = urlparse.urlparse(filepath_or_buffer)
- conn = boto.connect_s3()
- b = conn.get_bucket(parsed_url.netloc)
- k = boto.s3.key.Key(b)
- k.key = parsed_url.path
- filepath_or_buffer = StringIO(k.get_contents_as_string())
+ filepath_or_buffer, _ = get_filepath_or_buffer(filepath_or_buffer)
if kwds.get('date_parser', None) is not None:
if isinstance(kwds['parse_dates'], bool):
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index f1257f505ca9b..ddc9db0b76539 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -21,7 +21,8 @@
import datetime
from pandas.util import py3compat
from pandas import isnull
-from pandas.io.parsers import _parser_params, _is_url, Appender
+from pandas.io.parsers import _parser_params, Appender
+from pandas.io.common import get_filepath_or_buffer
_read_stata_doc = """
@@ -288,18 +289,12 @@ def __init__(self, path_or_buf, encoding=None):
self._missing_values = False
self._data_read = False
self._value_labels_read = False
- if isinstance(path_or_buf, str) and _is_url(path_or_buf):
- from urllib.request import urlopen
- path_or_buf = urlopen(path_or_buf)
- if py3compat.PY3: # pragma: no cover
- if self._encoding:
- errors = 'strict'
- else:
- errors = 'replace'
- self._encoding = 'cp1252'
- bytes = path_or_buf.read()
- self.path_or_buf = StringIO(self._decode_bytes(bytes, errors))
- elif type(path_or_buf) is str:
+ if isinstance(path_or_buf, str):
+ path_or_buf, encoding = get_filepath_or_buffer(path_or_buf, encoding='cp1252')
+ if encoding is not None:
+ self._encoding = encoding
+
+ if type(path_or_buf) is str:
self.path_or_buf = open(path_or_buf, 'rb')
else:
self.path_or_buf = path_or_buf
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index 9f5d796763fb0..d512b0267ed13 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -185,7 +185,7 @@ def test_read_dta9(self):
def test_stata_doc_examples(self):
with ensure_clean(self.dta5) as path:
df = DataFrame(np.random.randn(10,2),columns=list('AB'))
- df.to_stata('path')
+ df.to_stata(path)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
| https://api.github.com/repos/pandas-dev/pandas/pulls/3808 | 2013-06-08T12:39:08Z | 2013-06-08T13:04:02Z | 2013-06-08T13:04:02Z | 2014-07-16T08:12:37Z | |
Alter imports in pandas/__init__.py to be explicit | diff --git a/pandas/__init__.py b/pandas/__init__.py
index da4c146da3cfd..c682d33b2bb47 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -24,11 +24,39 @@
# let init-time option registration happen
import pandas.core.config_init
-from pandas.core.api import *
-from pandas.sparse.api import *
-from pandas.stats.api import *
-from pandas.tseries.api import *
-from pandas.io.api import *
+from pandas.core.api import (
+ factorize, match, unique, value_counts, isnull, notnull, save, load,
+ Categorical, Factor, set_printoptions, reset_printoptions,
+ set_eng_float_format, Index, Int64Index, MultiIndex, Series, TimeSeries,
+ DataFrame, Panel, Panel4D, groupby, pivot, get_dummies, lreshape, WidePanel,
+ DateOffset, to_datetime, DatetimeIndex, Timestamp, date_range, bdate_range,
+ Period, PeriodIndex, datetools, get_option, set_option, reset_option,
+ describe_option, options, DateRange # deprecated
+ )
+from pandas.sparse.api import (
+ SparseArray, SparseList, SparseSeries, SparseTimeSeries,
+ SparseDataFrame, SparsePanel
+ )
+from pandas.stats.api import (
+ ols, fama_macbeth, rolling_count, rolling_max, rolling_min,
+ rolling_sum, rolling_mean, rolling_std, rolling_cov, rolling_corr,
+ rolling_var, rolling_skew, rolling_kurt, rolling_quantile,
+ rolling_median, rolling_apply, rolling_corr_pairwise, rolling_window,
+ ewma, ewmvar, ewmstd, ewmvol, ewmcorr, ewmcov, expanding_count,
+ expanding_max, expanding_min, expanding_sum, expanding_mean,
+ expanding_std, expanding_cov, expanding_corr, expanding_var,
+ expanding_skew, expanding_kurt, expanding_quantile, expanding_median,
+ expanding_apply, expanding_corr_pairwise
+ )
+from pandas.tseries.api import (
+ DatetimeIndex, date_range, bdate_range, infer_freq, Period, PeriodIndex,
+ period_range, pnow, TimeGrouper, NaT, offsets
+ )
+from pandas.io.api import (
+ read_csv, read_table, read_clipboard, read_fwf, to_clipboard, ExcelFile,
+ ExcelWriter, read_excel, HDFStore, Term, get_store, read_hdf, read_html,
+ read_sql, read_stata
+ )
from pandas.util.testing import debug
@@ -38,3 +66,18 @@
from pandas.tools.plotting import scatter_matrix, plot_params
from pandas.tools.tile import cut, qcut
from pandas.core.reshape import melt
+
+# import these so we can add them to __all__
+import pandas.core.api as core_api
+import pandas.sparse.api as sparse_api
+import pandas.stats.api as stats_api
+import pandas.tseries.api as tseries_api
+import pandas.io.api as io_api
+__all__ = ["debug", "value_range", "merge", "concat", "ordered_merge",
+ "pivot_table", "crosstab", "scatter_matrix", "plot_params",
+ "cut", "qcut", "melt", "np"]
+__all__.extend(core_api.__all__)
+__all__.extend(sparse_api.__all__)
+__all__.extend(stats_api.__all__)
+__all__.extend(tseries_api.__all__)
+__all__.extend(io_api.__all__)
diff --git a/pandas/core/api.py b/pandas/core/api.py
index 306f9aff8f4d3..b920bdc6954ba 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -32,3 +32,11 @@
from pandas.core.config import get_option, set_option, reset_option,\
describe_option, options
+
+__all__ = ['factorize', 'match', 'unique', 'value_counts', 'isnull', 'notnull', 'save', 'load',
+ 'Categorical', 'Factor', 'set_printoptions', 'reset_printoptions',
+ 'set_eng_float_format', 'Index', 'Int64Index', 'MultiIndex', 'Series', 'TimeSeries',
+ 'DataFrame', 'Panel', 'Panel4D', 'groupby', 'pivot', 'get_dummies', 'lreshape', 'WidePanel',
+ 'DateOffset', 'to_datetime', 'DatetimeIndex', 'Timestamp', 'date_range', 'bdate_range',
+ 'Period', 'PeriodIndex', 'datetools', 'get_option', 'set_option', 'reset_option',
+ 'describe_option', 'options', 'DateRange']
diff --git a/pandas/io/api.py b/pandas/io/api.py
index f17351921f83f..13451642785f2 100644
--- a/pandas/io/api.py
+++ b/pandas/io/api.py
@@ -9,3 +9,7 @@
from pandas.io.html import read_html
from pandas.io.sql import read_sql
from pandas.io.stata import read_stata
+
+__all__ = ['read_csv', 'read_table', 'read_clipboard', 'read_fwf',
+ 'to_clipboard', 'ExcelFile', 'ExcelWriter', 'read_excel', 'HDFStore', 'Term',
+ 'get_store', 'read_hdf', 'read_html', 'read_sql', 'read_stata']
diff --git a/pandas/sparse/api.py b/pandas/sparse/api.py
index 230ad15937c92..8705fbe0a5a78 100644
--- a/pandas/sparse/api.py
+++ b/pandas/sparse/api.py
@@ -5,3 +5,6 @@
from pandas.sparse.series import SparseSeries, SparseTimeSeries
from pandas.sparse.frame import SparseDataFrame
from pandas.sparse.panel import SparsePanel
+
+__all__ = ['SparseArray', 'SparseList', 'SparseSeries', 'SparseTimeSeries',
+ 'SparseDataFrame', 'SparsePanel']
diff --git a/pandas/stats/api.py b/pandas/stats/api.py
index 3732f9ed39524..1f157940ff6df 100644
--- a/pandas/stats/api.py
+++ b/pandas/stats/api.py
@@ -4,6 +4,26 @@
# pylint: disable-msg=W0611,W0614,W0401
-from pandas.stats.moments import *
+from pandas.stats.moments import (
+ rolling_count, rolling_max, rolling_min, rolling_sum, rolling_mean,
+ rolling_std, rolling_cov, rolling_corr, rolling_var, rolling_skew,
+ rolling_kurt, rolling_quantile, rolling_median, rolling_apply,
+ rolling_corr_pairwise, rolling_window, ewma, ewmvar, ewmstd, ewmvol,
+ ewmcorr, ewmcov, expanding_count, expanding_max, expanding_min,
+ expanding_sum, expanding_mean, expanding_std, expanding_cov,
+ expanding_corr, expanding_var, expanding_skew, expanding_kurt,
+ expanding_quantile, expanding_median, expanding_apply,
+ expanding_corr_pairwise
+ )
from pandas.stats.interface import ols
from pandas.stats.fama_macbeth import fama_macbeth
+
+__all__ = ['ols', 'fama_macbeth', 'rolling_count', 'rolling_max', 'rolling_min',
+ 'rolling_sum', 'rolling_mean', 'rolling_std', 'rolling_cov', 'rolling_corr',
+ 'rolling_var', 'rolling_skew', 'rolling_kurt', 'rolling_quantile',
+ 'rolling_median', 'rolling_apply', 'rolling_corr_pairwise', 'rolling_window',
+ 'ewma', 'ewmvar', 'ewmstd', 'ewmvol', 'ewmcorr', 'ewmcov', 'expanding_count',
+ 'expanding_max', 'expanding_min', 'expanding_sum', 'expanding_mean',
+ 'expanding_std', 'expanding_cov', 'expanding_corr', 'expanding_var',
+ 'expanding_skew', 'expanding_kurt', 'expanding_quantile', 'expanding_median',
+ 'expanding_apply', 'expanding_corr_pairwise']
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index 0f3b8c1634416..5ff0ae7800171 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -539,6 +539,20 @@ def test_missing_unicode_key(self):
except KeyError:
pass # this is the expected exception
+class TestTopLevelImports(object):
+ def test_pandas_imports_all__all__(self):
+ # import these so we can add them to __all__
+ import pandas.core.api as core_api
+ import pandas.sparse.api as sparse_api
+ import pandas.stats.api as stats_api
+ import pandas.tseries.api as tseries_api
+ import pandas.io.api as io_api
+ import pandas
+ missing = []
+ for api in (core_api, sparse_api, stats_api, tseries_api, io_api):
+ missing.extend([attr for attr in api.__all__ if not hasattr(pandas, attr)])
+ assert not missing, "Expected pandas to import all `__all__` from api files. Missing: %r" % missing
+
if __name__ == '__main__':
# unittest.main()
import nose
diff --git a/pandas/tseries/api.py b/pandas/tseries/api.py
index ead5a17c4fab1..a98e2dfa94bad 100644
--- a/pandas/tseries/api.py
+++ b/pandas/tseries/api.py
@@ -2,10 +2,13 @@
"""
-
from pandas.tseries.index import DatetimeIndex, date_range, bdate_range
from pandas.tseries.frequencies import infer_freq
from pandas.tseries.period import Period, PeriodIndex, period_range, pnow
from pandas.tseries.resample import TimeGrouper
from pandas.lib import NaT
import pandas.tseries.offsets as offsets
+
+__all__ = ['DatetimeIndex', 'date_range', 'bdate_range', 'infer_freq',
+ 'Period', 'PeriodIndex', 'period_range', 'pnow', 'TimeGrouper',
+ 'NaT', 'offsets']
| Also adds an `__all__` to all the api files as well as a test case that
checks that everything in the api files ends up in the toplevel pandas
namespace. I personally find it hard to trace back the top level pandas
functions because of the `from xyz import *` calls. I changed everything
to be explicit. I think it's better, but it's your call.
The only large change with this is that the `pandas` module/package no
longer exports `numpy/np` at the top level.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3806 | 2013-06-08T01:51:13Z | 2013-06-10T03:05:15Z | null | 2013-07-11T05:21:11Z |
Allow aggregate funcs to return arrays in groupby | diff --git a/RELEASE.rst b/RELEASE.rst
index 307986ab81681..e4383d3bfcca8 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -79,6 +79,8 @@ pandas 0.11.1
spurious plots from showing up.
- Added Faq section on repr display options, to help users customize their setup.
- ``where`` operations that result in block splitting are much faster (GH3733_)
+ - ``groupby`` will now warn with a ``PerformanceWarning`` if an aggregate function
+ returns an array or list, instead of raising an error. (GH3788_)
**API Changes**
@@ -312,6 +314,7 @@ pandas 0.11.1
.. _GH3726: https://github.com/pydata/pandas/issues/3726
.. _GH3795: https://github.com/pydata/pandas/issues/3795
.. _GH3814: https://github.com/pydata/pandas/issues/3814
+.. _GH3788: https://github.com/pydata/pandas/issues/3788
pandas 0.11.0
=============
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 69f38bf0c7c61..b4395cc6e3bf1 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -42,6 +42,10 @@ class AmbiguousIndexError(PandasError, KeyError):
pass
+class PerformanceWarning(Warning):
+ "Baseclass for warnings about performance issues that affect speed, but not functionality."
+ pass
+
_POSSIBLY_CAST_DTYPES = set([ np.dtype(t) for t in ['M8[ns]','m8[ns]','O','int8','uint8','int16','uint16','int32','uint32','int64','uint64'] ])
_NS_DTYPE = np.dtype('M8[ns]')
_TD_DTYPE = np.dtype('m8[ns]')
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 0be5d438e5e7c..777dfaefcb9ee 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1,8 +1,10 @@
from itertools import izip
import types
+import warnings
import numpy as np
from pandas.core.categorical import Categorical
+from pandas.core.common import PerformanceWarning
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, _ensure_index
@@ -18,7 +20,7 @@
import pandas.lib as lib
import pandas.algos as _algos
import pandas.hashtable as _hash
-
+_non_agg_warning = "Function does not produce aggregated values. Will not be able to optimize and may produce unexpected results."
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
@@ -919,7 +921,7 @@ def _aggregate_series_pure_python(self, obj, func):
res = func(group)
if result is None:
if isinstance(res, np.ndarray) or isinstance(res, list):
- raise ValueError('Function does not reduce')
+ warnings.warn(_non_agg_warning, PerformanceWarning, stacklevel=2)
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
@@ -1508,7 +1510,7 @@ def _aggregate_named(self, func, *args, **kwargs):
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, np.ndarray):
- raise Exception('Must produce aggregated value')
+ warnings.warn(_non_agg_warning, PerformanceWarning, stacklevel=2)
result[name] = self._try_cast(output, group)
return result
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index b1b7b80e5fd23..24d7921e04c25 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -19,7 +19,7 @@
from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel
from pandas.sparse.array import BlockIndex, IntIndex
from pandas.tseries.api import PeriodIndex, DatetimeIndex
-from pandas.core.common import adjoin, isnull, is_list_like
+from pandas.core.common import adjoin, isnull, is_list_like, PerformanceWarning
from pandas.core.algorithms import match, unique, factorize
from pandas.core.categorical import Categorical
from pandas.core.common import _asarray_tuplesafe, _try_sort
@@ -64,7 +64,7 @@ class AttributeConflictWarning(Warning): pass
the [%s] attribute of the existing index is [%s] which conflicts with the new [%s],
resetting the attribute to None
"""
-class PerformanceWarning(Warning): pass
+# for PerformanceWarning
performance_doc = """
your performance may suffer as PyTables will pickle object types that it cannot map
directly to c-types [inferred_type->%s,key->%s] [items->%s]
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 8b3d4a475d952..ff8207b11f4e3 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -10,9 +10,11 @@
import pandas
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, Index)
+from pandas.core.common import PerformanceWarning
from pandas.io.pytables import (HDFStore, get_store, Term, read_hdf,
- IncompatibilityWarning, PerformanceWarning,
+ IncompatibilityWarning,
AttributeConflictWarning)
+from pandas.core.common import PerformanceWarning
import pandas.util.testing as tm
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index f3a608b82e756..99ada1a5ee6b2 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -1,12 +1,13 @@
import nose
import unittest
+import warnings
from datetime import datetime
from numpy import nan
from pandas import bdate_range
from pandas.core.index import Index, MultiIndex
-from pandas.core.common import rands
+from pandas.core.common import rands, PerformanceWarning
from pandas.core.api import Categorical, DataFrame
from pandas.core.groupby import GroupByError, SpecificationError, DataError
from pandas.core.series import Series
@@ -131,8 +132,11 @@ def checkit(dtype):
self.assertEqual(agged[1], 21)
# corner cases
- self.assertRaises(Exception, grouped.aggregate, lambda x: x * 2)
-
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ grouped.aggregate(lambda x: x * 2)
+ self.assertEqual(len(w), 1)
+ assert 'aggregate' in str(w[-1].message), "Wrong message: %r" % str(w[-1].message)
for dtype in ['int64','int32','float64','float32']:
checkit(dtype)
@@ -334,8 +338,29 @@ def test_agg_period_index(self):
def test_agg_must_agg(self):
grouped = self.df.groupby('A')['C']
- self.assertRaises(Exception, grouped.agg, lambda x: x.describe())
- self.assertRaises(Exception, grouped.agg, lambda x: x.index[:2])
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ grouped.agg(lambda x: x.describe())
+ self.assertEqual(len(w), 1)
+ assert 'aggregate' in str(w[-1].message), "Wrong message: %r" % str(w[-1].message)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ grouped.agg(lambda x: x.index[:2])
+ self.assertEqual(len(w), 1)
+ assert 'aggregate' in str(w[-1].message), "Wrong message: %r" % str(w[-1].message)
+
+ # motivating example for #3788
+ df = DataFrame([[1, np.array([10, 20, 30])],
+ [1, np.array([40, 50, 60])],
+ [2, np.array([20, 30, 40])]],
+ columns=['category', 'arraydata'])
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ df.groupby('category').agg(sum)
+ self.assertEqual(len(w), 1)
+ assert 'aggregate' in str(w[-1].message), "Wrong message: %r" % str(w[-1].message)
def test_agg_ser_multi_key(self):
ser = self.df.C
| fixes #3788
Please check out whether you like the error message for `Performance Warning`.
Also, I'm not sure whether this means that groupby fails under certain conditions and not others (like when trying Cython, etc.).
| https://api.github.com/repos/pandas-dev/pandas/pulls/3805 | 2013-06-07T23:53:53Z | 2013-10-12T01:30:47Z | null | 2022-10-13T00:14:57Z |
ENH: add ujson support in pandas.io.json | diff --git a/LICENSES/ULTRAJSON_LICENSE b/LICENSES/ULTRAJSON_LICENSE
new file mode 100644
index 0000000000000..defca46e7f820
--- /dev/null
+++ b/LICENSES/ULTRAJSON_LICENSE
@@ -0,0 +1,34 @@
+Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the ESN Social Software AB nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc)
+http://code.google.com/p/stringencoders/
+Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
+
+Numeric decoder derived from from TCL library
+http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms
+ * Copyright (c) 1988-1993 The Regents of the University of California.
+ * Copyright (c) 1994 Sun Microsystems, Inc.
\ No newline at end of file
diff --git a/doc/source/api.rst b/doc/source/api.rst
index e263554460380..bb6f0ac073e21 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -45,6 +45,16 @@ Excel
read_excel
ExcelFile.parse
+JSON
+~~~~
+
+.. currentmodule:: pandas.io.json
+
+.. autosummary::
+ :toctree: generated/
+
+ read_json
+
HTML
~~~~
@@ -597,6 +607,7 @@ Serialization / IO / Conversion
DataFrame.to_hdf
DataFrame.to_dict
DataFrame.to_excel
+ DataFrame.to_json
DataFrame.to_html
DataFrame.to_stata
DataFrame.to_records
diff --git a/doc/source/io.rst b/doc/source/io.rst
index ac5d49e036669..e64cbc4bc8101 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -35,6 +35,7 @@ object.
* ``read_excel``
* ``read_hdf``
* ``read_sql``
+ * ``read_json``
* ``read_html``
* ``read_stata``
* ``read_clipboard``
@@ -45,6 +46,7 @@ The corresponding ``writer`` functions are object methods that are accessed like
* ``to_excel``
* ``to_hdf``
* ``to_sql``
+ * ``to_json``
* ``to_html``
* ``to_stata``
* ``to_clipboard``
@@ -937,6 +939,104 @@ The Series object also has a ``to_string`` method, but with only the ``buf``,
which, if set to ``True``, will additionally output the length of the Series.
+JSON
+----
+
+Read and write ``JSON`` format files.
+
+.. _io.json:
+
+Writing JSON
+~~~~~~~~~~~~
+
+A ``Series`` or ``DataFrame`` can be converted to a valid JSON string. Use ``to_json``
+with optional parameters:
+
+- path_or_buf : the pathname or buffer to write the output
+ This can be ``None`` in which case a JSON string is returned
+- orient : The format of the JSON string, default is ``index`` for ``Series``, ``columns`` for ``DataFrame``
+
+ * split : dict like {index -> [index], columns -> [columns], data -> [values]}
+ * records : list like [{column -> value}, ... , {column -> value}]
+ * index : dict like {index -> {column -> value}}
+ * columns : dict like {column -> {index -> value}}
+ * values : just the values array
+
+- date_format : type of date conversion (epoch = epoch milliseconds, iso = ISO8601), default is epoch
+- double_precision : The number of decimal places to use when encoding floating point values, default 10.
+- force_ascii : force encoded string to be ASCII, default True.
+
+Note NaN's and None will be converted to null and datetime objects will be converted based on the date_format parameter
+
+.. ipython:: python
+
+ dfj = DataFrame(randn(5, 2), columns=list('AB'))
+ json = dfj.to_json()
+ json
+
+Writing in iso date format
+
+.. ipython:: python
+
+ dfd = DataFrame(randn(5, 2), columns=list('AB'))
+ dfd['date'] = Timestamp('20130101')
+ json = dfd.to_json(date_format='iso')
+ json
+
+Writing to a file, with a date index and a date column
+
+.. ipython:: python
+
+ dfj2 = dfj.copy()
+ dfj2['date'] = Timestamp('20130101')
+ dfj2.index = date_range('20130101',periods=5)
+ dfj2.to_json('test.json')
+ open('test.json').read()
+
+Reading JSON
+~~~~~~~~~~~~
+
+Reading a JSON string to pandas object can take a number of parameters.
+The parser will try to parse a ``DataFrame`` if ``typ`` is not supplied or
+is ``None``. To explicity force ``Series`` parsing, pass ``typ=series``
+
+- filepath_or_buffer : a **VALID** JSON string or file handle / StringIO. The string could be
+ a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host
+ is expected. For instance, a local file could be
+ file ://localhost/path/to/table.json
+- typ : type of object to recover (series or frame), default 'frame'
+- orient : The format of the JSON string, one of the following
+
+ * split : dict like {index -> [index], name -> name, data -> [values]}
+ * records : list like [value, ... , value]
+ * index : dict like {index -> value}
+
+- dtype : dtype of the resulting object
+- numpy : direct decoding to numpy arrays. default True but falls back to standard decoding if a problem occurs.
+- parse_dates : a list of columns to parse for dates; If True, then try to parse datelike columns, default is False
+- keep_default_dates : boolean, default True. If parsing dates, then parse the default datelike columns
+
+The parser will raise one of ``ValueError/TypeError/AssertionError`` if the JSON is
+not parsable.
+
+Reading from a JSON string
+
+.. ipython:: python
+
+ pd.read_json(json)
+
+Reading from a file, parsing dates
+
+.. ipython:: python
+
+ pd.read_json('test.json',parse_dates=True)
+
+.. ipython:: python
+ :suppress:
+
+ import os
+ os.remove('test.json')
+
HTML
----
@@ -2193,7 +2293,6 @@ into a .dta file. The format version of this file is always the latest one, 115.
.. ipython:: python
- from pandas.io.stata import StataWriter
df = DataFrame(randn(10, 2), columns=list('AB'))
df.to_stata('stata.dta')
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 70d840f8c477a..5045f73375a97 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -16,6 +16,7 @@ API changes
* ``read_excel``
* ``read_hdf``
* ``read_sql``
+ * ``read_json``
* ``read_html``
* ``read_stata``
* ``read_clipboard``
@@ -26,6 +27,7 @@ API changes
* ``to_excel``
* ``to_hdf``
* ``to_sql``
+ * ``to_json``
* ``to_html``
* ``to_stata``
* ``to_clipboard``
@@ -175,6 +177,10 @@ Enhancements
accessable via ``read_stata`` top-level function for reading,
and ``to_stata`` DataFrame method for writing, :ref:`See the docs<io.stata>`
+ - Added module for reading and writing json format files: ``pandas.io.json``
+ accessable via ``read_json`` top-level function for reading,
+ and ``to_json`` DataFrame method for writing, :ref:`See the docs<io.json>`
+
- ``DataFrame.replace()`` now allows regular expressions on contained
``Series`` with object dtype. See the examples section in the regular docs
:ref:`Replacing via String Expression <missing_data.replace_expression>`
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 5533584745167..0d2612d7aed7a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -495,6 +495,45 @@ def to_clipboard(self):
from pandas.io import clipboard
clipboard.to_clipboard(self)
+ def to_json(self, path_or_buf=None, orient=None, date_format='epoch',
+ double_precision=10, force_ascii=True):
+ """
+ Convert the object to a JSON string.
+
+ Note NaN's and None will be converted to null and datetime objects
+ will be converted to UNIX timestamps.
+
+ Parameters
+ ----------
+ path_or_buf : the path or buffer to write the result string
+ if this is None, return a StringIO of the converted string
+ orient : {'split', 'records', 'index', 'columns', 'values'},
+ default is 'index' for Series, 'columns' for DataFrame
+
+ The format of the JSON string
+ split : dict like
+ {index -> [index], columns -> [columns], data -> [values]}
+ records : list like [{column -> value}, ... , {column -> value}]
+ index : dict like {index -> {column -> value}}
+ columns : dict like {column -> {index -> value}}
+ values : just the values array
+ date_format : type of date conversion (epoch = epoch milliseconds, iso = ISO8601),
+ default is epoch
+ double_precision : The number of decimal places to use when encoding
+ floating point values, default 10.
+ force_ascii : force encoded string to be ASCII, default True.
+
+ Returns
+ -------
+ result : a JSON compatible string written to the path_or_buf;
+ if the path_or_buf is none, return a StringIO of the result
+
+ """
+
+ from pandas.io import json
+ return json.to_json(path_or_buf=path_or_buf, obj=self, orient=orient, date_format=date_format,
+ double_precision=double_precision, force_ascii=force_ascii)
+
# install the indexerse
for _name, _indexer in indexing.get_indexers_list():
PandasObject._create_indexer(_name,_indexer)
diff --git a/pandas/io/api.py b/pandas/io/api.py
index f17351921f83f..48566399f9bfe 100644
--- a/pandas/io/api.py
+++ b/pandas/io/api.py
@@ -6,6 +6,7 @@
from pandas.io.clipboard import read_clipboard
from pandas.io.excel import ExcelFile, ExcelWriter, read_excel
from pandas.io.pytables import HDFStore, Term, get_store, read_hdf
+from pandas.io.json import read_json
from pandas.io.html import read_html
from pandas.io.sql import read_sql
from pandas.io.stata import read_stata
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 46b47c06f7f5d..353930482c8b8 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -2,6 +2,7 @@
import urlparse
from pandas.util import py3compat
+from StringIO import StringIO
_VALID_URLS = set(urlparse.uses_relative + urlparse.uses_netloc +
urlparse.uses_params)
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 5b7d13acd99ec..95702847d9c7f 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -11,7 +11,7 @@
from pandas.io.parsers import TextParser
from pandas.tseries.period import Period
-import json
+from pandas import json
def read_excel(path_or_buf, sheetname, kind=None, **kwds):
"""Read an Excel table into a pandas DataFrame
diff --git a/pandas/io/json.py b/pandas/io/json.py
new file mode 100644
index 0000000000000..17b33931bee5a
--- /dev/null
+++ b/pandas/io/json.py
@@ -0,0 +1,353 @@
+
+# pylint: disable-msg=E1101,W0613,W0603
+from StringIO import StringIO
+import os
+
+from pandas import Series, DataFrame, to_datetime
+from pandas.io.common import get_filepath_or_buffer
+import pandas.json as _json
+loads = _json.loads
+dumps = _json.dumps
+
+import numpy as np
+from pandas.tslib import iNaT
+
+### interface to/from ###
+
+def to_json(path_or_buf, obj, orient=None, date_format='epoch', double_precision=10, force_ascii=True):
+
+ if isinstance(obj, Series):
+ s = SeriesWriter(obj, orient=orient, date_format=date_format, double_precision=double_precision,
+ ensure_ascii=force_ascii).write()
+ elif isinstance(obj, DataFrame):
+ s = FrameWriter(obj, orient=orient, date_format=date_format, double_precision=double_precision,
+ ensure_ascii=force_ascii).write()
+ else:
+ raise NotImplementedError
+
+ if isinstance(path_or_buf, basestring):
+ with open(path_or_buf,'w') as fh:
+ fh.write(s)
+ elif path_or_buf is None:
+ return s
+ else:
+ path_or_buf.write(s)
+
+class Writer(object):
+
+ def __init__(self, obj, orient, date_format, double_precision, ensure_ascii):
+ self.obj = obj
+
+ if orient is None:
+ orient = self._default_orient
+
+ self.orient = orient
+ self.date_format = date_format
+ self.double_precision = double_precision
+ self.ensure_ascii = ensure_ascii
+
+ self.is_copy = False
+ self._format_axes()
+ self._format_dates()
+
+ def _format_dates(self):
+ raise NotImplementedError
+
+ def _format_axes(self):
+ raise NotImplementedError
+
+ def _needs_to_date(self, data):
+ return self.date_format == 'iso' and data.dtype == 'datetime64[ns]'
+
+ def _format_to_date(self, data):
+ if self._needs_to_date(data):
+ return data.apply(lambda x: x.isoformat())
+ return data
+
+ def copy_if_needed(self):
+ """ copy myself if necessary """
+ if not self.is_copy:
+ self.obj = self.obj.copy()
+ self.is_copy = True
+
+ def write(self):
+ return dumps(self.obj, orient=self.orient, double_precision=self.double_precision, ensure_ascii=self.ensure_ascii)
+
+class SeriesWriter(Writer):
+ _default_orient = 'index'
+
+ def _format_axes(self):
+ if self._needs_to_date(self.obj.index):
+ self.copy_if_needed()
+ self.obj.index = self._format_to_date(self.obj.index.to_series())
+
+ def _format_dates(self):
+ if self._needs_to_date(self.obj):
+ self.copy_if_needed()
+ self.obj = self._format_to_date(self.obj)
+
+class FrameWriter(Writer):
+ _default_orient = 'columns'
+
+ def _format_axes(self):
+ """ try to axes if they are datelike """
+ if self.orient == 'columns':
+ axis = 'index'
+ elif self.orient == 'index':
+ axis = 'columns'
+ else:
+ return
+
+ a = getattr(self.obj,axis)
+ if self._needs_to_date(a):
+ self.copy_if_needed()
+ setattr(self.obj,axis,self._format_to_date(a.to_series()))
+
+ def _format_dates(self):
+ if self.date_format == 'iso':
+ dtypes = self.obj.dtypes
+ dtypes = dtypes[dtypes == 'datetime64[ns]']
+ if len(dtypes):
+ self.copy_if_needed()
+ for c in dtypes.index:
+ self.obj[c] = self._format_to_date(self.obj[c])
+
+def read_json(path_or_buf=None, orient=None, typ='frame', dtype=None, numpy=True,
+ parse_dates=False, keep_default_dates=True):
+ """
+ Convert JSON string to pandas object
+
+ Parameters
+ ----------
+ filepath_or_buffer : a VALID JSON string or file handle / StringIO. The string could be
+ a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host
+ is expected. For instance, a local file could be
+ file ://localhost/path/to/table.json
+ orient : {'split', 'records', 'index'}, default 'index'
+ The format of the JSON string
+ split : dict like
+ {index -> [index], name -> name, data -> [values]}
+ records : list like [value, ... , value]
+ index : dict like {index -> value}
+ typ : type of object to recover (series or frame), default 'frame'
+ dtype : dtype of the resulting object
+ numpy: direct decoding to numpy arrays. default True but falls back
+ to standard decoding if a problem occurs.
+ parse_dates : a list of columns to parse for dates; If True, then try to parse datelike columns
+ default is False
+ keep_default_dates : boolean, default True. If parsing dates,
+ then parse the default datelike columns
+
+ Returns
+ -------
+ result : Series or DataFrame
+ """
+
+ filepath_or_buffer,_ = get_filepath_or_buffer(path_or_buf)
+ if isinstance(filepath_or_buffer, basestring):
+ if os.path.exists(filepath_or_buffer):
+ with open(filepath_or_buffer,'r') as fh:
+ json = fh.read()
+ else:
+ json = filepath_or_buffer
+ elif hasattr(filepath_or_buffer, 'read'):
+ json = filepath_or_buffer.read()
+ else:
+ json = filepath_or_buffer
+
+ obj = None
+ if typ == 'frame':
+ obj = FrameParser(json, orient, dtype, numpy, parse_dates, keep_default_dates).parse()
+
+ if typ == 'series' or obj is None:
+ obj = SeriesParser(json, orient, dtype, numpy, parse_dates, keep_default_dates).parse()
+
+ return obj
+
+class Parser(object):
+
+ def __init__(self, json, orient, dtype, numpy, parse_dates=False, keep_default_dates=False):
+ self.json = json
+
+ if orient is None:
+ orient = self._default_orient
+
+ self.orient = orient
+ self.dtype = dtype
+
+ if dtype is not None and orient == "split":
+ numpy = False
+
+ self.numpy = numpy
+ self.parse_dates = parse_dates
+ self.keep_default_dates = keep_default_dates
+ self.obj = None
+
+ def parse(self):
+ self._parse()
+ if self.obj is not None:
+ self._convert_axes()
+ if self.parse_dates:
+ self._try_parse_dates()
+ return self.obj
+
+
+ def _try_parse_to_date(self, data):
+ """ try to parse a ndarray like into a date column
+ try to coerce object in epoch/iso formats and
+ integer/float in epcoh formats """
+
+ new_data = data
+ if new_data.dtype == 'object':
+ try:
+ new_data = data.astype('int64')
+ except:
+ pass
+
+
+ # ignore numbers that are out of range
+ if issubclass(new_data.dtype.type,np.number):
+ if not ((new_data == iNaT) | (new_data > 31536000000000000L)).all():
+ return data
+
+ try:
+ new_data = to_datetime(new_data)
+ except:
+ try:
+ new_data = to_datetime(new_data.astype('int64'))
+ except:
+
+ # return old, noting more we can do
+ new_data = data
+
+ return new_data
+
+ def _try_parse_dates(self):
+ raise NotImplementedError
+
+class SeriesParser(Parser):
+ _default_orient = 'index'
+
+ def _parse(self):
+
+ json = self.json
+ dtype = self.dtype
+ orient = self.orient
+ numpy = self.numpy
+
+ if numpy:
+ try:
+ if orient == "split":
+ decoded = loads(json, dtype=dtype, numpy=True)
+ decoded = dict((str(k), v) for k, v in decoded.iteritems())
+ self.obj = Series(**decoded)
+ elif orient == "columns" or orient == "index":
+ self.obj = Series(*loads(json, dtype=dtype, numpy=True,
+ labelled=True))
+ else:
+ self.obj = Series(loads(json, dtype=dtype, numpy=True))
+ except ValueError:
+ numpy = False
+
+ if not numpy:
+ if orient == "split":
+ decoded = dict((str(k), v)
+ for k, v in loads(json).iteritems())
+ self.obj = Series(dtype=dtype, **decoded)
+ else:
+ self.obj = Series(loads(json), dtype=dtype)
+
+ def _convert_axes(self):
+ """ try to axes if they are datelike """
+ try:
+ self.obj.index = self._try_parse_to_date(self.obj.index)
+ except:
+ pass
+
+ def _try_parse_dates(self):
+ if self.obj is None: return
+
+ if self.parse_dates:
+ self.obj = self._try_parse_to_date(self.obj)
+
+class FrameParser(Parser):
+ _default_orient = 'columns'
+
+ def _parse(self):
+
+ json = self.json
+ dtype = self.dtype
+ orient = self.orient
+ numpy = self.numpy
+
+ if numpy:
+ try:
+ if orient == "columns":
+ args = loads(json, dtype=dtype, numpy=True, labelled=True)
+ if args:
+ args = (args[0].T, args[2], args[1])
+ self.obj = DataFrame(*args)
+ elif orient == "split":
+ decoded = loads(json, dtype=dtype, numpy=True)
+ decoded = dict((str(k), v) for k, v in decoded.iteritems())
+ self.obj = DataFrame(**decoded)
+ elif orient == "values":
+ self.obj = DataFrame(loads(json, dtype=dtype, numpy=True))
+ else:
+ self.obj = DataFrame(*loads(json, dtype=dtype, numpy=True,
+ labelled=True))
+ except ValueError:
+ numpy = False
+
+ if not numpy:
+ if orient == "columns":
+ self.obj = DataFrame(loads(json), dtype=dtype)
+ elif orient == "split":
+ decoded = dict((str(k), v)
+ for k, v in loads(json).iteritems())
+ self.obj = DataFrame(dtype=dtype, **decoded)
+ elif orient == "index":
+ self.obj = DataFrame(loads(json), dtype=dtype).T
+ else:
+ self.obj = DataFrame(loads(json), dtype=dtype)
+
+ def _convert_axes(self):
+ """ try to axes if they are datelike """
+ if self.orient == 'columns':
+ axis = 'index'
+ elif self.orient == 'index':
+ axis = 'columns'
+ else:
+ return
+
+ try:
+ a = getattr(self.obj,axis)
+ setattr(self.obj,axis,self._try_parse_to_date(a))
+ except:
+ pass
+
+ def _try_parse_dates(self):
+ if self.obj is None: return
+
+ # our columns to parse
+ parse_dates = self.parse_dates
+ if parse_dates is True:
+ parse_dates = []
+ parse_dates = set(parse_dates)
+
+ def is_ok(col):
+ """ return if this col is ok to try for a date parse """
+ if not isinstance(col, basestring): return False
+
+ if (col.endswith('_at') or
+ col.endswith('_time') or
+ col.lower() == 'modified' or
+ col.lower() == 'date' or
+ col.lower() == 'datetime'):
+ return True
+ return False
+
+
+ for col, c in self.obj.iteritems():
+ if (self.keep_default_dates and is_ok(col)) or col in parse_dates:
+ self.obj[col] = self._try_parse_to_date(c)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 6e937ba696e39..faf439d87a5f2 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -23,7 +23,6 @@
import pandas.tslib as tslib
import pandas.parser as _parser
from pandas.tseries.period import Period
-import json
class DateConversionError(Exception):
diff --git a/pandas/io/tests/test_json/__init__.py b/pandas/io/tests/test_json/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py
new file mode 100644
index 0000000000000..b64bfaacd38f2
--- /dev/null
+++ b/pandas/io/tests/test_json/test_pandas.py
@@ -0,0 +1,355 @@
+
+# pylint: disable-msg=W0612,E1101
+from copy import deepcopy
+from datetime import datetime, timedelta
+from StringIO import StringIO
+import cPickle as pickle
+import operator
+import os
+import unittest
+
+import numpy as np
+
+from pandas import Series, DataFrame, DatetimeIndex, Timestamp
+import pandas as pd
+read_json = pd.read_json
+
+from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
+ assert_series_equal, network,
+ ensure_clean)
+import pandas.util.testing as tm
+from numpy.testing.decorators import slow
+
+_seriesd = tm.getSeriesData()
+_tsd = tm.getTimeSeriesData()
+
+_frame = DataFrame(_seriesd)
+_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
+_intframe = DataFrame(dict((k, v.astype(int))
+ for k, v in _seriesd.iteritems()))
+
+_tsframe = DataFrame(_tsd)
+
+_mixed_frame = _frame.copy()
+
+class TestPandasObjects(unittest.TestCase):
+
+ def setUp(self):
+ self.ts = tm.makeTimeSeries()
+ self.ts.name = 'ts'
+
+ self.series = tm.makeStringSeries()
+ self.series.name = 'series'
+
+ self.objSeries = tm.makeObjectSeries()
+ self.objSeries.name = 'objects'
+
+ self.empty_series = Series([], index=[])
+ self.empty_frame = DataFrame({})
+
+ self.frame = _frame.copy()
+ self.frame2 = _frame2.copy()
+ self.intframe = _intframe.copy()
+ self.tsframe = _tsframe.copy()
+ self.mixed_frame = _mixed_frame.copy()
+
+ def test_frame_from_json_to_json(self):
+
+ def _check_orient(df, orient, dtype=None, numpy=True):
+ df = df.sort()
+ dfjson = df.to_json(orient=orient)
+ unser = read_json(dfjson, orient=orient, dtype=dtype,
+ numpy=numpy)
+ unser = unser.sort()
+ if df.index.dtype.type == np.datetime64:
+ unser.index = DatetimeIndex(unser.index.values.astype('i8'))
+ if orient == "records":
+ # index is not captured in this orientation
+ assert_almost_equal(df.values, unser.values)
+ self.assert_(df.columns.equals(unser.columns))
+ elif orient == "values":
+ # index and cols are not captured in this orientation
+ assert_almost_equal(df.values, unser.values)
+ elif orient == "split":
+ # index and col labels might not be strings
+ unser.index = [str(i) for i in unser.index]
+ unser.columns = [str(i) for i in unser.columns]
+ unser = unser.sort()
+ assert_almost_equal(df.values, unser.values)
+ else:
+ assert_frame_equal(df, unser)
+
+ def _check_all_orients(df, dtype=None):
+ _check_orient(df, "columns", dtype=dtype)
+ _check_orient(df, "records", dtype=dtype)
+ _check_orient(df, "split", dtype=dtype)
+ _check_orient(df, "index", dtype=dtype)
+ _check_orient(df, "values", dtype=dtype)
+
+ _check_orient(df, "columns", dtype=dtype, numpy=False)
+ _check_orient(df, "records", dtype=dtype, numpy=False)
+ _check_orient(df, "split", dtype=dtype, numpy=False)
+ _check_orient(df, "index", dtype=dtype, numpy=False)
+ _check_orient(df, "values", dtype=dtype, numpy=False)
+
+ # basic
+ _check_all_orients(self.frame)
+ self.assertEqual(self.frame.to_json(),
+ self.frame.to_json(orient="columns"))
+
+ _check_all_orients(self.intframe, dtype=self.intframe.values.dtype)
+
+ # big one
+ # index and columns are strings as all unserialised JSON object keys
+ # are assumed to be strings
+ biggie = DataFrame(np.zeros((200, 4)),
+ columns=[str(i) for i in range(4)],
+ index=[str(i) for i in range(200)])
+ _check_all_orients(biggie)
+
+ # dtypes
+ _check_all_orients(DataFrame(biggie, dtype=np.float64),
+ dtype=np.float64)
+ _check_all_orients(DataFrame(biggie, dtype=np.int), dtype=np.int)
+ _check_all_orients(DataFrame(biggie, dtype='<U3'), dtype='<U3')
+
+ # empty
+ _check_all_orients(self.empty_frame)
+
+ # time series data
+ _check_all_orients(self.tsframe)
+
+ # mixed data
+ index = pd.Index(['a', 'b', 'c', 'd', 'e'])
+ data = {
+ 'A': [0., 1., 2., 3., 4.],
+ 'B': [0., 1., 0., 1., 0.],
+ 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
+ 'D': [True, False, True, False, True]
+ }
+ df = DataFrame(data=data, index=index)
+ _check_orient(df, "split")
+ _check_orient(df, "records")
+ _check_orient(df, "values")
+ _check_orient(df, "columns")
+ # index oriented is problematic as it is read back in in a transposed
+ # state, so the columns are interpreted as having mixed data and
+ # given object dtypes.
+ # force everything to have object dtype beforehand
+ _check_orient(df.transpose().transpose(), "index")
+
+ def test_frame_from_json_bad_data(self):
+ self.assertRaises(ValueError, read_json, StringIO('{"key":b:a:d}'))
+
+ # too few indices
+ json = StringIO('{"columns":["A","B"],'
+ '"index":["2","3"],'
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}"')
+ self.assertRaises(ValueError, read_json, json,
+ orient="split")
+
+ # too many columns
+ json = StringIO('{"columns":["A","B","C"],'
+ '"index":["1","2","3"],'
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}"')
+ self.assertRaises(AssertionError, read_json, json,
+ orient="split")
+
+ # bad key
+ json = StringIO('{"badkey":["A","B"],'
+ '"index":["2","3"],'
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}"')
+ self.assertRaises(TypeError, read_json, json,
+ orient="split")
+
+ def test_frame_from_json_nones(self):
+ df = DataFrame([[1, 2], [4, 5, 6]])
+ unser = read_json(df.to_json())
+ self.assert_(np.isnan(unser['2'][0]))
+
+ df = DataFrame([['1', '2'], ['4', '5', '6']])
+ unser = read_json(df.to_json())
+ self.assert_(unser['2'][0] is None)
+
+ unser = read_json(df.to_json(), numpy=False)
+ self.assert_(unser['2'][0] is None)
+
+ # infinities get mapped to nulls which get mapped to NaNs during
+ # deserialisation
+ df = DataFrame([[1, 2], [4, 5, 6]])
+ df[2][0] = np.inf
+ unser = read_json(df.to_json())
+ self.assert_(np.isnan(unser['2'][0]))
+
+ df[2][0] = np.NINF
+ unser = read_json(df.to_json())
+ self.assert_(np.isnan(unser['2'][0]))
+
+ def test_frame_to_json_except(self):
+ df = DataFrame([1, 2, 3])
+ self.assertRaises(ValueError, df.to_json, orient="garbage")
+
+ def test_series_from_json_to_json(self):
+
+ def _check_orient(series, orient, dtype=None, numpy=True):
+ series = series.sort_index()
+ unser = read_json(series.to_json(orient=orient), typ='series',
+ orient=orient, numpy=numpy, dtype=dtype)
+ unser = unser.sort_index()
+ if series.index.dtype.type == np.datetime64:
+ unser.index = DatetimeIndex(unser.index.values.astype('i8'))
+ if orient == "records" or orient == "values":
+ assert_almost_equal(series.values, unser.values)
+ else:
+ try:
+ assert_series_equal(series, unser)
+ except:
+ raise
+ if orient == "split":
+ self.assert_(series.name == unser.name)
+
+ def _check_all_orients(series, dtype=None):
+ _check_orient(series, "columns", dtype=dtype)
+ _check_orient(series, "records", dtype=dtype)
+ _check_orient(series, "split", dtype=dtype)
+ _check_orient(series, "index", dtype=dtype)
+ _check_orient(series, "values", dtype=dtype)
+
+ _check_orient(series, "columns", dtype=dtype, numpy=False)
+ _check_orient(series, "records", dtype=dtype, numpy=False)
+ _check_orient(series, "split", dtype=dtype, numpy=False)
+ _check_orient(series, "index", dtype=dtype, numpy=False)
+ _check_orient(series, "values", dtype=dtype, numpy=False)
+
+ # basic
+ _check_all_orients(self.series)
+ self.assertEqual(self.series.to_json(),
+ self.series.to_json(orient="index"))
+
+ objSeries = Series([str(d) for d in self.objSeries],
+ index=self.objSeries.index,
+ name=self.objSeries.name)
+ _check_all_orients(objSeries)
+ _check_all_orients(self.empty_series)
+ _check_all_orients(self.ts)
+
+ # dtype
+ s = Series(range(6), index=['a','b','c','d','e','f'])
+ _check_all_orients(Series(s, dtype=np.float64), dtype=np.float64)
+ _check_all_orients(Series(s, dtype=np.int), dtype=np.int)
+
+ def test_series_to_json_except(self):
+ s = Series([1, 2, 3])
+ self.assertRaises(ValueError, s.to_json, orient="garbage")
+
+ def test_typ(self):
+
+ s = Series(range(6), index=['a','b','c','d','e','f'])
+ result = read_json(s.to_json(),typ=None)
+ assert_series_equal(result,s)
+
+ def test_reconstruction_index(self):
+
+ df = DataFrame([[1, 2, 3], [4, 5, 6]])
+ result = read_json(df.to_json())
+
+ # the index is serialized as strings....correct?
+ #assert_frame_equal(result,df)
+
+ def test_path(self):
+ with ensure_clean('test.json') as path:
+
+ for df in [ self.frame, self.frame2, self.intframe, self.tsframe, self.mixed_frame ]:
+ df.to_json(path)
+ read_json(path)
+
+ def test_axis_dates(self):
+
+ # frame
+ json = self.tsframe.to_json()
+ result = read_json(json)
+ assert_frame_equal(result,self.tsframe)
+
+ # series
+ json = self.ts.to_json()
+ result = read_json(json,typ='series')
+ assert_series_equal(result,self.ts)
+
+ def test_parse_dates(self):
+
+ # frame
+ df = self.tsframe.copy()
+ df['date'] = Timestamp('20130101')
+
+ json = df.to_json()
+ result = read_json(json,parse_dates=True)
+ assert_frame_equal(result,df)
+
+ df['foo'] = 1.
+ json = df.to_json()
+ result = read_json(json,parse_dates=True)
+ assert_frame_equal(result,df)
+
+ # series
+ ts = Series(Timestamp('20130101'),index=self.ts.index)
+ json = ts.to_json()
+ result = read_json(json,typ='series',parse_dates=True)
+ assert_series_equal(result,ts)
+
+ def test_date_format(self):
+
+ df = self.tsframe.copy()
+ df['date'] = Timestamp('20130101')
+ df_orig = df.copy()
+
+ json = df.to_json(date_format='iso')
+ result = read_json(json,parse_dates=True)
+ assert_frame_equal(result,df_orig)
+
+ # make sure that we did in fact copy
+ assert_frame_equal(df,df_orig)
+
+ ts = Series(Timestamp('20130101'),index=self.ts.index)
+ json = ts.to_json(date_format='iso')
+ result = read_json(json,typ='series',parse_dates=True)
+ assert_series_equal(result,ts)
+
+ def test_weird_nested_json(self):
+
+ # this used to core dump the parser
+ s = r'''{
+ "status": "success",
+ "data": {
+ "posts": [
+ {
+ "id": 1,
+ "title": "A blog post",
+ "body": "Some useful content"
+ },
+ {
+ "id": 2,
+ "title": "Another blog post",
+ "body": "More content"
+ }
+ ]
+ }
+}'''
+
+ read_json(s)
+
+ @network
+ @slow
+ def test_url(self):
+ import urllib2
+ try:
+
+ url = 'https://api.github.com/repos/pydata/pandas/issues?per_page=5'
+ result = read_json(url,parse_dates=True)
+ for c in ['created_at','closed_at','updated_at']:
+ self.assert_(result[c].dtype == 'datetime64[ns]')
+
+ url = 'http://search.twitter.com/search.json?q=pandas%20python'
+ result = read_json(url)
+
+ except urllib2.URLError:
+ raise nose.SkipTest
diff --git a/pandas/io/tests/test_json/test_ujson.py b/pandas/io/tests/test_json/test_ujson.py
new file mode 100644
index 0000000000000..2e775b4a541ea
--- /dev/null
+++ b/pandas/io/tests/test_json/test_ujson.py
@@ -0,0 +1,1232 @@
+import unittest
+from unittest import TestCase
+
+import pandas.json as ujson
+try:
+ import json
+except ImportError:
+ import simplejson as json
+import math
+import nose
+import platform
+import sys
+import time
+import datetime
+import calendar
+import StringIO
+import re
+from functools import partial
+import pandas.util.py3compat as py3compat
+
+import numpy as np
+from pandas.util.testing import assert_almost_equal
+from numpy.testing import (assert_array_equal,
+ assert_array_almost_equal_nulp,
+ assert_approx_equal)
+from pandas import DataFrame, Series, Index
+import pandas.util.testing as tm
+
+
+def _skip_if_python_ver(skip_major, skip_minor=None):
+ major, minor = sys.version_info[:2]
+ if major == skip_major and (skip_minor is None or minor == skip_minor):
+ raise nose.SkipTest
+
+json_unicode = (json.dumps if sys.version_info[0] >= 3
+ else partial(json.dumps, encoding="utf-8"))
+
+class UltraJSONTests(TestCase):
+ def test_encodeDictWithUnicodeKeys(self):
+ input = { u"key1": u"value1", u"key1": u"value1", u"key1": u"value1", u"key1": u"value1", u"key1": u"value1", u"key1": u"value1" }
+ output = ujson.encode(input)
+
+ input = { u"بن": u"value1", u"بن": u"value1", u"بن": u"value1", u"بن": u"value1", u"بن": u"value1", u"بن": u"value1", u"بن": u"value1" }
+ output = ujson.encode(input)
+
+ pass
+
+ def test_encodeDoubleConversion(self):
+ input = math.pi
+ output = ujson.encode(input)
+ self.assertEquals(round(input, 5), round(json.loads(output), 5))
+ self.assertEquals(round(input, 5), round(ujson.decode(output), 5))
+
+ def test_encodeWithDecimal(self):
+ input = 1.0
+ output = ujson.encode(input)
+ self.assertEquals(output, "1.0")
+
+ def test_encodeDoubleNegConversion(self):
+ input = -math.pi
+ output = ujson.encode(input)
+ self.assertEquals(round(input, 5), round(json.loads(output), 5))
+ self.assertEquals(round(input, 5), round(ujson.decode(output), 5))
+
+ def test_encodeArrayOfNestedArrays(self):
+ input = [[[[]]]] * 20
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ #self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ input = np.array(input)
+ assert_array_equal(input, ujson.decode(output, numpy=True, dtype=input.dtype))
+
+ def test_encodeArrayOfDoubles(self):
+ input = [ 31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ #self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ assert_array_equal(np.array(input), ujson.decode(output, numpy=True))
+
+ def test_doublePrecisionTest(self):
+ input = 30.012345678901234
+ output = ujson.encode(input, double_precision = 15)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(input, ujson.decode(output))
+
+ output = ujson.encode(input, double_precision = 9)
+ self.assertEquals(round(input, 9), json.loads(output))
+ self.assertEquals(round(input, 9), ujson.decode(output))
+
+ output = ujson.encode(input, double_precision = 3)
+ self.assertEquals(round(input, 3), json.loads(output))
+ self.assertEquals(round(input, 3), ujson.decode(output))
+
+ output = ujson.encode(input)
+ self.assertEquals(round(input, 5), json.loads(output))
+ self.assertEquals(round(input, 5), ujson.decode(output))
+
+ def test_invalidDoublePrecision(self):
+ input = 30.12345678901234567890
+ output = ujson.encode(input, double_precision = 20)
+ # should snap to the max, which is 15
+ self.assertEquals(round(input, 15), json.loads(output))
+ self.assertEquals(round(input, 15), ujson.decode(output))
+
+ output = ujson.encode(input, double_precision = -1)
+ # also should snap to the max, which is 15
+ self.assertEquals(round(input, 15), json.loads(output))
+ self.assertEquals(round(input, 15), ujson.decode(output))
+
+ # will throw typeError
+ self.assertRaises(TypeError, ujson.encode, input, double_precision = '9')
+ # will throw typeError
+ self.assertRaises(TypeError, ujson.encode, input, double_precision = None)
+
+
+ def test_encodeStringConversion(self):
+ input = "A string \\ / \b \f \n \r \t"
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, '"A string \\\\ \\/ \\b \\f \\n \\r \\t"')
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+ def test_decodeUnicodeConversion(self):
+ pass
+
+ def test_encodeUnicodeConversion1(self):
+ input = "Räksmörgås اسامة بن محمد بن عوض بن لادن"
+ enc = ujson.encode(input)
+ dec = ujson.decode(enc)
+ self.assertEquals(enc, json_unicode(input))
+ self.assertEquals(dec, json.loads(enc))
+
+ def test_encodeControlEscaping(self):
+ input = "\x19"
+ enc = ujson.encode(input)
+ dec = ujson.decode(enc)
+ self.assertEquals(input, dec)
+ self.assertEquals(enc, json_unicode(input))
+
+
+ def test_encodeUnicodeConversion2(self):
+ input = "\xe6\x97\xa5\xd1\x88"
+ enc = ujson.encode(input)
+ dec = ujson.decode(enc)
+ self.assertEquals(enc, json_unicode(input))
+ self.assertEquals(dec, json.loads(enc))
+
+ def test_encodeUnicodeSurrogatePair(self):
+ _skip_if_python_ver(2, 5)
+ _skip_if_python_ver(2, 6)
+ input = "\xf0\x90\x8d\x86"
+ enc = ujson.encode(input)
+ dec = ujson.decode(enc)
+
+ self.assertEquals(enc, json_unicode(input))
+ self.assertEquals(dec, json.loads(enc))
+
+ def test_encodeUnicode4BytesUTF8(self):
+ _skip_if_python_ver(2, 5)
+ _skip_if_python_ver(2, 6)
+ input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
+ enc = ujson.encode(input)
+ dec = ujson.decode(enc)
+
+ self.assertEquals(enc, json_unicode(input))
+ self.assertEquals(dec, json.loads(enc))
+
+ def test_encodeUnicode4BytesUTF8Highest(self):
+ _skip_if_python_ver(2, 5)
+ _skip_if_python_ver(2, 6)
+ input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
+ enc = ujson.encode(input)
+
+ dec = ujson.decode(enc)
+
+ self.assertEquals(enc, json_unicode(input))
+ self.assertEquals(dec, json.loads(enc))
+
+
+ def test_encodeArrayInArray(self):
+ input = [[[[]]]]
+ output = ujson.encode(input)
+
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ assert_array_equal(np.array(input), ujson.decode(output, numpy=True))
+ pass
+
+ def test_encodeIntConversion(self):
+ input = 31337
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+ def test_encodeIntNegConversion(self):
+ input = -31337
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+
+ def test_encodeLongNegConversion(self):
+ input = -9223372036854775808
+ output = ujson.encode(input)
+
+ outputjson = json.loads(output)
+ outputujson = ujson.decode(output)
+
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+ def test_encodeListConversion(self):
+ input = [ 1, 2, 3, 4 ]
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(input, ujson.decode(output))
+ assert_array_equal(np.array(input), ujson.decode(output, numpy=True))
+ pass
+
+ def test_encodeDictConversion(self):
+ input = { "k1": 1, "k2": 2, "k3": 3, "k4": 4 }
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(input, ujson.decode(output))
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+ def test_encodeNoneConversion(self):
+ input = None
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+ def test_encodeTrueConversion(self):
+ input = True
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+ def test_encodeFalseConversion(self):
+ input = False
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+ # def test_encodeDatetimeConversion(self):
+ # ts = time.time()
+ # input = datetime.datetime.fromtimestamp(ts)
+ # output = ujson.encode(input)
+ # expected = calendar.timegm(input.utctimetuple())
+ # self.assertEquals(int(expected), json.loads(output))
+ # self.assertEquals(int(expected), ujson.decode(output))
+ # pass
+
+ # def test_encodeDateConversion(self):
+ # ts = time.time()
+ # input = datetime.date.fromtimestamp(ts)
+
+ # output = ujson.encode(input)
+ # tup = ( input.year, input.month, input.day, 0, 0, 0 )
+
+ # expected = calendar.timegm(tup)
+ # self.assertEquals(int(expected), json.loads(output))
+ # self.assertEquals(int(expected), ujson.decode(output))
+
+ def test_datetime_nanosecond_unit(self):
+ from datetime import datetime
+ from pandas.lib import Timestamp
+
+ val = datetime.now()
+ stamp = Timestamp(val)
+
+ roundtrip = ujson.decode(ujson.encode(val))
+ self.assert_(roundtrip == stamp.value)
+
+ def test_encodeToUTF8(self):
+ _skip_if_python_ver(2, 5)
+ input = "\xe6\x97\xa5\xd1\x88"
+ enc = ujson.encode(input, ensure_ascii=False)
+ dec = ujson.decode(enc)
+ self.assertEquals(enc, json_unicode(input, ensure_ascii=False))
+ self.assertEquals(dec, json.loads(enc))
+
+ def test_decodeFromUnicode(self):
+ input = u"{\"obj\": 31337}"
+ dec1 = ujson.decode(input)
+ dec2 = ujson.decode(str(input))
+ self.assertEquals(dec1, dec2)
+
+ def test_encodeRecursionMax(self):
+ # 8 is the max recursion depth
+
+ class O2:
+ member = 0
+ pass
+
+ class O1:
+ member = 0
+ pass
+
+ input = O1()
+ input.member = O2()
+ input.member.member = input
+
+ try:
+ output = ujson.encode(input)
+ assert False, "Expected overflow exception"
+ except(OverflowError):
+ pass
+
+ def test_encodeDoubleNan(self):
+ input = np.nan
+ assert ujson.encode(input) == 'null', "Expected null"
+
+ def test_encodeDoubleInf(self):
+ input = np.inf
+ assert ujson.encode(input) == 'null', "Expected null"
+
+ def test_encodeDoubleNegInf(self):
+ input = -np.inf
+ assert ujson.encode(input) == 'null', "Expected null"
+
+
+ def test_decodeJibberish(self):
+ input = "fdsa sda v9sa fdsa"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeBrokenArrayStart(self):
+ input = "["
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeBrokenObjectStart(self):
+ input = "{"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeBrokenArrayEnd(self):
+ input = "]"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeBrokenObjectEnd(self):
+ input = "}"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeStringUnterminated(self):
+ input = "\"TESTING"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeStringUntermEscapeSequence(self):
+ input = "\"TESTING\\\""
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeStringBadEscape(self):
+ input = "\"TESTING\\\""
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeTrueBroken(self):
+ input = "tru"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeFalseBroken(self):
+ input = "fa"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+ def test_decodeNullBroken(self):
+ input = "n"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+ assert False, "Wrong exception"
+
+
+ def test_decodeBrokenDictKeyTypeLeakTest(self):
+ input = '{{1337:""}}'
+ for x in xrange(1000):
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError),e:
+ continue
+
+ assert False, "Wrong exception"
+
+ def test_decodeBrokenDictLeakTest(self):
+ input = '{{"key":"}'
+ for x in xrange(1000):
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ continue
+
+ assert False, "Wrong exception"
+
+ def test_decodeBrokenListLeakTest(self):
+ input = '[[[true'
+ for x in xrange(1000):
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ continue
+
+ assert False, "Wrong exception"
+
+ def test_decodeDictWithNoKey(self):
+ input = "{{{{31337}}}}"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+
+ assert False, "Wrong exception"
+
+ def test_decodeDictWithNoColonOrValue(self):
+ input = "{{{{\"key\"}}}}"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+
+ assert False, "Wrong exception"
+
+ def test_decodeDictWithNoValue(self):
+ input = "{{{{\"key\":}}}}"
+ try:
+ ujson.decode(input)
+ assert False, "Expected exception!"
+ except(ValueError):
+ return
+
+ assert False, "Wrong exception"
+
+ def test_decodeNumericIntPos(self):
+ input = "31337"
+ self.assertEquals (31337, ujson.decode(input))
+
+ def test_decodeNumericIntNeg(self):
+ input = "-31337"
+ self.assertEquals (-31337, ujson.decode(input))
+
+ def test_encodeUnicode4BytesUTF8Fail(self):
+ _skip_if_python_ver(3)
+ input = "\xfd\xbf\xbf\xbf\xbf\xbf"
+ try:
+ enc = ujson.encode(input)
+ assert False, "Expected exception"
+ except OverflowError:
+ pass
+
+ def test_encodeNullCharacter(self):
+ input = "31337 \x00 1337"
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+
+ input = "\x00"
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+
+ self.assertEquals('" \\u0000\\r\\n "', ujson.dumps(u" \u0000\r\n "))
+ pass
+
+ def test_decodeNullCharacter(self):
+ input = "\"31337 \\u0000 31337\""
+ self.assertEquals(ujson.decode(input), json.loads(input))
+
+
+ def test_encodeListLongConversion(self):
+ input = [9223372036854775807, 9223372036854775807, 9223372036854775807,
+ 9223372036854775807, 9223372036854775807, 9223372036854775807 ]
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(input, ujson.decode(output))
+ assert_array_equal(np.array(input), ujson.decode(output, numpy=True,
+ dtype=np.int64))
+ pass
+
+ def test_encodeLongConversion(self):
+ input = 9223372036854775807
+ output = ujson.encode(input)
+ self.assertEquals(input, json.loads(output))
+ self.assertEquals(output, json.dumps(input))
+ self.assertEquals(input, ujson.decode(output))
+ pass
+
+ def test_numericIntExp(self):
+ input = "1337E40"
+ output = ujson.decode(input)
+ self.assertEquals(output, json.loads(input))
+
+ def test_numericIntFrcExp(self):
+ input = "1.337E40"
+ output = ujson.decode(input)
+ self.assertAlmostEqual(output, json.loads(input))
+
+ def test_decodeNumericIntExpEPLUS(self):
+ input = "1337E+40"
+ output = ujson.decode(input)
+ self.assertAlmostEqual(output, json.loads(input))
+
+ def test_decodeNumericIntExpePLUS(self):
+ input = "1.337e+40"
+ output = ujson.decode(input)
+ self.assertAlmostEqual(output, json.loads(input))
+
+ def test_decodeNumericIntExpE(self):
+ input = "1337E40"
+ output = ujson.decode(input)
+ self.assertAlmostEqual(output, json.loads(input))
+
+ def test_decodeNumericIntExpe(self):
+ input = "1337e40"
+ output = ujson.decode(input)
+ self.assertAlmostEqual(output, json.loads(input))
+
+ def test_decodeNumericIntExpEMinus(self):
+ input = "1.337E-4"
+ output = ujson.decode(input)
+ self.assertAlmostEqual(output, json.loads(input))
+
+ def test_decodeNumericIntExpeMinus(self):
+ input = "1.337e-4"
+ output = ujson.decode(input)
+ self.assertAlmostEqual(output, json.loads(input))
+
+ def test_dumpToFile(self):
+ f = StringIO.StringIO()
+ ujson.dump([1, 2, 3], f)
+ self.assertEquals("[1,2,3]", f.getvalue())
+
+ def test_dumpToFileLikeObject(self):
+ class filelike:
+ def __init__(self):
+ self.bytes = ''
+ def write(self, bytes):
+ self.bytes += bytes
+ f = filelike()
+ ujson.dump([1, 2, 3], f)
+ self.assertEquals("[1,2,3]", f.bytes)
+
+ def test_dumpFileArgsError(self):
+ try:
+ ujson.dump([], '')
+ except TypeError:
+ pass
+ else:
+ assert False, 'expected TypeError'
+
+ def test_loadFile(self):
+ f = StringIO.StringIO("[1,2,3,4]")
+ self.assertEquals([1, 2, 3, 4], ujson.load(f))
+ f = StringIO.StringIO("[1,2,3,4]")
+ assert_array_equal(np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
+
+ def test_loadFileLikeObject(self):
+ class filelike:
+ def read(self):
+ try:
+ self.end
+ except AttributeError:
+ self.end = True
+ return "[1,2,3,4]"
+ f = filelike()
+ self.assertEquals([1, 2, 3, 4], ujson.load(f))
+ f = filelike()
+ assert_array_equal(np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
+
+ def test_loadFileArgsError(self):
+ try:
+ ujson.load("[]")
+ except TypeError:
+ pass
+ else:
+ assert False, "expected TypeError"
+
+ def test_version(self):
+ assert re.match(r'^\d+\.\d+(\.\d+)?$', ujson.__version__), \
+ "ujson.__version__ must be a string like '1.4.0'"
+
+ def test_encodeNumericOverflow(self):
+ try:
+ ujson.encode(12839128391289382193812939)
+ except OverflowError:
+ pass
+ else:
+ assert False, "expected OverflowError"
+
+ def test_encodeNumericOverflowNested(self):
+ for n in xrange(0, 100):
+ class Nested:
+ x = 12839128391289382193812939
+
+ nested = Nested()
+
+ try:
+ ujson.encode(nested)
+ except OverflowError:
+ pass
+ else:
+ assert False, "expected OverflowError"
+
+ def test_decodeNumberWith32bitSignBit(self):
+ #Test that numbers that fit within 32 bits but would have the
+ # sign bit set (2**31 <= x < 2**32) are decoded properly.
+ boundary1 = 2**31
+ boundary2 = 2**32
+ docs = (
+ '{"id": 3590016419}',
+ '{"id": %s}' % 2**31,
+ '{"id": %s}' % 2**32,
+ '{"id": %s}' % ((2**32)-1),
+ )
+ results = (3590016419, 2**31, 2**32, 2**32-1)
+ for doc,result in zip(docs, results):
+ self.assertEqual(ujson.decode(doc)['id'], result)
+
+ def test_encodeBigEscape(self):
+ for x in xrange(10):
+ if py3compat.PY3:
+ base = '\u00e5'.encode('utf-8')
+ else:
+ base = "\xc3\xa5"
+ input = base * 1024 * 1024 * 2
+ output = ujson.encode(input)
+
+ def test_decodeBigEscape(self):
+ for x in xrange(10):
+ if py3compat.PY3:
+ base = '\u00e5'.encode('utf-8')
+ else:
+ base = "\xc3\xa5"
+ quote = py3compat.str_to_bytes("\"")
+ input = quote + (base * 1024 * 1024 * 2) + quote
+ output = ujson.decode(input)
+
+ def test_toDict(self):
+ d = {u"key": 31337}
+
+ class DictTest:
+ def toDict(self):
+ return d
+
+ o = DictTest()
+ output = ujson.encode(o)
+ dec = ujson.decode(output)
+ self.assertEquals(dec, d)
+
+
+class NumpyJSONTests(TestCase):
+
+ def testBool(self):
+ b = np.bool(True)
+ self.assertEqual(ujson.decode(ujson.encode(b)), b)
+
+ def testBoolArray(self):
+ inpt = np.array([True, False, True, True, False, True, False , False],
+ dtype=np.bool)
+ outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=np.bool)
+ assert_array_equal(inpt, outp)
+
+ def testInt(self):
+ num = np.int(2562010)
+ self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)
+
+ num = np.int8(127)
+ self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)
+
+ num = np.int16(2562010)
+ self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)
+
+ num = np.int32(2562010)
+ self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)
+
+ num = np.int64(2562010)
+ self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)
+
+ num = np.uint8(255)
+ self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)
+
+ num = np.uint16(2562010)
+ self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)
+
+ num = np.uint32(2562010)
+ self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)
+
+ num = np.uint64(2562010)
+ self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
+
+ def testIntArray(self):
+ arr = np.arange(100, dtype=np.int)
+ dtypes = (np.int, np.int8, np.int16, np.int32, np.int64,
+ np.uint, np.uint8, np.uint16, np.uint32, np.uint64)
+ for dtype in dtypes:
+ inpt = arr.astype(dtype)
+ outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=dtype)
+ assert_array_equal(inpt, outp)
+
+ def testIntMax(self):
+ num = np.int(np.iinfo(np.int).max)
+ self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)
+
+ num = np.int8(np.iinfo(np.int8).max)
+ self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)
+
+ num = np.int16(np.iinfo(np.int16).max)
+ self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)
+
+ num = np.int32(np.iinfo(np.int32).max)
+ self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)
+
+ num = np.uint8(np.iinfo(np.uint8).max)
+ self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)
+
+ num = np.uint16(np.iinfo(np.uint16).max)
+ self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)
+
+ num = np.uint32(np.iinfo(np.uint32).max)
+ self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)
+
+ if platform.architecture()[0] != '32bit':
+ num = np.int64(np.iinfo(np.int64).max)
+ self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)
+
+ # uint64 max will always overflow as it's encoded to signed
+ num = np.uint64(np.iinfo(np.int64).max)
+ self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
+
+ def testFloat(self):
+ num = np.float(256.2013)
+ self.assertEqual(np.float(ujson.decode(ujson.encode(num))), num)
+
+ num = np.float32(256.2013)
+ self.assertEqual(np.float32(ujson.decode(ujson.encode(num))), num)
+
+ num = np.float64(256.2013)
+ self.assertEqual(np.float64(ujson.decode(ujson.encode(num))), num)
+
+ def testFloatArray(self):
+ arr = np.arange(12.5, 185.72, 1.7322, dtype=np.float)
+ dtypes = (np.float, np.float32, np.float64)
+
+ for dtype in dtypes:
+ inpt = arr.astype(dtype)
+ outp = np.array(ujson.decode(ujson.encode(inpt, double_precision=15)), dtype=dtype)
+ assert_array_almost_equal_nulp(inpt, outp)
+
+ def testFloatMax(self):
+ num = np.float(np.finfo(np.float).max/10)
+ assert_approx_equal(np.float(ujson.decode(ujson.encode(num))), num, 15)
+
+ num = np.float32(np.finfo(np.float32).max/10)
+ assert_approx_equal(np.float32(ujson.decode(ujson.encode(num))), num, 15)
+
+ num = np.float64(np.finfo(np.float64).max/10)
+ assert_approx_equal(np.float64(ujson.decode(ujson.encode(num))), num, 15)
+
+ def testArrays(self):
+ arr = np.arange(100);
+
+ arr = arr.reshape((10, 10))
+ assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
+ assert_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
+
+ arr = arr.reshape((5, 5, 4))
+ assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
+ assert_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
+
+ arr = arr.reshape((100, 1))
+ assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
+ assert_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
+
+ arr = np.arange(96);
+ arr = arr.reshape((2, 2, 2, 2, 3, 2))
+ assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
+ assert_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
+
+ l = ['a', list(), dict(), dict(), list(),
+ 42, 97.8, ['a', 'b'], {'key': 'val'}]
+ arr = np.array(l)
+ assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
+
+ arr = np.arange(100.202, 200.202, 1, dtype=np.float32);
+ arr = arr.reshape((5, 5, 4))
+ outp = np.array(ujson.decode(ujson.encode(arr)), dtype=np.float32)
+ assert_array_almost_equal_nulp(arr, outp)
+ outp = ujson.decode(ujson.encode(arr), numpy=True, dtype=np.float32)
+ assert_array_almost_equal_nulp(arr, outp)
+
+ def testArrayNumpyExcept(self):
+
+ input = ujson.dumps([42, {}, 'a'])
+ try:
+ ujson.decode(input, numpy=True)
+ assert False, "Expected exception!"
+ except(TypeError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ input = ujson.dumps(['a', 'b', [], 'c'])
+ try:
+ ujson.decode(input, numpy=True)
+ assert False, "Expected exception!"
+ except(ValueError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ input = ujson.dumps([['a'], 42])
+ try:
+ ujson.decode(input, numpy=True)
+ assert False, "Expected exception!"
+ except(ValueError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ input = ujson.dumps([42, ['a'], 42])
+ try:
+ ujson.decode(input, numpy=True)
+ assert False, "Expected exception!"
+ except(ValueError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ input = ujson.dumps([{}, []])
+ try:
+ ujson.decode(input, numpy=True)
+ assert False, "Expected exception!"
+ except(ValueError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ input = ujson.dumps([42, None])
+ try:
+ ujson.decode(input, numpy=True)
+ assert False, "Expected exception!"
+ except(TypeError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ input = ujson.dumps([{'a': 'b'}])
+ try:
+ ujson.decode(input, numpy=True, labelled=True)
+ assert False, "Expected exception!"
+ except(ValueError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ input = ujson.dumps({'a': {'b': {'c': 42}}})
+ try:
+ ujson.decode(input, numpy=True, labelled=True)
+ assert False, "Expected exception!"
+ except(ValueError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ input = ujson.dumps([{'a': 42, 'b': 23}, {'c': 17}])
+ try:
+ ujson.decode(input, numpy=True, labelled=True)
+ assert False, "Expected exception!"
+ except(ValueError):
+ pass
+ except:
+ assert False, "Wrong exception"
+
+ def testArrayNumpyLabelled(self):
+ input = {'a': []}
+ output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
+ self.assertTrue((np.empty((1, 0)) == output[0]).all())
+ self.assertTrue((np.array(['a']) == output[1]).all())
+ self.assertTrue(output[2] is None)
+
+ input = [{'a': 42}]
+ output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
+ self.assertTrue((np.array([42]) == output[0]).all())
+ self.assertTrue(output[1] is None)
+ self.assertTrue((np.array([u'a']) == output[2]).all())
+
+ # py3 is non-determinstic on the ordering......
+ if not py3compat.PY3:
+ input = [{'a': 42, 'b':31}, {'a': 24, 'c': 99}, {'a': 2.4, 'b': 78}]
+ output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
+ expectedvals = np.array([42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3,2))
+ self.assertTrue((expectedvals == output[0]).all())
+ self.assertTrue(output[1] is None)
+ self.assertTrue((np.array([u'a', 'b']) == output[2]).all())
+
+
+ input = {1: {'a': 42, 'b':31}, 2: {'a': 24, 'c': 99}, 3: {'a': 2.4, 'b': 78}}
+ output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
+ expectedvals = np.array([42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3,2))
+ self.assertTrue((expectedvals == output[0]).all())
+ self.assertTrue((np.array(['1','2','3']) == output[1]).all())
+ self.assertTrue((np.array(['a', 'b']) == output[2]).all())
+
+class PandasJSONTests(TestCase):
+
+ def testDataFrame(self):
+ df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
+
+ # column indexed
+ outp = DataFrame(ujson.decode(ujson.encode(df)))
+ self.assertTrue((df == outp).values.all())
+ assert_array_equal(df.columns, outp.columns)
+ assert_array_equal(df.index, outp.index)
+
+ dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split")))
+ outp = DataFrame(**dec)
+ self.assertTrue((df == outp).values.all())
+ assert_array_equal(df.columns, outp.columns)
+ assert_array_equal(df.index, outp.index)
+
+ outp = DataFrame(ujson.decode(ujson.encode(df, orient="records")))
+ outp.index = df.index
+ self.assertTrue((df == outp).values.all())
+ assert_array_equal(df.columns, outp.columns)
+
+ outp = DataFrame(ujson.decode(ujson.encode(df, orient="values")))
+ outp.index = df.index
+ self.assertTrue((df.values == outp.values).all())
+
+ outp = DataFrame(ujson.decode(ujson.encode(df, orient="index")))
+ self.assertTrue((df.transpose() == outp).values.all())
+ assert_array_equal(df.transpose().columns, outp.columns)
+ assert_array_equal(df.transpose().index, outp.index)
+
+
+ def testDataFrameNumpy(self):
+ df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
+
+ # column indexed
+ outp = DataFrame(ujson.decode(ujson.encode(df), numpy=True))
+ self.assertTrue((df == outp).values.all())
+ assert_array_equal(df.columns, outp.columns)
+ assert_array_equal(df.index, outp.index)
+
+ dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split"),
+ numpy=True))
+ outp = DataFrame(**dec)
+ self.assertTrue((df == outp).values.all())
+ assert_array_equal(df.columns, outp.columns)
+ assert_array_equal(df.index, outp.index)
+
+ outp = DataFrame(ujson.decode(ujson.encode(df, orient="index"), numpy=True))
+ self.assertTrue((df.transpose() == outp).values.all())
+ assert_array_equal(df.transpose().columns, outp.columns)
+ assert_array_equal(df.transpose().index, outp.index)
+
+ def testDataFrameNested(self):
+ df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
+
+ nested = {'df1': df, 'df2': df.copy()}
+
+ exp = {'df1': ujson.decode(ujson.encode(df)),
+ 'df2': ujson.decode(ujson.encode(df))}
+ self.assertTrue(ujson.decode(ujson.encode(nested)) == exp)
+
+ exp = {'df1': ujson.decode(ujson.encode(df, orient="index")),
+ 'df2': ujson.decode(ujson.encode(df, orient="index"))}
+ self.assertTrue(ujson.decode(ujson.encode(nested, orient="index")) == exp)
+
+ exp = {'df1': ujson.decode(ujson.encode(df, orient="records")),
+ 'df2': ujson.decode(ujson.encode(df, orient="records"))}
+ self.assertTrue(ujson.decode(ujson.encode(nested, orient="records")) == exp)
+
+ exp = {'df1': ujson.decode(ujson.encode(df, orient="values")),
+ 'df2': ujson.decode(ujson.encode(df, orient="values"))}
+ self.assertTrue(ujson.decode(ujson.encode(nested, orient="values")) == exp)
+
+ exp = {'df1': ujson.decode(ujson.encode(df, orient="split")),
+ 'df2': ujson.decode(ujson.encode(df, orient="split"))}
+ self.assertTrue(ujson.decode(ujson.encode(nested, orient="split")) == exp)
+
+ def testDataFrameNumpyLabelled(self):
+ df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
+
+ # column indexed
+ outp = DataFrame(*ujson.decode(ujson.encode(df), numpy=True, labelled=True))
+ self.assertTrue((df.T == outp).values.all())
+ assert_array_equal(df.T.columns, outp.columns)
+ assert_array_equal(df.T.index, outp.index)
+
+ outp = DataFrame(*ujson.decode(ujson.encode(df, orient="records"), numpy=True, labelled=True))
+ outp.index = df.index
+ self.assertTrue((df == outp).values.all())
+ assert_array_equal(df.columns, outp.columns)
+
+ outp = DataFrame(*ujson.decode(ujson.encode(df, orient="index"), numpy=True, labelled=True))
+ self.assertTrue((df == outp).values.all())
+ assert_array_equal(df.columns, outp.columns)
+ assert_array_equal(df.index, outp.index)
+
+ def testSeries(self):
+ s = Series([10, 20, 30, 40, 50, 60], name="series", index=[6,7,8,9,10,15])
+ s.sort()
+
+ # column indexed
+ outp = Series(ujson.decode(ujson.encode(s)))
+ outp.sort()
+ self.assertTrue((s == outp).values.all())
+
+ outp = Series(ujson.decode(ujson.encode(s), numpy=True))
+ outp.sort()
+ self.assertTrue((s == outp).values.all())
+
+ dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split")))
+ outp = Series(**dec)
+ self.assertTrue((s == outp).values.all())
+ self.assertTrue(s.name == outp.name)
+
+ dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split"),
+ numpy=True))
+ outp = Series(**dec)
+ self.assertTrue((s == outp).values.all())
+ self.assertTrue(s.name == outp.name)
+
+ outp = Series(ujson.decode(ujson.encode(s, orient="records"), numpy=True))
+ self.assertTrue((s == outp).values.all())
+
+ outp = Series(ujson.decode(ujson.encode(s, orient="records")))
+ self.assertTrue((s == outp).values.all())
+
+ outp = Series(ujson.decode(ujson.encode(s, orient="values"), numpy=True))
+ self.assertTrue((s == outp).values.all())
+
+ outp = Series(ujson.decode(ujson.encode(s, orient="values")))
+ self.assertTrue((s == outp).values.all())
+
+ outp = Series(ujson.decode(ujson.encode(s, orient="index")))
+ outp.sort()
+ self.assertTrue((s == outp).values.all())
+
+ outp = Series(ujson.decode(ujson.encode(s, orient="index"), numpy=True))
+ outp.sort()
+ self.assertTrue((s == outp).values.all())
+
+ def testSeriesNested(self):
+ s = Series([10, 20, 30, 40, 50, 60], name="series", index=[6,7,8,9,10,15])
+ s.sort()
+
+ nested = {'s1': s, 's2': s.copy()}
+
+ exp = {'s1': ujson.decode(ujson.encode(s)),
+ 's2': ujson.decode(ujson.encode(s))}
+ self.assertTrue(ujson.decode(ujson.encode(nested)) == exp)
+
+ exp = {'s1': ujson.decode(ujson.encode(s, orient="split")),
+ 's2': ujson.decode(ujson.encode(s, orient="split"))}
+ self.assertTrue(ujson.decode(ujson.encode(nested, orient="split")) == exp)
+
+ exp = {'s1': ujson.decode(ujson.encode(s, orient="records")),
+ 's2': ujson.decode(ujson.encode(s, orient="records"))}
+ self.assertTrue(ujson.decode(ujson.encode(nested, orient="records")) == exp)
+
+ exp = {'s1': ujson.decode(ujson.encode(s, orient="values")),
+ 's2': ujson.decode(ujson.encode(s, orient="values"))}
+ self.assertTrue(ujson.decode(ujson.encode(nested, orient="values")) == exp)
+
+ exp = {'s1': ujson.decode(ujson.encode(s, orient="index")),
+ 's2': ujson.decode(ujson.encode(s, orient="index"))}
+ self.assertTrue(ujson.decode(ujson.encode(nested, orient="index")) == exp)
+
+ def testIndex(self):
+ i = Index([23, 45, 18, 98, 43, 11], name="index")
+
+ # column indexed
+ outp = Index(ujson.decode(ujson.encode(i)))
+ self.assert_(i.equals(outp))
+
+ outp = Index(ujson.decode(ujson.encode(i), numpy=True))
+ self.assert_(i.equals(outp))
+
+ dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
+ outp = Index(**dec)
+ self.assert_(i.equals(outp))
+ self.assertTrue(i.name == outp.name)
+
+ dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split"),
+ numpy=True))
+ outp = Index(**dec)
+ self.assert_(i.equals(outp))
+ self.assertTrue(i.name == outp.name)
+
+ outp = Index(ujson.decode(ujson.encode(i, orient="values")))
+ self.assert_(i.equals(outp))
+
+ outp = Index(ujson.decode(ujson.encode(i, orient="values"), numpy=True))
+ self.assert_(i.equals(outp))
+
+ outp = Index(ujson.decode(ujson.encode(i, orient="records")))
+ self.assert_(i.equals(outp))
+
+ outp = Index(ujson.decode(ujson.encode(i, orient="records"), numpy=True))
+ self.assert_(i.equals(outp))
+
+ outp = Index(ujson.decode(ujson.encode(i, orient="index")))
+ self.assert_(i.equals(outp))
+
+ outp = Index(ujson.decode(ujson.encode(i, orient="index"), numpy=True))
+ self.assert_(i.equals(outp))
+
+ def test_datetimeindex(self):
+ from pandas.tseries.index import date_range, DatetimeIndex
+
+ rng = date_range('1/1/2000', periods=20)
+
+ encoded = ujson.encode(rng)
+ decoded = DatetimeIndex(np.array(ujson.decode(encoded)))
+
+ self.assert_(rng.equals(decoded))
+
+ ts = Series(np.random.randn(len(rng)), index=rng)
+ decoded = Series(ujson.decode(ujson.encode(ts)))
+ idx_values = decoded.index.values.astype(np.int64)
+ decoded.index = DatetimeIndex(idx_values)
+ tm.assert_series_equal(np.round(ts, 5), decoded)
+
+"""
+def test_decodeNumericIntFrcOverflow(self):
+input = "X.Y"
+raise NotImplementedError("Implement this test!")
+
+
+def test_decodeStringUnicodeEscape(self):
+input = "\u3131"
+raise NotImplementedError("Implement this test!")
+
+def test_decodeStringUnicodeBrokenEscape(self):
+input = "\u3131"
+raise NotImplementedError("Implement this test!")
+
+def test_decodeStringUnicodeInvalidEscape(self):
+input = "\u3131"
+raise NotImplementedError("Implement this test!")
+
+def test_decodeStringUTF8(self):
+input = "someutfcharacters"
+raise NotImplementedError("Implement this test!")
+
+
+
+"""
+
+def _clean_dict(d):
+ return dict((str(k), v) for k, v in d.iteritems())
+
+if __name__ == '__main__':
+ # unittest.main()
+ import nose
+ # nose.runmodule(argv=[__file__,'-vvs','-x', '--ipdb-failure'],
+ # exit=False)
+ nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
+ exit=False)
diff --git a/pandas/src/ujson/lib/ultrajson.h b/pandas/src/ujson/lib/ultrajson.h
new file mode 100644
index 0000000000000..eae665f00f03e
--- /dev/null
+++ b/pandas/src/ujson/lib/ultrajson.h
@@ -0,0 +1,298 @@
+/*
+Copyright (c) 2011, Jonas Tarnstrom and ESN Social Software AB
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. All advertising materials mentioning features or use of this software
+ must display the following acknowledgement:
+ This product includes software developed by ESN Social Software AB (www.esn.me).
+4. Neither the name of the ESN Social Software AB nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ESN SOCIAL SOFTWARE AB ''AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Portions of code from:
+MODP_ASCII - Ascii transformations (upper/lower, etc)
+http://code.google.com/p/stringencoders/
+Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
+
+*/
+
+/*
+Ultra fast JSON encoder and decoder
+Developed by Jonas Tarnstrom (jonas@esn.me).
+
+Encoder notes:
+------------------
+
+:: Cyclic references ::
+Cyclic referenced objects are not detected.
+Set JSONObjectEncoder.recursionMax to suitable value or make sure input object
+tree doesn't have cyclic references.
+
+*/
+
+#ifndef __ULTRAJSON_H__
+#define __ULTRAJSON_H__
+
+#include <stdio.h>
+#include <wchar.h>
+
+//#define JSON_DECODE_NUMERIC_AS_DOUBLE
+
+// Don't output any extra whitespaces when encoding
+#define JSON_NO_EXTRA_WHITESPACE
+
+// Max decimals to encode double floating point numbers with
+#ifndef JSON_DOUBLE_MAX_DECIMALS
+#define JSON_DOUBLE_MAX_DECIMALS 15
+#endif
+
+// Max recursion depth, default for encoder
+#ifndef JSON_MAX_RECURSION_DEPTH
+#define JSON_MAX_RECURSION_DEPTH 1024
+#endif
+
+/*
+Dictates and limits how much stack space for buffers UltraJSON will use before resorting to provided heap functions */
+#ifndef JSON_MAX_STACK_BUFFER_SIZE
+#define JSON_MAX_STACK_BUFFER_SIZE 131072
+#endif
+
+#ifdef _WIN32
+
+typedef __int64 JSINT64;
+typedef unsigned __int64 JSUINT64;
+
+typedef __int32 JSINT32;
+typedef unsigned __int32 JSUINT32;
+typedef unsigned __int8 JSUINT8;
+typedef unsigned __int16 JSUTF16;
+typedef unsigned __int32 JSUTF32;
+typedef __int64 JSLONG;
+
+#define EXPORTFUNCTION __declspec(dllexport)
+
+#define FASTCALL_MSVC __fastcall
+#define FASTCALL_ATTR
+#define INLINE_PREFIX __inline
+
+#else
+
+#include <sys/types.h>
+typedef int64_t JSINT64;
+typedef u_int64_t JSUINT64;
+
+typedef int32_t JSINT32;
+typedef u_int32_t JSUINT32;
+
+#define FASTCALL_MSVC
+#define FASTCALL_ATTR __attribute__((fastcall))
+#define INLINE_PREFIX inline
+
+typedef u_int8_t JSUINT8;
+typedef u_int16_t JSUTF16;
+typedef u_int32_t JSUTF32;
+
+typedef int64_t JSLONG;
+
+#define EXPORTFUNCTION
+#endif
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define __LITTLE_ENDIAN__
+#else
+
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define __BIG_ENDIAN__
+#endif
+
+#endif
+
+#if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
+#error "Endianess not supported"
+#endif
+
+enum JSTYPES
+{
+ JT_NULL, // NULL
+ JT_TRUE, //boolean true
+ JT_FALSE, //boolean false
+ JT_INT, //(JSINT32 (signed 32-bit))
+ JT_LONG, //(JSINT64 (signed 64-bit))
+ JT_DOUBLE, //(double)
+ JT_UTF8, //(char 8-bit)
+ JT_ARRAY, // Array structure
+ JT_OBJECT, // Key/Value structure
+ JT_INVALID, // Internal, do not return nor expect
+};
+
+typedef void * JSOBJ;
+typedef void * JSITER;
+
+typedef struct __JSONTypeContext
+{
+ int type;
+ void *encoder;
+ void *prv;
+} JSONTypeContext;
+
+/*
+Function pointer declarations, suitable for implementing UltraJSON */
+typedef void (*JSPFN_ITERBEGIN)(JSOBJ obj, JSONTypeContext *tc);
+typedef int (*JSPFN_ITERNEXT)(JSOBJ obj, JSONTypeContext *tc);
+typedef void (*JSPFN_ITEREND)(JSOBJ obj, JSONTypeContext *tc);
+typedef JSOBJ (*JSPFN_ITERGETVALUE)(JSOBJ obj, JSONTypeContext *tc);
+typedef char *(*JSPFN_ITERGETNAME)(JSOBJ obj, JSONTypeContext *tc, size_t *outLen);
+typedef void *(*JSPFN_MALLOC)(size_t size);
+typedef void (*JSPFN_FREE)(void *pptr);
+typedef void *(*JSPFN_REALLOC)(void *base, size_t size);
+
+typedef struct __JSONObjectEncoder
+{
+ void (*beginTypeContext)(JSOBJ obj, JSONTypeContext *tc);
+ void (*endTypeContext)(JSOBJ obj, JSONTypeContext *tc);
+ const char *(*getStringValue)(JSOBJ obj, JSONTypeContext *tc, size_t *_outLen);
+ JSINT64 (*getLongValue)(JSOBJ obj, JSONTypeContext *tc);
+ JSINT32 (*getIntValue)(JSOBJ obj, JSONTypeContext *tc);
+ double (*getDoubleValue)(JSOBJ obj, JSONTypeContext *tc);
+
+ /*
+ Begin iteration of an iterable object (JS_ARRAY or JS_OBJECT)
+ Implementor should setup iteration state in ti->prv
+ */
+ JSPFN_ITERBEGIN iterBegin;
+
+ /*
+ Retrieve next object in an iteration. Should return 0 to indicate iteration has reached end or 1 if there are more items.
+ Implementor is responsible for keeping state of the iteration. Use ti->prv fields for this
+ */
+ JSPFN_ITERNEXT iterNext;
+
+ /*
+ Ends the iteration of an iterable object.
+ Any iteration state stored in ti->prv can be freed here
+ */
+ JSPFN_ITEREND iterEnd;
+
+ /*
+ Returns a reference to the value object of an iterator
+ The implementor is responsible for the life-cycle of the returned string. Use iterNext/iterEnd and ti->prv to keep track of current object
+ */
+ JSPFN_ITERGETVALUE iterGetValue;
+
+ /*
+ Return name of iterator.
+ The implementor is responsible for the life-cycle of the returned string. Use iterNext/iterEnd and ti->prv to keep track of current object
+ */
+ JSPFN_ITERGETNAME iterGetName;
+
+ /*
+ Release a value as indicated by setting ti->release = 1 in the previous getValue call.
+ The ti->prv array should contain the necessary context to release the value
+ */
+ void (*releaseObject)(JSOBJ obj);
+
+ /* Library functions
+ Set to NULL to use STDLIB malloc,realloc,free */
+ JSPFN_MALLOC malloc;
+ JSPFN_REALLOC realloc;
+ JSPFN_FREE free;
+
+ /*
+ Configuration for max recursion, set to 0 to use default (see JSON_MAX_RECURSION_DEPTH)*/
+ int recursionMax;
+
+ /*
+ Configuration for max decimals of double floating point numbers to encode (0-9) */
+ int doublePrecision;
+
+ /*
+ If true output will be ASCII with all characters above 127 encoded as \uXXXX. If false output will be UTF-8 or what ever charset strings are brought as */
+ int forceASCII;
+
+
+ /*
+ Set to an error message if error occurred */
+ const char *errorMsg;
+ JSOBJ errorObj;
+
+ /* Buffer stuff */
+ char *start;
+ char *offset;
+ char *end;
+ int heap;
+ int level;
+
+} JSONObjectEncoder;
+
+
+/*
+Encode an object structure into JSON.
+
+Arguments:
+obj - An anonymous type representing the object
+enc - Function definitions for querying JSOBJ type
+buffer - Preallocated buffer to store result in. If NULL function allocates own buffer
+cbBuffer - Length of buffer (ignored if buffer is NULL)
+
+Returns:
+Encoded JSON object as a null terminated char string.
+
+NOTE:
+If the supplied buffer wasn't enough to hold the result the function will allocate a new buffer.
+Life cycle of the provided buffer must still be handled by caller.
+
+If the return value doesn't equal the specified buffer caller must release the memory using
+JSONObjectEncoder.free or free() as specified when calling this function.
+*/
+EXPORTFUNCTION char *JSON_EncodeObject(JSOBJ obj, JSONObjectEncoder *enc, char *buffer, size_t cbBuffer);
+
+
+
+typedef struct __JSONObjectDecoder
+{
+ JSOBJ (*newString)(wchar_t *start, wchar_t *end);
+ int (*objectAddKey)(JSOBJ obj, JSOBJ name, JSOBJ value);
+ int (*arrayAddItem)(JSOBJ obj, JSOBJ value);
+ JSOBJ (*newTrue)(void);
+ JSOBJ (*newFalse)(void);
+ JSOBJ (*newNull)(void);
+ JSOBJ (*newObject)(void *decoder);
+ JSOBJ (*endObject)(JSOBJ obj);
+ JSOBJ (*newArray)(void *decoder);
+ JSOBJ (*endArray)(JSOBJ obj);
+ JSOBJ (*newInt)(JSINT32 value);
+ JSOBJ (*newLong)(JSINT64 value);
+ JSOBJ (*newDouble)(double value);
+ void (*releaseObject)(JSOBJ obj, void *decoder);
+ JSPFN_MALLOC malloc;
+ JSPFN_FREE free;
+ JSPFN_REALLOC realloc;
+
+ char *errorStr;
+ char *errorOffset;
+
+
+
+} JSONObjectDecoder;
+
+EXPORTFUNCTION JSOBJ JSON_DecodeObject(JSONObjectDecoder *dec, const char *buffer, size_t cbBuffer);
+
+#endif
diff --git a/pandas/src/ujson/lib/ultrajsondec.c b/pandas/src/ujson/lib/ultrajsondec.c
new file mode 100644
index 0000000000000..eda30f3fea839
--- /dev/null
+++ b/pandas/src/ujson/lib/ultrajsondec.c
@@ -0,0 +1,845 @@
+/*
+Copyright (c) 2011, Jonas Tarnstrom and ESN Social Software AB
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. All advertising materials mentioning features or use of this software
+ must display the following acknowledgement:
+ This product includes software developed by ESN Social Software AB (www.esn.me).
+4. Neither the name of the ESN Social Software AB nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ESN SOCIAL SOFTWARE AB ''AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Portions of code from:
+MODP_ASCII - Ascii transformations (upper/lower, etc)
+http://code.google.com/p/stringencoders/
+Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
+
+*/
+
+#include "ultrajson.h"
+#include <math.h>
+#include <assert.h>
+#include <string.h>
+#include <limits.h>
+#include <wchar.h>
+
+struct DecoderState
+{
+ char *start;
+ char *end;
+ wchar_t *escStart;
+ wchar_t *escEnd;
+ int escHeap;
+ int lastType;
+ JSONObjectDecoder *dec;
+};
+
+JSOBJ FASTCALL_MSVC decode_any( struct DecoderState *ds) FASTCALL_ATTR;
+typedef JSOBJ (*PFN_DECODER)( struct DecoderState *ds);
+#define RETURN_JSOBJ_NULLCHECK(_expr) return(_expr);
+
+double createDouble(double intNeg, double intValue, double frcValue, int frcDecimalCount)
+{
+ static const double g_pow10[] = {1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000, 10000000000, 100000000000, 1000000000000, 10000000000000, 100000000000000, 1000000000000000};
+
+ return (intValue + (frcValue / g_pow10[frcDecimalCount])) * intNeg;
+}
+
+static JSOBJ SetError( struct DecoderState *ds, int offset, const char *message)
+{
+ ds->dec->errorOffset = ds->start + offset;
+ ds->dec->errorStr = (char *) message;
+ return NULL;
+}
+
+
+FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_numeric ( struct DecoderState *ds)
+{
+#ifdef JSON_DECODE_NUMERIC_AS_DOUBLE
+ double intNeg = 1;
+ double intValue;
+#else
+ int intNeg = 1;
+ JSLONG intValue;
+#endif
+
+ double expNeg;
+ int chr;
+ int decimalCount = 0;
+ double frcValue = 0.0;
+ double expValue;
+ char *offset = ds->start;
+
+ if (*(offset) == '-')
+ {
+ offset ++;
+ intNeg = -1;
+ }
+
+ // Scan integer part
+ intValue = 0;
+
+ while (1)
+ {
+ chr = (int) (unsigned char) *(offset);
+
+ switch (chr)
+ {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ //FIXME: Check for arithmetic overflow here
+ //PERF: Don't do 64-bit arithmetic here unless we know we have to
+#ifdef JSON_DECODE_NUMERIC_AS_DOUBLE
+ intValue = intValue * 10.0 + (double) (chr - 48);
+#else
+ intValue = intValue * 10LL + (JSLONG) (chr - 48);
+#endif
+ offset ++;
+ break;
+
+ case '.':
+ offset ++;
+ goto DECODE_FRACTION;
+ break;
+
+ case 'e':
+ case 'E':
+ offset ++;
+ goto DECODE_EXPONENT;
+ break;
+
+ default:
+ goto BREAK_INT_LOOP;
+ break;
+ }
+ }
+
+BREAK_INT_LOOP:
+
+ ds->lastType = JT_INT;
+ ds->start = offset;
+
+ //If input string is LONGLONG_MIN here the value is already negative so we should not flip it
+
+#ifdef JSON_DECODE_NUMERIC_AS_DOUBLE
+#else
+ if (intValue < 0)
+ {
+ intNeg = 1;
+ }
+#endif
+
+ //dbg1 = (intValue * intNeg);
+ //dbg2 = (JSLONG) dbg1;
+
+#ifdef JSON_DECODE_NUMERIC_AS_DOUBLE
+ if (intValue > (double) INT_MAX || intValue < (double) INT_MIN)
+#else
+ if ( (intValue >> 31))
+#endif
+ {
+ RETURN_JSOBJ_NULLCHECK(ds->dec->newLong( (JSINT64) (intValue * (JSINT64) intNeg)));
+ }
+ else
+ {
+ RETURN_JSOBJ_NULLCHECK(ds->dec->newInt( (JSINT32) (intValue * intNeg)));
+ }
+
+
+
+DECODE_FRACTION:
+
+ // Scan fraction part
+ frcValue = 0.0;
+ while (1)
+ {
+ chr = (int) (unsigned char) *(offset);
+
+ switch (chr)
+ {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ if (decimalCount < JSON_DOUBLE_MAX_DECIMALS)
+ {
+ frcValue = frcValue * 10.0 + (double) (chr - 48);
+ decimalCount ++;
+ }
+ offset ++;
+ break;
+
+ case 'e':
+ case 'E':
+ offset ++;
+ goto DECODE_EXPONENT;
+ break;
+
+ default:
+ goto BREAK_FRC_LOOP;
+ }
+ }
+
+BREAK_FRC_LOOP:
+
+ if (intValue < 0)
+ {
+ intNeg = 1;
+ }
+
+ //FIXME: Check for arithmetic overflow here
+ ds->lastType = JT_DOUBLE;
+ ds->start = offset;
+ RETURN_JSOBJ_NULLCHECK(ds->dec->newDouble (createDouble( (double) intNeg, (double) intValue, frcValue, decimalCount)));
+
+DECODE_EXPONENT:
+ expNeg = 1.0;
+
+ if (*(offset) == '-')
+ {
+ expNeg = -1.0;
+ offset ++;
+ }
+ else
+ if (*(offset) == '+')
+ {
+ expNeg = +1.0;
+ offset ++;
+ }
+
+ expValue = 0.0;
+
+ while (1)
+ {
+ chr = (int) (unsigned char) *(offset);
+
+ switch (chr)
+ {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ expValue = expValue * 10.0 + (double) (chr - 48);
+ offset ++;
+ break;
+
+ default:
+ goto BREAK_EXP_LOOP;
+
+ }
+ }
+
+BREAK_EXP_LOOP:
+
+#ifdef JSON_DECODE_NUMERIC_AS_DOUBLE
+#else
+ if (intValue < 0)
+ {
+ intNeg = 1;
+ }
+#endif
+
+ //FIXME: Check for arithmetic overflow here
+ ds->lastType = JT_DOUBLE;
+ ds->start = offset;
+ RETURN_JSOBJ_NULLCHECK(ds->dec->newDouble (createDouble( (double) intNeg, (double) intValue , frcValue, decimalCount) * pow(10.0, expValue * expNeg)));
+}
+
+FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_true ( struct DecoderState *ds)
+{
+ char *offset = ds->start;
+ offset ++;
+
+ if (*(offset++) != 'r')
+ goto SETERROR;
+ if (*(offset++) != 'u')
+ goto SETERROR;
+ if (*(offset++) != 'e')
+ goto SETERROR;
+
+ ds->lastType = JT_TRUE;
+ ds->start = offset;
+ RETURN_JSOBJ_NULLCHECK(ds->dec->newTrue());
+
+SETERROR:
+ return SetError(ds, -1, "Unexpected character found when decoding 'true'");
+}
+
+FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_false ( struct DecoderState *ds)
+{
+ char *offset = ds->start;
+ offset ++;
+
+ if (*(offset++) != 'a')
+ goto SETERROR;
+ if (*(offset++) != 'l')
+ goto SETERROR;
+ if (*(offset++) != 's')
+ goto SETERROR;
+ if (*(offset++) != 'e')
+ goto SETERROR;
+
+ ds->lastType = JT_FALSE;
+ ds->start = offset;
+ RETURN_JSOBJ_NULLCHECK(ds->dec->newFalse());
+
+SETERROR:
+ return SetError(ds, -1, "Unexpected character found when decoding 'false'");
+
+}
+
+
+FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_null ( struct DecoderState *ds)
+{
+ char *offset = ds->start;
+ offset ++;
+
+ if (*(offset++) != 'u')
+ goto SETERROR;
+ if (*(offset++) != 'l')
+ goto SETERROR;
+ if (*(offset++) != 'l')
+ goto SETERROR;
+
+ ds->lastType = JT_NULL;
+ ds->start = offset;
+ RETURN_JSOBJ_NULLCHECK(ds->dec->newNull());
+
+SETERROR:
+ return SetError(ds, -1, "Unexpected character found when decoding 'null'");
+}
+
+FASTCALL_ATTR void FASTCALL_MSVC SkipWhitespace(struct DecoderState *ds)
+{
+ char *offset = ds->start;
+
+ while (1)
+ {
+ switch (*offset)
+ {
+ case ' ':
+ case '\t':
+ case '\r':
+ case '\n':
+ offset ++;
+ break;
+
+ default:
+ ds->start = offset;
+ return;
+ }
+ }
+}
+
+
+enum DECODESTRINGSTATE
+{
+ DS_ISNULL = 0x32,
+ DS_ISQUOTE,
+ DS_ISESCAPE,
+ DS_UTFLENERROR,
+
+};
+
+static const JSUINT8 g_decoderLookup[256] =
+{
+/* 0x00 */ DS_ISNULL, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0x10 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0x20 */ 1, 1, DS_ISQUOTE, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, DS_ISESCAPE, 1, 1, 1,
+/* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0x80 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0x90 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0xa0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0xb0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0xc0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+/* 0xd0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+/* 0xe0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+/* 0xf0 */ 4, 4, 4, 4, 4, 4, 4, 4, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR, DS_UTFLENERROR,
+};
+
+
+FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_string ( struct DecoderState *ds)
+{
+ JSUTF16 sur[2] = { 0 };
+ int iSur = 0;
+ int index;
+ wchar_t *escOffset;
+ size_t escLen = (ds->escEnd - ds->escStart);
+ JSUINT8 *inputOffset;
+ JSUINT8 oct;
+ JSUTF32 ucs;
+ ds->lastType = JT_INVALID;
+ ds->start ++;
+
+ if ( (ds->end - ds->start) > escLen)
+ {
+ size_t newSize = (ds->end - ds->start);
+
+ if (ds->escHeap)
+ {
+ ds->escStart = (wchar_t *) ds->dec->realloc (ds->escStart, newSize * sizeof(wchar_t));
+ if (!ds->escStart)
+ {
+ return SetError(ds, -1, "Could not reserve memory block");
+ }
+ }
+ else
+ {
+ wchar_t *oldStart = ds->escStart;
+ ds->escHeap = 1;
+ ds->escStart = (wchar_t *) ds->dec->malloc (newSize * sizeof(wchar_t));
+ if (!ds->escStart)
+ {
+ return SetError(ds, -1, "Could not reserve memory block");
+ }
+ memcpy (ds->escStart, oldStart, escLen * sizeof(wchar_t));
+ }
+
+ ds->escEnd = ds->escStart + newSize;
+ }
+
+ escOffset = ds->escStart;
+ inputOffset = ds->start;
+
+ while(1)
+ {
+ switch (g_decoderLookup[(JSUINT8)(*inputOffset)])
+ {
+ case DS_ISNULL:
+ return SetError(ds, -1, "Unmatched ''\"' when when decoding 'string'");
+
+ case DS_ISQUOTE:
+ ds->lastType = JT_UTF8;
+ inputOffset ++;
+ ds->start += ( (char *) inputOffset - (ds->start));
+ RETURN_JSOBJ_NULLCHECK(ds->dec->newString(ds->escStart, escOffset));
+
+ case DS_UTFLENERROR:
+ return SetError (ds, -1, "Invalid UTF-8 sequence length when decoding 'string'");
+
+ case DS_ISESCAPE:
+ inputOffset ++;
+ switch (*inputOffset)
+ {
+ case '\\': *(escOffset++) = L'\\'; inputOffset++; continue;
+ case '\"': *(escOffset++) = L'\"'; inputOffset++; continue;
+ case '/': *(escOffset++) = L'/'; inputOffset++; continue;
+ case 'b': *(escOffset++) = L'\b'; inputOffset++; continue;
+ case 'f': *(escOffset++) = L'\f'; inputOffset++; continue;
+ case 'n': *(escOffset++) = L'\n'; inputOffset++; continue;
+ case 'r': *(escOffset++) = L'\r'; inputOffset++; continue;
+ case 't': *(escOffset++) = L'\t'; inputOffset++; continue;
+
+ case 'u':
+ {
+ int index;
+ inputOffset ++;
+
+ for (index = 0; index < 4; index ++)
+ {
+ switch (*inputOffset)
+ {
+ case '\0': return SetError (ds, -1, "Unterminated unicode escape sequence when decoding 'string'");
+ default: return SetError (ds, -1, "Unexpected character in unicode escape sequence when decoding 'string'");
+
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ sur[iSur] = (sur[iSur] << 4) + (JSUTF16) (*inputOffset - '0');
+ break;
+
+ case 'a':
+ case 'b':
+ case 'c':
+ case 'd':
+ case 'e':
+ case 'f':
+ sur[iSur] = (sur[iSur] << 4) + 10 + (JSUTF16) (*inputOffset - 'a');
+ break;
+
+ case 'A':
+ case 'B':
+ case 'C':
+ case 'D':
+ case 'E':
+ case 'F':
+ sur[iSur] = (sur[iSur] << 4) + 10 + (JSUTF16) (*inputOffset - 'A');
+ break;
+ }
+
+ inputOffset ++;
+ }
+
+
+ if (iSur == 0)
+ {
+ if((sur[iSur] & 0xfc00) == 0xd800)
+ {
+ // First of a surrogate pair, continue parsing
+ iSur ++;
+ break;
+ }
+ (*escOffset++) = (wchar_t) sur[iSur];
+ iSur = 0;
+ }
+ else
+ {
+ // Decode pair
+ if ((sur[1] & 0xfc00) != 0xdc00)
+ {
+ return SetError (ds, -1, "Unpaired high surrogate when decoding 'string'");
+ }
+
+#if WCHAR_MAX == 0xffff
+ (*escOffset++) = (wchar_t) sur[0];
+ (*escOffset++) = (wchar_t) sur[1];
+#else
+ (*escOffset++) = (wchar_t) 0x10000 + (((sur[0] - 0xd800) << 10) | (sur[1] - 0xdc00));
+#endif
+ iSur = 0;
+ }
+ break;
+ }
+
+ case '\0': return SetError(ds, -1, "Unterminated escape sequence when decoding 'string'");
+ default: return SetError(ds, -1, "Unrecognized escape sequence when decoding 'string'");
+ }
+ break;
+
+ case 1:
+ *(escOffset++) = (wchar_t) (*inputOffset++);
+ break;
+
+ case 2:
+ {
+ ucs = (*inputOffset++) & 0x1f;
+ ucs <<= 6;
+ if (((*inputOffset) & 0x80) != 0x80)
+ {
+ return SetError(ds, -1, "Invalid octet in UTF-8 sequence when decoding 'string'");
+ }
+ ucs |= (*inputOffset++) & 0x3f;
+ if (ucs < 0x80) return SetError (ds, -1, "Overlong 2 byte UTF-8 sequence detected when decoding 'string'");
+ *(escOffset++) = (wchar_t) ucs;
+ break;
+ }
+
+ case 3:
+ {
+ JSUTF32 ucs = 0;
+ ucs |= (*inputOffset++) & 0x0f;
+
+ for (index = 0; index < 2; index ++)
+ {
+ ucs <<= 6;
+ oct = (*inputOffset++);
+
+ if ((oct & 0x80) != 0x80)
+ {
+ return SetError(ds, -1, "Invalid octet in UTF-8 sequence when decoding 'string'");
+ }
+
+ ucs |= oct & 0x3f;
+ }
+
+ if (ucs < 0x800) return SetError (ds, -1, "Overlong 3 byte UTF-8 sequence detected when encoding string");
+ *(escOffset++) = (wchar_t) ucs;
+ break;
+ }
+
+ case 4:
+ {
+ JSUTF32 ucs = 0;
+ ucs |= (*inputOffset++) & 0x07;
+
+ for (index = 0; index < 3; index ++)
+ {
+ ucs <<= 6;
+ oct = (*inputOffset++);
+
+ if ((oct & 0x80) != 0x80)
+ {
+ return SetError(ds, -1, "Invalid octet in UTF-8 sequence when decoding 'string'");
+ }
+
+ ucs |= oct & 0x3f;
+ }
+
+ if (ucs < 0x10000) return SetError (ds, -1, "Overlong 4 byte UTF-8 sequence detected when decoding 'string'");
+
+ #if WCHAR_MAX == 0xffff
+ if (ucs >= 0x10000)
+ {
+ ucs -= 0x10000;
+ *(escOffset++) = (ucs >> 10) + 0xd800;
+ *(escOffset++) = (ucs & 0x3ff) + 0xdc00;
+ }
+ else
+ {
+ *(escOffset++) = (wchar_t) ucs;
+ }
+ #else
+ *(escOffset++) = (wchar_t) ucs;
+ #endif
+ break;
+ }
+ }
+ }
+}
+
+FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_array( struct DecoderState *ds)
+{
+ JSOBJ itemValue;
+ JSOBJ newObj = ds->dec->newArray(ds->dec);
+
+ ds->lastType = JT_INVALID;
+ ds->start ++;
+
+ while (1)//(*ds->start) != '\0')
+ {
+ SkipWhitespace(ds);
+
+ if ((*ds->start) == ']')
+ {
+ ds->start++;
+ return ds->dec->endArray(newObj);
+ }
+
+ itemValue = decode_any(ds);
+
+ if (itemValue == NULL)
+ {
+ ds->dec->releaseObject(newObj, ds->dec);
+ return NULL;
+ }
+
+ if (!ds->dec->arrayAddItem (newObj, itemValue))
+ {
+ ds->dec->releaseObject(newObj, ds->dec);
+ return NULL;
+ }
+
+ SkipWhitespace(ds);
+
+ switch (*(ds->start++))
+ {
+ case ']':
+ return ds->dec->endArray(newObj);
+
+ case ',':
+ break;
+
+ default:
+ ds->dec->releaseObject(newObj, ds->dec);
+ return SetError(ds, -1, "Unexpected character in found when decoding array value");
+ }
+ }
+
+ ds->dec->releaseObject(newObj, ds->dec);
+ return SetError(ds, -1, "Unmatched ']' when decoding 'array'");
+}
+
+
+
+FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_object( struct DecoderState *ds)
+{
+ JSOBJ itemName;
+ JSOBJ itemValue;
+ JSOBJ newObj = ds->dec->newObject(ds->dec);
+
+ ds->start ++;
+
+ while (1)
+ {
+ SkipWhitespace(ds);
+
+ if ((*ds->start) == '}')
+ {
+ ds->start ++;
+ return ds->dec->endObject(newObj);
+ }
+
+ ds->lastType = JT_INVALID;
+ itemName = decode_any(ds);
+
+ if (itemName == NULL)
+ {
+ ds->dec->releaseObject(newObj, ds->dec);
+ return NULL;
+ }
+
+ if (ds->lastType != JT_UTF8)
+ {
+ ds->dec->releaseObject(newObj, ds->dec);
+ ds->dec->releaseObject(itemName, ds->dec);
+ return SetError(ds, -1, "Key name of object must be 'string' when decoding 'object'");
+ }
+
+ SkipWhitespace(ds);
+
+ if (*(ds->start++) != ':')
+ {
+ ds->dec->releaseObject(newObj, ds->dec);
+ ds->dec->releaseObject(itemName, ds->dec);
+ return SetError(ds, -1, "No ':' found when decoding object value");
+ }
+
+ SkipWhitespace(ds);
+
+ itemValue = decode_any(ds);
+
+ if (itemValue == NULL)
+ {
+ ds->dec->releaseObject(newObj, ds->dec);
+ ds->dec->releaseObject(itemName, ds->dec);
+ return NULL;
+ }
+
+ if (!ds->dec->objectAddKey (newObj, itemName, itemValue))
+ {
+ ds->dec->releaseObject(newObj, ds->dec);
+ ds->dec->releaseObject(itemName, ds->dec);
+ ds->dec->releaseObject(itemValue, ds->dec);
+ return NULL;
+ }
+
+ SkipWhitespace(ds);
+
+ switch (*(ds->start++))
+ {
+ case '}':
+ return ds->dec->endObject(newObj);
+
+ case ',':
+ break;
+
+ default:
+ ds->dec->releaseObject(newObj, ds->dec);
+ return SetError(ds, -1, "Unexpected character in found when decoding object value");
+ }
+ }
+
+ ds->dec->releaseObject(newObj, ds->dec);
+ return SetError(ds, -1, "Unmatched '}' when decoding object");
+}
+
+FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_any(struct DecoderState *ds)
+{
+ while (1)
+ {
+ switch (*ds->start)
+ {
+ case '\"':
+ return decode_string (ds);
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ case '-':
+ return decode_numeric (ds);
+
+ case '[': return decode_array (ds);
+ case '{': return decode_object (ds);
+ case 't': return decode_true (ds);
+ case 'f': return decode_false (ds);
+ case 'n': return decode_null (ds);
+
+ case ' ':
+ case '\t':
+ case '\r':
+ case '\n':
+ // White space
+ ds->start ++;
+ break;
+
+ default:
+ return SetError(ds, -1, "Expected object or value");
+ }
+ }
+}
+
+
+JSOBJ JSON_DecodeObject(JSONObjectDecoder *dec, const char *buffer, size_t cbBuffer)
+{
+
+ /*
+ FIXME: Base the size of escBuffer on that of cbBuffer so that the unicode escaping doesn't run into the wall each time */
+ struct DecoderState ds;
+ wchar_t escBuffer[(JSON_MAX_STACK_BUFFER_SIZE / sizeof(wchar_t))];
+ JSOBJ ret;
+
+ ds.start = (char *) buffer;
+ ds.end = ds.start + cbBuffer;
+
+ ds.escStart = escBuffer;
+ ds.escEnd = ds.escStart + (JSON_MAX_STACK_BUFFER_SIZE / sizeof(wchar_t));
+ ds.escHeap = 0;
+ ds.dec = dec;
+ ds.dec->errorStr = NULL;
+ ds.dec->errorOffset = NULL;
+
+ ds.dec = dec;
+
+ ret = decode_any (&ds);
+
+ if (ds.escHeap)
+ {
+ dec->free(ds.escStart);
+ }
+ return ret;
+}
diff --git a/pandas/src/ujson/lib/ultrajsonenc.c b/pandas/src/ujson/lib/ultrajsonenc.c
new file mode 100644
index 0000000000000..22871513870b7
--- /dev/null
+++ b/pandas/src/ujson/lib/ultrajsonenc.c
@@ -0,0 +1,891 @@
+/*
+Copyright (c) 2011, Jonas Tarnstrom and ESN Social Software AB
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. All advertising materials mentioning features or use of this software
+ must display the following acknowledgement:
+ This product includes software developed by ESN Social Software AB (www.esn.me).
+4. Neither the name of the ESN Social Software AB nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ESN SOCIAL SOFTWARE AB ''AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Portions of code from:
+MODP_ASCII - Ascii transformations (upper/lower, etc)
+http://code.google.com/p/stringencoders/
+Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved.
+
+*/
+
+#include "ultrajson.h"
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+#include <stdlib.h>
+#include <math.h>
+
+#include <float.h>
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+static const double g_pow10[] = {1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000, 10000000000, 100000000000, 1000000000000, 10000000000000, 100000000000000, 1000000000000000};
+static const char g_hexChars[] = "0123456789abcdef";
+static const char g_escapeChars[] = "0123456789\\b\\t\\n\\f\\r\\\"\\\\\\/";
+
+
+/*
+FIXME: While this is fine dandy and working it's a magic value mess which probably only the author understands.
+Needs a cleanup and more documentation */
+
+/*
+Table for pure ascii output escaping all characters above 127 to \uXXXX */
+static const JSUINT8 g_asciiOutputTable[256] =
+{
+/* 0x00 */ 0, 30, 30, 30, 30, 30, 30, 30, 10, 12, 14, 30, 16, 18, 30, 30,
+/* 0x10 */ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
+/* 0x20 */ 1, 1, 20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 24,
+/* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 22, 1, 1, 1,
+/* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0x80 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0x90 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0xa0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0xb0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+/* 0xc0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+/* 0xd0 */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+/* 0xe0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+/* 0xf0 */ 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 1, 1
+};
+
+
+static void SetError (JSOBJ obj, JSONObjectEncoder *enc, const char *message)
+{
+ enc->errorMsg = message;
+ enc->errorObj = obj;
+}
+
+/*
+FIXME: Keep track of how big these get across several encoder calls and try to make an estimate
+That way we won't run our head into the wall each call */
+void Buffer_Realloc (JSONObjectEncoder *enc, size_t cbNeeded)
+{
+ size_t curSize = enc->end - enc->start;
+ size_t newSize = curSize * 2;
+ size_t offset = enc->offset - enc->start;
+
+ while (newSize < curSize + cbNeeded)
+ {
+ newSize *= 2;
+ }
+
+ if (enc->heap)
+ {
+ enc->start = (char *) enc->realloc (enc->start, newSize);
+ if (!enc->start)
+ {
+ SetError (NULL, enc, "Could not reserve memory block");
+ return;
+ }
+ }
+ else
+ {
+ char *oldStart = enc->start;
+ enc->heap = 1;
+ enc->start = (char *) enc->malloc (newSize);
+ if (!enc->start)
+ {
+ SetError (NULL, enc, "Could not reserve memory block");
+ return;
+ }
+ memcpy (enc->start, oldStart, offset);
+ }
+ enc->offset = enc->start + offset;
+ enc->end = enc->start + newSize;
+}
+
+FASTCALL_ATTR INLINE_PREFIX void FASTCALL_MSVC Buffer_AppendShortHexUnchecked (char *outputOffset, unsigned short value)
+{
+ *(outputOffset++) = g_hexChars[(value & 0xf000) >> 12];
+ *(outputOffset++) = g_hexChars[(value & 0x0f00) >> 8];
+ *(outputOffset++) = g_hexChars[(value & 0x00f0) >> 4];
+ *(outputOffset++) = g_hexChars[(value & 0x000f) >> 0];
+}
+
+int Buffer_EscapeStringUnvalidated (JSOBJ obj, JSONObjectEncoder *enc, const char *io, const char *end)
+{
+ char *of = (char *) enc->offset;
+
+ while (1)
+ {
+ switch (*io)
+ {
+ case 0x00:
+ if (io < end)
+ {
+ *(of++) = '\\';
+ *(of++) = 'u';
+ *(of++) = '0';
+ *(of++) = '0';
+ *(of++) = '0';
+ *(of++) = '0';
+ break;
+ }
+ else
+ {
+ enc->offset += (of - enc->offset);
+ return TRUE;
+ }
+
+ case '\"': (*of++) = '\\'; (*of++) = '\"'; break;
+ case '\\': (*of++) = '\\'; (*of++) = '\\'; break;
+ case '/': (*of++) = '\\'; (*of++) = '/'; break;
+ case '\b': (*of++) = '\\'; (*of++) = 'b'; break;
+ case '\f': (*of++) = '\\'; (*of++) = 'f'; break;
+ case '\n': (*of++) = '\\'; (*of++) = 'n'; break;
+ case '\r': (*of++) = '\\'; (*of++) = 'r'; break;
+ case '\t': (*of++) = '\\'; (*of++) = 't'; break;
+
+ case 0x01:
+ case 0x02:
+ case 0x03:
+ case 0x04:
+ case 0x05:
+ case 0x06:
+ case 0x07:
+ case 0x0b:
+ case 0x0e:
+ case 0x0f:
+ case 0x10:
+ case 0x11:
+ case 0x12:
+ case 0x13:
+ case 0x14:
+ case 0x15:
+ case 0x16:
+ case 0x17:
+ case 0x18:
+ case 0x19:
+ case 0x1a:
+ case 0x1b:
+ case 0x1c:
+ case 0x1d:
+ case 0x1e:
+ case 0x1f:
+ *(of++) = '\\';
+ *(of++) = 'u';
+ *(of++) = '0';
+ *(of++) = '0';
+ *(of++) = g_hexChars[ (unsigned char) (((*io) & 0xf0) >> 4)];
+ *(of++) = g_hexChars[ (unsigned char) ((*io) & 0x0f)];
+ break;
+
+ default: (*of++) = (*io); break;
+ }
+
+ io++;
+ }
+
+ return FALSE;
+}
+
+
+/*
+FIXME:
+This code only works with Little and Big Endian
+
+FIXME: The JSON spec says escape "/" but none of the others do and we don't
+want to be left alone doing it so we don't :)
+
+*/
+int Buffer_EscapeStringValidated (JSOBJ obj, JSONObjectEncoder *enc, const char *io, const char *end)
+{
+ JSUTF32 ucs;
+ char *of = (char *) enc->offset;
+
+ while (1)
+ {
+
+ //JSUINT8 chr = (unsigned char) *io;
+ JSUINT8 utflen = g_asciiOutputTable[(unsigned char) *io];
+
+ switch (utflen)
+ {
+ case 0:
+ {
+ if (io < end)
+ {
+ *(of++) = '\\';
+ *(of++) = 'u';
+ *(of++) = '0';
+ *(of++) = '0';
+ *(of++) = '0';
+ *(of++) = '0';
+ io ++;
+ continue;
+ }
+ else
+ {
+ enc->offset += (of - enc->offset);
+ return TRUE;
+ }
+ }
+
+ case 1:
+ {
+ *(of++)= (*io++);
+ continue;
+ }
+
+ case 2:
+ {
+ JSUTF32 in;
+ JSUTF16 in16;
+
+ if (end - io < 1)
+ {
+ enc->offset += (of - enc->offset);
+ SetError (obj, enc, "Unterminated UTF-8 sequence when encoding string");
+ return FALSE;
+ }
+
+ memcpy(&in16, io, sizeof(JSUTF16));
+ in = (JSUTF32) in16;
+
+#ifdef __LITTLE_ENDIAN__
+ ucs = ((in & 0x1f) << 6) | ((in >> 8) & 0x3f);
+#else
+ ucs = ((in & 0x1f00) >> 2) | (in & 0x3f);
+#endif
+
+ if (ucs < 0x80)
+ {
+ enc->offset += (of - enc->offset);
+ SetError (obj, enc, "Overlong 2 byte UTF-8 sequence detected when encoding string");
+ return FALSE;
+ }
+
+ io += 2;
+ break;
+ }
+
+ case 3:
+ {
+ JSUTF32 in;
+ JSUTF16 in16;
+ JSUINT8 in8;
+
+ if (end - io < 2)
+ {
+ enc->offset += (of - enc->offset);
+ SetError (obj, enc, "Unterminated UTF-8 sequence when encoding string");
+ return FALSE;
+ }
+
+ memcpy(&in16, io, sizeof(JSUTF16));
+ memcpy(&in8, io + 2, sizeof(JSUINT8));
+#ifdef __LITTLE_ENDIAN__
+ in = (JSUTF32) in16;
+ in |= in8 << 16;
+ ucs = ((in & 0x0f) << 12) | ((in & 0x3f00) >> 2) | ((in & 0x3f0000) >> 16);
+#else
+ in = in16 << 8;
+ in |= in8;
+ ucs = ((in & 0x0f0000) >> 4) | ((in & 0x3f00) >> 2) | (in & 0x3f);
+#endif
+
+
+ if (ucs < 0x800)
+ {
+ enc->offset += (of - enc->offset);
+ SetError (obj, enc, "Overlong 3 byte UTF-8 sequence detected when encoding string");
+ return FALSE;
+ }
+
+ io += 3;
+ break;
+ }
+ case 4:
+ {
+ JSUTF32 in;
+
+ if (end - io < 3)
+ {
+ enc->offset += (of - enc->offset);
+ SetError (obj, enc, "Unterminated UTF-8 sequence when encoding string");
+ return FALSE;
+ }
+
+ memcpy(&in, io, sizeof(JSUTF32));
+#ifdef __LITTLE_ENDIAN__
+ ucs = ((in & 0x07) << 18) | ((in & 0x3f00) << 4) | ((in & 0x3f0000) >> 10) | ((in & 0x3f000000) >> 24);
+#else
+ ucs = ((in & 0x07000000) >> 6) | ((in & 0x3f0000) >> 4) | ((in & 0x3f00) >> 2) | (in & 0x3f);
+#endif
+ if (ucs < 0x10000)
+ {
+ enc->offset += (of - enc->offset);
+ SetError (obj, enc, "Overlong 4 byte UTF-8 sequence detected when encoding string");
+ return FALSE;
+ }
+
+ io += 4;
+ break;
+ }
+
+
+ case 5:
+ case 6:
+ enc->offset += (of - enc->offset);
+ SetError (obj, enc, "Unsupported UTF-8 sequence length when encoding string");
+ return FALSE;
+
+ case 30:
+ // \uXXXX encode
+ *(of++) = '\\';
+ *(of++) = 'u';
+ *(of++) = '0';
+ *(of++) = '0';
+ *(of++) = g_hexChars[ (unsigned char) (((*io) & 0xf0) >> 4)];
+ *(of++) = g_hexChars[ (unsigned char) ((*io) & 0x0f)];
+ io ++;
+ continue;
+
+ case 10:
+ case 12:
+ case 14:
+ case 16:
+ case 18:
+ case 20:
+ case 22:
+ case 24:
+ *(of++) = *( (char *) (g_escapeChars + utflen + 0));
+ *(of++) = *( (char *) (g_escapeChars + utflen + 1));
+ io ++;
+ continue;
+ }
+
+ /*
+ If the character is a UTF8 sequence of length > 1 we end up here */
+ if (ucs >= 0x10000)
+ {
+ ucs -= 0x10000;
+ *(of++) = '\\';
+ *(of++) = 'u';
+ Buffer_AppendShortHexUnchecked(of, (ucs >> 10) + 0xd800);
+ of += 4;
+
+ *(of++) = '\\';
+ *(of++) = 'u';
+ Buffer_AppendShortHexUnchecked(of, (ucs & 0x3ff) + 0xdc00);
+ of += 4;
+ }
+ else
+ {
+ *(of++) = '\\';
+ *(of++) = 'u';
+ Buffer_AppendShortHexUnchecked(of, ucs);
+ of += 4;
+ }
+ }
+
+ return FALSE;
+}
+
+#define Buffer_Reserve(__enc, __len) \
+ if ((__enc)->end - (__enc)->offset < (__len)) \
+ { \
+ Buffer_Realloc((__enc), (__len));\
+ } \
+
+
+#define Buffer_AppendCharUnchecked(__enc, __chr) \
+ *((__enc)->offset++) = __chr; \
+
+FASTCALL_ATTR INLINE_PREFIX void FASTCALL_MSVC strreverse(char* begin, char* end)
+{
+ char aux;
+ while (end > begin)
+ aux = *end, *end-- = *begin, *begin++ = aux;
+}
+
+void Buffer_AppendIntUnchecked(JSONObjectEncoder *enc, JSINT32 value)
+{
+ char* wstr;
+ JSUINT32 uvalue = (value < 0) ? -value : value;
+
+ wstr = enc->offset;
+ // Conversion. Number is reversed.
+
+ do *wstr++ = (char)(48 + (uvalue % 10)); while(uvalue /= 10);
+ if (value < 0) *wstr++ = '-';
+
+ // Reverse string
+ strreverse(enc->offset,wstr - 1);
+ enc->offset += (wstr - (enc->offset));
+}
+
+void Buffer_AppendLongUnchecked(JSONObjectEncoder *enc, JSINT64 value)
+{
+ char* wstr;
+ JSUINT64 uvalue = (value < 0) ? -value : value;
+
+ wstr = enc->offset;
+ // Conversion. Number is reversed.
+
+ do *wstr++ = (char)(48 + (uvalue % 10ULL)); while(uvalue /= 10ULL);
+ if (value < 0) *wstr++ = '-';
+
+ // Reverse string
+ strreverse(enc->offset,wstr - 1);
+ enc->offset += (wstr - (enc->offset));
+}
+
+int Buffer_AppendDoubleUnchecked(JSOBJ obj, JSONObjectEncoder *enc, double value)
+{
+ /* if input is larger than thres_max, revert to exponential */
+ const double thres_max = (double) 1e16 - 1;
+ int count;
+ double diff = 0.0;
+ char* str = enc->offset;
+ char* wstr = str;
+ unsigned long long whole;
+ double tmp;
+ unsigned long long frac;
+ int neg;
+ double pow10;
+
+ if (value == HUGE_VAL || value == -HUGE_VAL)
+ {
+ SetError (obj, enc, "Invalid Inf value when encoding double");
+ return FALSE;
+ }
+ if (! (value == value))
+ {
+ SetError (obj, enc, "Invalid Nan value when encoding double");
+ return FALSE;
+ }
+
+
+ /* we'll work in positive values and deal with the
+ negative sign issue later */
+ neg = 0;
+ if (value < 0)
+ {
+ neg = 1;
+ value = -value;
+ }
+
+ pow10 = g_pow10[enc->doublePrecision];
+
+ whole = (unsigned long long) value;
+ tmp = (value - whole) * pow10;
+ frac = (unsigned long long)(tmp);
+ diff = tmp - frac;
+
+ if (diff > 0.5)
+ {
+ ++frac;
+ /* handle rollover, e.g. case 0.99 with prec 1 is 1.0 */
+ if (frac >= pow10)
+ {
+ frac = 0;
+ ++whole;
+ }
+ }
+ else
+ if (diff == 0.5 && ((frac == 0) || (frac & 1)))
+ {
+ /* if halfway, round up if odd, OR
+ if last digit is 0. That last part is strange */
+ ++frac;
+ }
+
+ /* for very large numbers switch back to native sprintf for exponentials.
+ anyone want to write code to replace this? */
+ /*
+ normal printf behavior is to print EVERY whole number digit
+ which can be 100s of characters overflowing your buffers == bad
+ */
+ if (value > thres_max)
+ {
+ enc->offset += sprintf(str, "%.15e", neg ? -value : value);
+ return TRUE;
+ }
+
+ if (enc->doublePrecision == 0)
+ {
+ diff = value - whole;
+
+ if (diff > 0.5)
+ {
+ /* greater than 0.5, round up, e.g. 1.6 -> 2 */
+ ++whole;
+ }
+ else
+ if (diff == 0.5 && (whole & 1))
+ {
+ /* exactly 0.5 and ODD, then round up */
+ /* 1.5 -> 2, but 2.5 -> 2 */
+ ++whole;
+ }
+
+ //vvvvvvvvvvvvvvvvvvv Diff from modp_dto2
+ }
+ else
+ if (frac)
+ {
+ count = enc->doublePrecision;
+ // now do fractional part, as an unsigned number
+        // we know it is not 0 but it can have trailing zeros, which
+        // should be removed
+ while (!(frac % 10))
+ {
+ --count;
+ frac /= 10;
+ }
+ //^^^^^^^^^^^^^^^^^^^ Diff from modp_dto2
+
+ // now do fractional part, as an unsigned number
+ do
+ {
+ --count;
+ *wstr++ = (char)(48 + (frac % 10));
+ } while (frac /= 10);
+ // add extra 0s
+ while (count-- > 0)
+ {
+ *wstr++ = '0';
+ }
+ // add decimal
+ *wstr++ = '.';
+ }
+ else
+ {
+ *wstr++ = '0';
+ *wstr++ = '.';
+ }
+
+ // do whole part
+ // Take care of sign
+ // Conversion. Number is reversed.
+ do *wstr++ = (char)(48 + (whole % 10)); while (whole /= 10);
+
+ if (neg)
+ {
+ *wstr++ = '-';
+ }
+ strreverse(str, wstr-1);
+ enc->offset += (wstr - (enc->offset));
+
+ return TRUE;
+}
+
+
+
+
+
+
+/*
+FIXME:
+Handle integration functions returning NULL here */
+
+/*
+FIXME:
+Perhaps implement recursion detection */
+
+void encode(JSOBJ obj, JSONObjectEncoder *enc, const char *name, size_t cbName)
+{
+ const char *value;
+ char *objName;
+ int count;
+ JSOBJ iterObj;
+ size_t szlen;
+ JSONTypeContext tc;
+ tc.encoder = enc;
+
+ if (enc->level > enc->recursionMax)
+ {
+ SetError (obj, enc, "Maximum recursion level reached");
+ return;
+ }
+
+ /*
+ This reservation must hold
+
+ length of _name as encoded worst case +
+ maxLength of double to string OR maxLength of JSLONG to string
+
+ Since input is assumed to be UTF-8 the worst character length is:
+
+ 4 bytes (of UTF-8) => "\uXXXX\uXXXX" (12 bytes)
+ */
+
+ Buffer_Reserve(enc, 256 + (((cbName / 4) + 1) * 12));
+ if (enc->errorMsg)
+ {
+ return;
+ }
+
+ if (name)
+ {
+ Buffer_AppendCharUnchecked(enc, '\"');
+
+ if (enc->forceASCII)
+ {
+ if (!Buffer_EscapeStringValidated(obj, enc, name, name + cbName))
+ {
+ return;
+ }
+ }
+ else
+ {
+ if (!Buffer_EscapeStringUnvalidated(obj, enc, name, name + cbName))
+ {
+ return;
+ }
+ }
+
+
+ Buffer_AppendCharUnchecked(enc, '\"');
+
+ Buffer_AppendCharUnchecked (enc, ':');
+#ifndef JSON_NO_EXTRA_WHITESPACE
+ Buffer_AppendCharUnchecked (enc, ' ');
+#endif
+ }
+
+ enc->beginTypeContext(obj, &tc);
+
+ switch (tc.type)
+ {
+ case JT_INVALID:
+ return;
+
+ case JT_ARRAY:
+ {
+ count = 0;
+ enc->iterBegin(obj, &tc);
+
+ Buffer_AppendCharUnchecked (enc, '[');
+
+ while (enc->iterNext(obj, &tc))
+ {
+ if (count > 0)
+ {
+ Buffer_AppendCharUnchecked (enc, ',');
+#ifndef JSON_NO_EXTRA_WHITESPACE
+ Buffer_AppendCharUnchecked (buffer, ' ');
+#endif
+ }
+
+ iterObj = enc->iterGetValue(obj, &tc);
+
+ enc->level ++;
+ encode (iterObj, enc, NULL, 0);
+ count ++;
+ }
+
+ enc->iterEnd(obj, &tc);
+ Buffer_AppendCharUnchecked (enc, ']');
+ break;
+ }
+
+ case JT_OBJECT:
+ {
+ count = 0;
+ enc->iterBegin(obj, &tc);
+
+ Buffer_AppendCharUnchecked (enc, '{');
+
+ while (enc->iterNext(obj, &tc))
+ {
+ if (count > 0)
+ {
+ Buffer_AppendCharUnchecked (enc, ',');
+#ifndef JSON_NO_EXTRA_WHITESPACE
+ Buffer_AppendCharUnchecked (enc, ' ');
+#endif
+ }
+
+ iterObj = enc->iterGetValue(obj, &tc);
+ objName = enc->iterGetName(obj, &tc, &szlen);
+
+ enc->level ++;
+ encode (iterObj, enc, objName, szlen);
+ count ++;
+ }
+
+ enc->iterEnd(obj, &tc);
+ Buffer_AppendCharUnchecked (enc, '}');
+ break;
+ }
+
+ case JT_LONG:
+ {
+ Buffer_AppendLongUnchecked (enc, enc->getLongValue(obj, &tc));
+ break;
+ }
+
+ case JT_INT:
+ {
+ Buffer_AppendIntUnchecked (enc, enc->getIntValue(obj, &tc));
+ break;
+ }
+
+ case JT_TRUE:
+ {
+ Buffer_AppendCharUnchecked (enc, 't');
+ Buffer_AppendCharUnchecked (enc, 'r');
+ Buffer_AppendCharUnchecked (enc, 'u');
+ Buffer_AppendCharUnchecked (enc, 'e');
+ break;
+ }
+
+ case JT_FALSE:
+ {
+ Buffer_AppendCharUnchecked (enc, 'f');
+ Buffer_AppendCharUnchecked (enc, 'a');
+ Buffer_AppendCharUnchecked (enc, 'l');
+ Buffer_AppendCharUnchecked (enc, 's');
+ Buffer_AppendCharUnchecked (enc, 'e');
+ break;
+ }
+
+
+ case JT_NULL:
+ {
+ Buffer_AppendCharUnchecked (enc, 'n');
+ Buffer_AppendCharUnchecked (enc, 'u');
+ Buffer_AppendCharUnchecked (enc, 'l');
+ Buffer_AppendCharUnchecked (enc, 'l');
+ break;
+ }
+
+ case JT_DOUBLE:
+ {
+ if (!Buffer_AppendDoubleUnchecked (obj, enc, enc->getDoubleValue(obj, &tc)))
+ {
+ enc->endTypeContext(obj, &tc);
+ enc->level --;
+ return;
+ }
+ break;
+ }
+
+ case JT_UTF8:
+ {
+ value = enc->getStringValue(obj, &tc, &szlen);
+ Buffer_Reserve(enc, ((szlen / 4) + 1) * 12);
+ if (enc->errorMsg)
+ {
+ enc->endTypeContext(obj, &tc);
+ return;
+ }
+ Buffer_AppendCharUnchecked (enc, '\"');
+
+
+ if (enc->forceASCII)
+ {
+ if (!Buffer_EscapeStringValidated(obj, enc, value, value + szlen))
+ {
+ enc->endTypeContext(obj, &tc);
+ enc->level --;
+ return;
+ }
+ }
+ else
+ {
+ if (!Buffer_EscapeStringUnvalidated(obj, enc, value, value + szlen))
+ {
+ enc->endTypeContext(obj, &tc);
+ enc->level --;
+ return;
+ }
+ }
+
+ Buffer_AppendCharUnchecked (enc, '\"');
+ break;
+ }
+ }
+
+ enc->endTypeContext(obj, &tc);
+ enc->level --;
+
+}
+
+char *JSON_EncodeObject(JSOBJ obj, JSONObjectEncoder *enc, char *_buffer, size_t _cbBuffer)
+{
+ enc->malloc = enc->malloc ? enc->malloc : malloc;
+ enc->free = enc->free ? enc->free : free;
+ enc->realloc = enc->realloc ? enc->realloc : realloc;
+ enc->errorMsg = NULL;
+ enc->errorObj = NULL;
+ enc->level = 0;
+
+ if (enc->recursionMax < 1)
+ {
+ enc->recursionMax = JSON_MAX_RECURSION_DEPTH;
+ }
+
+ if (enc->doublePrecision < 0 ||
+ enc->doublePrecision > JSON_DOUBLE_MAX_DECIMALS)
+ {
+ enc->doublePrecision = JSON_DOUBLE_MAX_DECIMALS;
+ }
+
+ if (_buffer == NULL)
+ {
+ _cbBuffer = 32768;
+ enc->start = (char *) enc->malloc (_cbBuffer);
+ if (!enc->start)
+ {
+ SetError(obj, enc, "Could not reserve memory block");
+ return NULL;
+ }
+ enc->heap = 1;
+ }
+ else
+ {
+ enc->start = _buffer;
+ enc->heap = 0;
+ }
+
+ enc->end = enc->start + _cbBuffer;
+ enc->offset = enc->start;
+
+
+ encode (obj, enc, NULL, 0);
+
+ Buffer_Reserve(enc, 1);
+ if (enc->errorMsg)
+ {
+ return NULL;
+ }
+ Buffer_AppendCharUnchecked(enc, '\0');
+
+ return enc->start;
+}
diff --git a/pandas/src/ujson/python/JSONtoObj.c b/pandas/src/ujson/python/JSONtoObj.c
new file mode 100644
index 0000000000000..bc42269d9698b
--- /dev/null
+++ b/pandas/src/ujson/python/JSONtoObj.c
@@ -0,0 +1,676 @@
+#include "py_defines.h"
+#define PY_ARRAY_UNIQUE_SYMBOL UJSON_NUMPY
+#define NO_IMPORT_ARRAY
+#include <numpy/arrayobject.h>
+#include <ultrajson.h>
+
+
+typedef struct __PyObjectDecoder
+{
+ JSONObjectDecoder dec;
+
+ void* npyarr; // Numpy context buffer
+ void* npyarr_addr; // Ref to npyarr ptr to track DECREF calls
+ npy_intp curdim; // Current array dimension
+
+ PyArray_Descr* dtype;
+} PyObjectDecoder;
+
+typedef struct __NpyArrContext
+{
+ PyObject* ret;
+ PyObject* labels[2];
+ PyArray_Dims shape;
+
+ PyObjectDecoder* dec;
+
+ npy_intp i;
+ npy_intp elsize;
+ npy_intp elcount;
+} NpyArrContext;
+
+//#define PRINTMARK() fprintf(stderr, "%s: MARK(%d)\n", __FILE__, __LINE__)
+#define PRINTMARK()
+
+// Numpy handling based on numpy internal code, specifically the function
+// PyArray_FromIter.
+
+// numpy related functions are inter-dependent so declare them all here,
+// to ensure the compiler catches any errors
+
+// standard numpy array handling
+JSOBJ Object_npyNewArray(void* decoder);
+JSOBJ Object_npyEndArray(JSOBJ obj);
+int Object_npyArrayAddItem(JSOBJ obj, JSOBJ value);
+
+// for more complex dtypes (object and string) fill a standard Python list
+// and convert to a numpy array when done.
+JSOBJ Object_npyNewArrayList(void* decoder);
+JSOBJ Object_npyEndArrayList(JSOBJ obj);
+int Object_npyArrayListAddItem(JSOBJ obj, JSOBJ value);
+
+// labelled support, encode keys and values of JS object into separate numpy
+// arrays
+JSOBJ Object_npyNewObject(void* decoder);
+JSOBJ Object_npyEndObject(JSOBJ obj);
+int Object_npyObjectAddKey(JSOBJ obj, JSOBJ name, JSOBJ value);
+
+
+// free the numpy context buffer
+void Npy_releaseContext(NpyArrContext* npyarr)
+{
+ PRINTMARK();
+ if (npyarr)
+ {
+ if (npyarr->shape.ptr)
+ {
+ PyObject_Free(npyarr->shape.ptr);
+ }
+ if (npyarr->dec)
+ {
+ npyarr->dec->npyarr = NULL;
+ npyarr->dec->curdim = 0;
+ }
+ Py_XDECREF(npyarr->labels[0]);
+ Py_XDECREF(npyarr->labels[1]);
+ Py_XDECREF(npyarr->ret);
+ PyObject_Free(npyarr);
+ }
+}
+
+JSOBJ Object_npyNewArray(void* _decoder)
+{
+ NpyArrContext* npyarr;
+ PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder;
+ PRINTMARK();
+ if (decoder->curdim <= 0)
+ {
+ // start of array - initialise the context buffer
+ npyarr = decoder->npyarr = PyObject_Malloc(sizeof(NpyArrContext));
+ decoder->npyarr_addr = npyarr;
+
+ if (!npyarr)
+ {
+ PyErr_NoMemory();
+ return NULL;
+ }
+
+ npyarr->dec = decoder;
+ npyarr->labels[0] = npyarr->labels[1] = NULL;
+
+ npyarr->shape.ptr = PyObject_Malloc(sizeof(npy_intp)*NPY_MAXDIMS);
+ npyarr->shape.len = 1;
+ npyarr->ret = NULL;
+
+ npyarr->elsize = 0;
+ npyarr->elcount = 4;
+ npyarr->i = 0;
+ }
+ else
+ {
+ // starting a new dimension continue the current array (and reshape after)
+ npyarr = (NpyArrContext*) decoder->npyarr;
+ if (decoder->curdim >= npyarr->shape.len)
+ {
+ npyarr->shape.len++;
+ }
+ }
+
+ npyarr->shape.ptr[decoder->curdim] = 0;
+ decoder->curdim++;
+ return npyarr;
+}
+
+PyObject* Npy_returnLabelled(NpyArrContext* npyarr)
+{
+ PyObject* ret = npyarr->ret;
+ npy_intp i;
+
+ if (npyarr->labels[0] || npyarr->labels[1])
+ {
+ // finished decoding, build tuple with values and labels
+ ret = PyTuple_New(npyarr->shape.len+1);
+ for (i = 0; i < npyarr->shape.len; i++)
+ {
+ if (npyarr->labels[i])
+ {
+ PyTuple_SET_ITEM(ret, i+1, npyarr->labels[i]);
+ npyarr->labels[i] = NULL;
+ }
+ else
+ {
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(ret, i+1, Py_None);
+ }
+ }
+ PyTuple_SET_ITEM(ret, 0, npyarr->ret);
+ }
+
+ return ret;
+}
+
+JSOBJ Object_npyEndArray(JSOBJ obj)
+{
+ PyObject *ret;
+ char* new_data;
+ NpyArrContext* npyarr = (NpyArrContext*) obj;
+ int emptyType = NPY_DEFAULT_TYPE;
+ npy_intp i;
+ PRINTMARK();
+ if (!npyarr)
+ {
+ return NULL;
+ }
+
+ ret = npyarr->ret;
+ i = npyarr->i;
+
+ npyarr->dec->curdim--;
+
+ if (i == 0 || !npyarr->ret) {
+ // empty array would not have been initialised so do it now.
+ if (npyarr->dec->dtype)
+ {
+ emptyType = npyarr->dec->dtype->type_num;
+ }
+ npyarr->ret = ret = PyArray_EMPTY(npyarr->shape.len, npyarr->shape.ptr, emptyType, 0);
+ }
+ else if (npyarr->dec->curdim <= 0)
+ {
+ // realloc to final size
+ new_data = PyDataMem_RENEW(PyArray_DATA(ret), i * npyarr->elsize);
+ if (new_data == NULL) {
+ PyErr_NoMemory();
+ Npy_releaseContext(npyarr);
+ return NULL;
+ }
+ ((PyArrayObject*) ret)->data = (void*) new_data;
+ // PyArray_BYTES(ret) = new_data;
+ }
+
+ if (npyarr->dec->curdim <= 0)
+ {
+ // finished decoding array, reshape if necessary
+ if (npyarr->shape.len > 1)
+ {
+ npyarr->ret = PyArray_Newshape((PyArrayObject*) ret, &npyarr->shape, NPY_ANYORDER);
+ Py_DECREF(ret);
+ }
+
+ ret = Npy_returnLabelled(npyarr);
+
+ npyarr->ret = NULL;
+ Npy_releaseContext(npyarr);
+ }
+
+ return ret;
+}
+
+int Object_npyArrayAddItem(JSOBJ obj, JSOBJ value)
+{
+ PyObject* type;
+ PyArray_Descr* dtype;
+ npy_intp i;
+ char *new_data, *item;
+ NpyArrContext* npyarr = (NpyArrContext*) obj;
+ PRINTMARK();
+ if (!npyarr)
+ {
+ return 0;
+ }
+
+ i = npyarr->i;
+
+ npyarr->shape.ptr[npyarr->dec->curdim-1]++;
+
+ if (PyArray_Check((PyObject*)value))
+ {
+ // multidimensional array, keep decoding values.
+ return 1;
+ }
+
+ if (!npyarr->ret)
+ {
+ // Array not initialised yet.
+ // We do it here so we can 'sniff' the data type if none was provided
+ if (!npyarr->dec->dtype)
+ {
+ type = PyObject_Type(value);
+ if(!PyArray_DescrConverter(type, &dtype))
+ {
+ Py_DECREF(type);
+ goto fail;
+ }
+ Py_INCREF(dtype);
+ Py_DECREF(type);
+ }
+ else
+ {
+ dtype = PyArray_DescrNew(npyarr->dec->dtype);
+ }
+
+ // If it's an object or string then fill a Python list and subsequently
+ // convert. Otherwise we would need to somehow mess about with
+ // reference counts when renewing memory.
+ npyarr->elsize = dtype->elsize;
+ if (PyDataType_REFCHK(dtype) || npyarr->elsize == 0)
+ {
+ Py_XDECREF(dtype);
+
+ if (npyarr->dec->curdim > 1)
+ {
+ PyErr_SetString(PyExc_ValueError, "Cannot decode multidimensional arrays with variable length elements to numpy");
+ goto fail;
+ }
+ npyarr->elcount = 0;
+ npyarr->ret = PyList_New(0);
+ if (!npyarr->ret)
+ {
+ goto fail;
+ }
+ ((JSONObjectDecoder*)npyarr->dec)->newArray = Object_npyNewArrayList;
+ ((JSONObjectDecoder*)npyarr->dec)->arrayAddItem = Object_npyArrayListAddItem;
+ ((JSONObjectDecoder*)npyarr->dec)->endArray = Object_npyEndArrayList;
+ return Object_npyArrayListAddItem(obj, value);
+ }
+
+ npyarr->ret = PyArray_NewFromDescr(&PyArray_Type, dtype, 1,
+ &npyarr->elcount, NULL,NULL, 0, NULL);
+
+ if (!npyarr->ret)
+ {
+ goto fail;
+ }
+ }
+
+ if (i >= npyarr->elcount) {
+ // Grow PyArray_DATA(ret):
+ // this is similar for the strategy for PyListObject, but we use
+ // 50% overallocation => 0, 4, 8, 14, 23, 36, 56, 86 ...
+ if (npyarr->elsize == 0)
+ {
+ PyErr_SetString(PyExc_ValueError, "Cannot decode multidimensional arrays with variable length elements to numpy");
+ goto fail;
+ }
+
+ npyarr->elcount = (i >> 1) + (i < 4 ? 4 : 2) + i;
+ if (npyarr->elcount <= NPY_MAX_INTP/npyarr->elsize) {
+ new_data = PyDataMem_RENEW(PyArray_DATA(npyarr->ret), npyarr->elcount * npyarr->elsize);
+ }
+ else {
+ PyErr_NoMemory();
+ goto fail;
+ }
+ ((PyArrayObject*) npyarr->ret)->data = (void*) new_data;
+
+ // PyArray_BYTES(npyarr->ret) = new_data;
+ }
+
+ PyArray_DIMS(npyarr->ret)[0] = i + 1;
+
+ if ((item = PyArray_GETPTR1(npyarr->ret, i)) == NULL
+ || PyArray_SETITEM(npyarr->ret, item, value) == -1) {
+ goto fail;
+ }
+
+ Py_DECREF( (PyObject *) value);
+ npyarr->i++;
+ return 1;
+
+fail:
+
+ Npy_releaseContext(npyarr);
+ return 0;
+}
+
+JSOBJ Object_npyNewArrayList(void* _decoder)
+{
+ PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder;
+ PRINTMARK();
+ PyErr_SetString(PyExc_ValueError, "nesting not supported for object or variable length dtypes");
+ Npy_releaseContext(decoder->npyarr);
+ return NULL;
+}
+
+JSOBJ Object_npyEndArrayList(JSOBJ obj)
+{
+ PyObject *list, *ret;
+ NpyArrContext* npyarr = (NpyArrContext*) obj;
+ PRINTMARK();
+ if (!npyarr)
+ {
+ return NULL;
+ }
+
+ // convert decoded list to numpy array
+ list = (PyObject *) npyarr->ret;
+ npyarr->ret = PyArray_FROM_O(list);
+
+ ret = Npy_returnLabelled(npyarr);
+ npyarr->ret = list;
+
+ ((JSONObjectDecoder*)npyarr->dec)->newArray = Object_npyNewArray;
+ ((JSONObjectDecoder*)npyarr->dec)->arrayAddItem = Object_npyArrayAddItem;
+ ((JSONObjectDecoder*)npyarr->dec)->endArray = Object_npyEndArray;
+ Npy_releaseContext(npyarr);
+ return ret;
+}
+
+int Object_npyArrayListAddItem(JSOBJ obj, JSOBJ value)
+{
+ NpyArrContext* npyarr = (NpyArrContext*) obj;
+ PRINTMARK();
+ if (!npyarr)
+ {
+ return 0;
+ }
+ PyList_Append((PyObject*) npyarr->ret, value);
+ Py_DECREF( (PyObject *) value);
+ npyarr->elcount++;
+ return 1;
+}
+
+
+JSOBJ Object_npyNewObject(void* _decoder)
+{
+ PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder;
+ PRINTMARK();
+ if (decoder->curdim > 1)
+ {
+ PyErr_SetString(PyExc_ValueError, "labels only supported up to 2 dimensions");
+ return NULL;
+ }
+
+ return ((JSONObjectDecoder*)decoder)->newArray(decoder);
+}
+
+JSOBJ Object_npyEndObject(JSOBJ obj)
+{
+ PyObject *list;
+ npy_intp labelidx;
+ NpyArrContext* npyarr = (NpyArrContext*) obj;
+ PRINTMARK();
+ if (!npyarr)
+ {
+ return NULL;
+ }
+
+ labelidx = npyarr->dec->curdim-1;
+
+ list = npyarr->labels[labelidx];
+ if (list)
+ {
+ npyarr->labels[labelidx] = PyArray_FROM_O(list);
+ Py_DECREF(list);
+ }
+
+ return (PyObject*) ((JSONObjectDecoder*)npyarr->dec)->endArray(obj);
+}
+
+int Object_npyObjectAddKey(JSOBJ obj, JSOBJ name, JSOBJ value)
+{
+ PyObject *label;
+ npy_intp labelidx;
+ // add key to label array, value to values array
+ NpyArrContext* npyarr = (NpyArrContext*) obj;
+ PRINTMARK();
+ if (!npyarr)
+ {
+ return 0;
+ }
+
+ label = (PyObject*) name;
+ labelidx = npyarr->dec->curdim-1;
+
+ if (!npyarr->labels[labelidx])
+ {
+ npyarr->labels[labelidx] = PyList_New(0);
+ }
+
+ // only fill label array once, assumes all column labels are the same
+ // for 2-dimensional arrays.
+ if (PyList_GET_SIZE(npyarr->labels[labelidx]) <= npyarr->elcount)
+ {
+ PyList_Append(npyarr->labels[labelidx], label);
+ }
+
+ if(((JSONObjectDecoder*)npyarr->dec)->arrayAddItem(obj, value))
+ {
+ Py_DECREF(label);
+ return 1;
+ }
+ return 0;
+}
+
+int Object_objectAddKey(JSOBJ obj, JSOBJ name, JSOBJ value)
+{
+ PyDict_SetItem (obj, name, value);
+ Py_DECREF( (PyObject *) name);
+ Py_DECREF( (PyObject *) value);
+ return 1;
+}
+
+int Object_arrayAddItem(JSOBJ obj, JSOBJ value)
+{
+ PyList_Append(obj, value);
+ Py_DECREF( (PyObject *) value);
+ return 1;
+}
+
+JSOBJ Object_newString(wchar_t *start, wchar_t *end)
+{
+ return PyUnicode_FromWideChar (start, (end - start));
+}
+
+JSOBJ Object_newTrue(void)
+{
+ Py_RETURN_TRUE;
+}
+
+JSOBJ Object_newFalse(void)
+{
+ Py_RETURN_FALSE;
+}
+
+JSOBJ Object_newNull(void)
+{
+ Py_RETURN_NONE;
+}
+
+JSOBJ Object_newObject(void* decoder)
+{
+ return PyDict_New();
+}
+
+JSOBJ Object_endObject(JSOBJ obj)
+{
+ return obj;
+}
+
+JSOBJ Object_newArray(void* decoder)
+{
+ return PyList_New(0);
+}
+
+JSOBJ Object_endArray(JSOBJ obj)
+{
+ return obj;
+}
+
+JSOBJ Object_newInteger(JSINT32 value)
+{
+ return PyInt_FromLong( (long) value);
+}
+
+JSOBJ Object_newLong(JSINT64 value)
+{
+ return PyLong_FromLongLong (value);
+}
+
+JSOBJ Object_newDouble(double value)
+{
+ return PyFloat_FromDouble(value);
+}
+
+static void Object_releaseObject(JSOBJ obj, void* _decoder)
+{
+ PyObjectDecoder* decoder = (PyObjectDecoder*) _decoder;
+ if (obj != decoder->npyarr_addr)
+ {
+ Py_XDECREF( ((PyObject *)obj));
+ }
+}
+
+
+PyObject* JSONToObj(PyObject* self, PyObject *args, PyObject *kwargs)
+{
+ PyObject *ret;
+ PyObject *sarg;
+ JSONObjectDecoder *decoder;
+ PyObjectDecoder pyDecoder;
+ PyArray_Descr *dtype = NULL;
+ static char *kwlist[] = { "obj", "numpy", "labelled", "dtype", NULL};
+ int numpy = 0, labelled = 0, decref = 0;
+ // PRINTMARK();
+
+ JSONObjectDecoder dec = {
+ Object_newString,
+ Object_objectAddKey,
+ Object_arrayAddItem,
+ Object_newTrue,
+ Object_newFalse,
+ Object_newNull,
+ Object_newObject,
+ Object_endObject,
+ Object_newArray,
+ Object_endArray,
+ Object_newInteger,
+ Object_newLong,
+ Object_newDouble,
+ Object_releaseObject,
+ PyObject_Malloc,
+ PyObject_Free,
+ PyObject_Realloc,
+ };
+ pyDecoder.dec = dec;
+ pyDecoder.curdim = 0;
+ pyDecoder.npyarr = NULL;
+ pyDecoder.npyarr_addr = NULL;
+
+ decoder = (JSONObjectDecoder*) &pyDecoder;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|iiO&", kwlist, &sarg, &numpy, &labelled, PyArray_DescrConverter2, &dtype))
+ {
+ Npy_releaseContext(pyDecoder.npyarr);
+ return NULL;
+ }
+
+ if (PyUnicode_Check(sarg))
+ {
+ sarg = PyUnicode_AsUTF8String(sarg);
+ if (sarg == NULL)
+ {
+ //Exception raised above us by codec according to docs
+ return NULL;
+ }
+ decref = 1;
+ }
+ else
+ if (!PyString_Check(sarg))
+ {
+ PyErr_Format(PyExc_TypeError, "Expected String or Unicode");
+ return NULL;
+ }
+
+ if (numpy)
+ {
+ pyDecoder.dtype = dtype;
+ decoder->newArray = Object_npyNewArray;
+ decoder->endArray = Object_npyEndArray;
+ decoder->arrayAddItem = Object_npyArrayAddItem;
+
+ if (labelled)
+ {
+ decoder->newObject = Object_npyNewObject;
+ decoder->endObject = Object_npyEndObject;
+ decoder->objectAddKey = Object_npyObjectAddKey;
+ }
+ }
+
+ decoder->errorStr = NULL;
+ decoder->errorOffset = NULL;
+
+ PRINTMARK();
+ ret = JSON_DecodeObject(decoder, PyString_AS_STRING(sarg), PyString_GET_SIZE(sarg));
+ PRINTMARK();
+
+ if (decref)
+ {
+ Py_DECREF(sarg);
+ }
+
+ if (PyErr_Occurred())
+ {
+ return NULL;
+ }
+
+ if (decoder->errorStr)
+ {
+ /*FIXME: It's possible to give a much nicer error message here with actual failing element in input etc*/
+ PyErr_Format (PyExc_ValueError, "%s", decoder->errorStr);
+ Py_XDECREF( (PyObject *) ret);
+ Npy_releaseContext(pyDecoder.npyarr);
+
+ return NULL;
+ }
+
+ return ret;
+}
+
+PyObject* JSONFileToObj(PyObject* self, PyObject *args, PyObject *kwargs)
+{
+ PyObject *file;
+ PyObject *read;
+ PyObject *string;
+ PyObject *result;
+ PyObject *argtuple;
+
+ if (!PyArg_ParseTuple (args, "O", &file)) {
+ return NULL;
+ }
+
+ if (!PyObject_HasAttrString (file, "read"))
+ {
+ PyErr_Format (PyExc_TypeError, "expected file");
+ return NULL;
+ }
+
+ read = PyObject_GetAttrString (file, "read");
+
+ if (!PyCallable_Check (read)) {
+ Py_XDECREF(read);
+ PyErr_Format (PyExc_TypeError, "expected file");
+ return NULL;
+ }
+
+ string = PyObject_CallObject (read, NULL);
+ Py_XDECREF(read);
+
+ if (string == NULL)
+ {
+ return NULL;
+ }
+
+ argtuple = PyTuple_Pack(1, string);
+
+ result = JSONToObj (self, argtuple, kwargs);
+ Py_XDECREF(string);
+ Py_DECREF(argtuple);
+
+ if (result == NULL) {
+ return NULL;
+ }
+
+ return result;
+}
+
diff --git a/pandas/src/ujson/python/objToJSON.c b/pandas/src/ujson/python/objToJSON.c
new file mode 100644
index 0000000000000..ce8bdf3721f5e
--- /dev/null
+++ b/pandas/src/ujson/python/objToJSON.c
@@ -0,0 +1,1701 @@
+#define PY_ARRAY_UNIQUE_SYMBOL UJSON_NUMPY
+
+#include "py_defines.h"
+#include <numpy/arrayobject.h>
+#include <numpy/npy_math.h>
+#include <np_datetime.h>
+#include <stdio.h>
+#include <datetime.h>
+#include <ultrajson.h>
+
+#define NPY_JSON_BUFSIZE 32768
+
+static PyObject* cls_dataframe;
+static PyObject* cls_series;
+static PyObject* cls_index;
+
+typedef void *(*PFN_PyTypeToJSON)(JSOBJ obj, JSONTypeContext *ti, void *outValue, size_t *_outLen);
+
+
+#if (PY_VERSION_HEX < 0x02050000)
+typedef ssize_t Py_ssize_t;
+#endif
+
+typedef struct __NpyArrContext
+{
+ PyObject *array;
+ char* dataptr;
+ int was_datetime64;
+ int curdim; // current dimension in array's order
+ int stridedim; // dimension we are striding over
+ int inc; // stride dimension increment (+/- 1)
+ npy_intp dim;
+ npy_intp stride;
+ npy_intp ndim;
+ npy_intp index[NPY_MAXDIMS];
+ PyArray_GetItemFunc* getitem;
+
+ char** rowLabels;
+ char** columnLabels;
+} NpyArrContext;
+
+typedef struct __TypeContext
+{
+ JSPFN_ITERBEGIN iterBegin;
+ JSPFN_ITEREND iterEnd;
+ JSPFN_ITERNEXT iterNext;
+ JSPFN_ITERGETNAME iterGetName;
+ JSPFN_ITERGETVALUE iterGetValue;
+ PFN_PyTypeToJSON PyTypeToJSON;
+ PyObject *newObj;
+ PyObject *dictObj;
+ Py_ssize_t index;
+ Py_ssize_t size;
+ PyObject *itemValue;
+ PyObject *itemName;
+ PyObject *attrList;
+ char *citemName;
+
+ JSINT64 longValue;
+
+ NpyArrContext *npyarr;
+ int transpose;
+ char** rowLabels;
+ char** columnLabels;
+ npy_intp rowLabelsLen;
+ npy_intp columnLabelsLen;
+
+} TypeContext;
+
+typedef struct __PyObjectEncoder
+{
+ JSONObjectEncoder enc;
+
+ // pass through the NpyArrContext when encoding multi-dimensional arrays
+ NpyArrContext* npyCtxtPassthru;
+
+ // output format style for pandas data types
+ int outputFormat;
+ int originalOutputFormat;
+} PyObjectEncoder;
+
+#define GET_TC(__ptrtc) ((TypeContext *)((__ptrtc)->prv))
+
+struct PyDictIterState
+{
+ PyObject *keys;
+ size_t i;
+ size_t sz;
+};
+
+enum PANDAS_FORMAT
+{
+ SPLIT,
+ RECORDS,
+ INDEX,
+ COLUMNS,
+ VALUES
+};
+
+//#define PRINTMARK() fprintf(stderr, "%s: MARK(%d)\n", __FILE__, __LINE__)
+#define PRINTMARK()
+
+void initObjToJSON(void)
+{
+ PyObject *mod_frame;
+ PyDateTime_IMPORT;
+
+ mod_frame = PyImport_ImportModule("pandas.core.frame");
+ if (mod_frame)
+ {
+ cls_dataframe = PyObject_GetAttrString(mod_frame, "DataFrame");
+ cls_index = PyObject_GetAttrString(mod_frame, "Index");
+ cls_series = PyObject_GetAttrString(mod_frame, "Series");
+ Py_DECREF(mod_frame);
+ }
+
+ /* Initialise numpy API */
+ import_array();
+}
+
+static void *PyIntToINT32(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
+{
+ PyObject *obj = (PyObject *) _obj;
+ *((JSINT32 *) outValue) = PyInt_AS_LONG (obj);
+ return NULL;
+}
+
+static void *PyIntToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
+{
+ PyObject *obj = (PyObject *) _obj;
+ *((JSINT64 *) outValue) = PyInt_AS_LONG (obj);
+ return NULL;
+}
+
+static void *PyLongToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
+{
+ *((JSINT64 *) outValue) = GET_TC(tc)->longValue;
+ return NULL;
+}
+
+static void *NpyFloatToDOUBLE(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
+{
+ PyObject *obj = (PyObject *) _obj;
+ PyArray_CastScalarToCtype(obj, outValue, PyArray_DescrFromType(NPY_DOUBLE));
+ return NULL;
+}
+
+static void *PyFloatToDOUBLE(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
+{
+ PyObject *obj = (PyObject *) _obj;
+ *((double *) outValue) = PyFloat_AS_DOUBLE (obj);
+ return NULL;
+}
+
+static void *PyStringToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
+{
+ PyObject *obj = (PyObject *) _obj;
+ *_outLen = PyString_GET_SIZE(obj);
+ return PyString_AS_STRING(obj);
+}
+
+static void *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
+{
+ PyObject *obj = (PyObject *) _obj;
+ PyObject *newObj = PyUnicode_AsUTF8String (obj);
+
+ GET_TC(tc)->newObj = newObj;
+
+ *_outLen = PyString_GET_SIZE(newObj);
+ return PyString_AS_STRING(newObj);
+}
+
+static void *NpyDateTimeToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
+{
+ PyObject *obj = (PyObject *) _obj;
+ PyArray_CastScalarToCtype(obj, outValue, PyArray_DescrFromType(NPY_DATETIME));
+ return NULL;
+}
+
+static void *PyDateTimeToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
+{
+ pandas_datetimestruct dts;
+ PyObject *obj = (PyObject *) _obj;
+
+ dts.year = PyDateTime_GET_YEAR(obj);
+ dts.month = PyDateTime_GET_MONTH(obj);
+ dts.day = PyDateTime_GET_DAY(obj);
+ dts.hour = PyDateTime_DATE_GET_HOUR(obj);
+ dts.min = PyDateTime_DATE_GET_MINUTE(obj);
+ dts.sec = PyDateTime_DATE_GET_SECOND(obj);
+ dts.us = PyDateTime_DATE_GET_MICROSECOND(obj);
+ dts.ps = dts.as = 0;
+ *((JSINT64*)outValue) = (JSINT64) pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts);
+ return NULL;
+}
+
+static void *PyDateToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
+{
+ pandas_datetimestruct dts;
+ PyObject *obj = (PyObject *) _obj;
+
+ dts.year = PyDateTime_GET_YEAR(obj);
+ dts.month = PyDateTime_GET_MONTH(obj);
+ dts.day = PyDateTime_GET_DAY(obj);
+ dts.hour = dts.min = dts.sec = dts.ps = dts.as = 0;
+ *((JSINT64*)outValue) = (JSINT64) pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts);
+ return NULL;
+}
+
+//=============================================================================
+// Numpy array iteration functions
+//=============================================================================
+int NpyArr_iterNextNone(JSOBJ _obj, JSONTypeContext *tc)
+{
+ return 0;
+}
+
+void NpyArr_iterBegin(JSOBJ _obj, JSONTypeContext *tc)
+{
+ PyArrayObject *obj;
+ PyArray_Descr *dtype;
+ NpyArrContext *npyarr;
+
+ if (GET_TC(tc)->newObj)
+ {
+ obj = (PyArrayObject *) GET_TC(tc)->newObj;
+ }
+ else
+ {
+ obj = (PyArrayObject *) _obj;
+ }
+
+ if (PyArray_SIZE(obj) > 0)
+ {
+ PRINTMARK();
+ npyarr = PyObject_Malloc(sizeof(NpyArrContext));
+ GET_TC(tc)->npyarr = npyarr;
+
+ if (!npyarr)
+ {
+ PyErr_NoMemory();
+ GET_TC(tc)->iterNext = NpyArr_iterNextNone;
+ return;
+ }
+
+ // uber hack to support datetime64[ns] arrays
+ if (PyArray_DESCR(obj)->type_num == NPY_DATETIME) {
+ npyarr->was_datetime64 = 1;
+ dtype = PyArray_DescrFromType(NPY_INT64);
+ obj = (PyArrayObject *) PyArray_CastToType(obj, dtype, 0);
+ } else {
+ npyarr->was_datetime64 = 0;
+ }
+
+ npyarr->array = (PyObject*) obj;
+ npyarr->getitem = (PyArray_GetItemFunc*) PyArray_DESCR(obj)->f->getitem;
+ npyarr->dataptr = PyArray_DATA(obj);
+ npyarr->ndim = PyArray_NDIM(obj) - 1;
+ npyarr->curdim = 0;
+
+ if (GET_TC(tc)->transpose)
+ {
+ npyarr->dim = PyArray_DIM(obj, npyarr->ndim);
+ npyarr->stride = PyArray_STRIDE(obj, npyarr->ndim);
+ npyarr->stridedim = npyarr->ndim;
+ npyarr->index[npyarr->ndim] = 0;
+ npyarr->inc = -1;
+ }
+ else
+ {
+ npyarr->dim = PyArray_DIM(obj, 0);
+ npyarr->stride = PyArray_STRIDE(obj, 0);
+ npyarr->stridedim = 0;
+ npyarr->index[0] = 0;
+ npyarr->inc = 1;
+ }
+
+ npyarr->columnLabels = GET_TC(tc)->columnLabels;
+ npyarr->rowLabels = GET_TC(tc)->rowLabels;
+ }
+ else
+ {
+ GET_TC(tc)->iterNext = NpyArr_iterNextNone;
+ }
+ PRINTMARK();
+}
+
+void NpyArr_iterEnd(JSOBJ obj, JSONTypeContext *tc)
+{
+ NpyArrContext *npyarr = GET_TC(tc)->npyarr;
+
+ if (npyarr)
+ {
+ if (npyarr->was_datetime64) {
+ Py_XDECREF(npyarr->array);
+ }
+
+ if (GET_TC(tc)->itemValue != npyarr->array)
+ {
+ Py_XDECREF(GET_TC(tc)->itemValue);
+ }
+ GET_TC(tc)->itemValue = NULL;
+
+ PyObject_Free(npyarr);
+ }
+ PRINTMARK();
+}
+
+void NpyArrPassThru_iterBegin(JSOBJ obj, JSONTypeContext *tc)
+{
+ PRINTMARK();
+}
+
+void NpyArrPassThru_iterEnd(JSOBJ obj, JSONTypeContext *tc)
+{
+ NpyArrContext* npyarr;
+ PRINTMARK();
+ // finished this dimension, reset the data pointer
+ npyarr = GET_TC(tc)->npyarr;
+ npyarr->curdim--;
+ npyarr->dataptr -= npyarr->stride * npyarr->index[npyarr->stridedim];
+ npyarr->stridedim -= npyarr->inc;
+ npyarr->dim = PyArray_DIM(npyarr->array, npyarr->stridedim);
+ npyarr->stride = PyArray_STRIDE(npyarr->array, npyarr->stridedim);
+ npyarr->dataptr += npyarr->stride;
+
+ if (GET_TC(tc)->itemValue != npyarr->array)
+ {
+ Py_XDECREF(GET_TC(tc)->itemValue);
+ GET_TC(tc)->itemValue = NULL;
+ }
+}
+
+int NpyArr_iterNextItem(JSOBJ _obj, JSONTypeContext *tc)
+{
+ NpyArrContext* npyarr;
+ PRINTMARK();
+ npyarr = GET_TC(tc)->npyarr;
+
+ if (GET_TC(tc)->itemValue != npyarr->array)
+ {
+ Py_XDECREF(GET_TC(tc)->itemValue);
+ GET_TC(tc)->itemValue = NULL;
+ }
+
+ if (npyarr->index[npyarr->stridedim] >= npyarr->dim)
+ {
+ return 0;
+ }
+
+ GET_TC(tc)->itemValue = npyarr->getitem(npyarr->dataptr, npyarr->array);
+
+ npyarr->dataptr += npyarr->stride;
+ npyarr->index[npyarr->stridedim]++;
+ return 1;
+}
+
+int NpyArr_iterNext(JSOBJ _obj, JSONTypeContext *tc)
+{
+ NpyArrContext* npyarr;
+ PRINTMARK();
+ npyarr = GET_TC(tc)->npyarr;
+
+ if (npyarr->curdim >= npyarr->ndim || npyarr->index[npyarr->stridedim] >= npyarr->dim)
+ {
+ // innermost dimension, start retrieving item values
+ GET_TC(tc)->iterNext = NpyArr_iterNextItem;
+ return NpyArr_iterNextItem(_obj, tc);
+ }
+
+ // dig a dimension deeper
+ npyarr->index[npyarr->stridedim]++;
+
+ npyarr->curdim++;
+ npyarr->stridedim += npyarr->inc;
+ npyarr->dim = PyArray_DIM(npyarr->array, npyarr->stridedim);
+ npyarr->stride = PyArray_STRIDE(npyarr->array, npyarr->stridedim);
+ npyarr->index[npyarr->stridedim] = 0;
+
+ ((PyObjectEncoder*) tc->encoder)->npyCtxtPassthru = npyarr;
+ GET_TC(tc)->itemValue = npyarr->array;
+ return 1;
+}
+
+JSOBJ NpyArr_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
+{
+ PRINTMARK();
+ return GET_TC(tc)->itemValue;
+}
+
+char *NpyArr_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
+{
+ NpyArrContext* npyarr;
+ npy_intp idx;
+ PRINTMARK();
+ npyarr = GET_TC(tc)->npyarr;
+ if (GET_TC(tc)->iterNext == NpyArr_iterNextItem)
+ {
+ idx = npyarr->index[npyarr->stridedim] - 1;
+ *outLen = strlen(npyarr->columnLabels[idx]);
+ return npyarr->columnLabels[idx];
+ }
+ else
+ {
+ idx = npyarr->index[npyarr->stridedim - npyarr->inc] - 1;
+ *outLen = strlen(npyarr->rowLabels[idx]);
+ return npyarr->rowLabels[idx];
+ }
+}
+
+//=============================================================================
+// Tuple iteration functions
+// itemValue is borrowed reference, no ref counting
+//=============================================================================
+void Tuple_iterBegin(JSOBJ obj, JSONTypeContext *tc)
+{
+ GET_TC(tc)->index = 0;
+ GET_TC(tc)->size = PyTuple_GET_SIZE( (PyObject *) obj);
+ GET_TC(tc)->itemValue = NULL;
+}
+
+int Tuple_iterNext(JSOBJ obj, JSONTypeContext *tc)
+{
+ PyObject *item;
+
+ if (GET_TC(tc)->index >= GET_TC(tc)->size)
+ {
+ return 0;
+ }
+
+ item = PyTuple_GET_ITEM (obj, GET_TC(tc)->index);
+
+ GET_TC(tc)->itemValue = item;
+ GET_TC(tc)->index ++;
+ return 1;
+}
+
+void Tuple_iterEnd(JSOBJ obj, JSONTypeContext *tc)
+{
+}
+
+JSOBJ Tuple_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
+{
+ return GET_TC(tc)->itemValue;
+}
+
+char *Tuple_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
+{
+ return NULL;
+}
+
+//=============================================================================
+// Dir iteration functions
+// itemName ref is borrowed from PyObject_Dir (attrList). No refcount
+// itemValue ref is from PyObject_GetAttr. Ref counted
+//=============================================================================
+void Dir_iterBegin(JSOBJ obj, JSONTypeContext *tc)
+{
+ GET_TC(tc)->attrList = PyObject_Dir(obj);
+ GET_TC(tc)->index = 0;
+ GET_TC(tc)->size = PyList_GET_SIZE(GET_TC(tc)->attrList);
+ PRINTMARK();
+}
+
+void Dir_iterEnd(JSOBJ obj, JSONTypeContext *tc)
+{
+ if (GET_TC(tc)->itemValue)
+ {
+ Py_DECREF(GET_TC(tc)->itemValue);
+ GET_TC(tc)->itemValue = NULL;
+ }
+
+ if (GET_TC(tc)->itemName)
+ {
+ Py_DECREF(GET_TC(tc)->itemName);
+ GET_TC(tc)->itemName = NULL;
+ }
+
+ Py_DECREF( (PyObject *) GET_TC(tc)->attrList);
+ PRINTMARK();
+}
+
+int Dir_iterNext(JSOBJ _obj, JSONTypeContext *tc)
+{
+ PyObject *obj = (PyObject *) _obj;
+ PyObject *itemValue = GET_TC(tc)->itemValue;
+ PyObject *itemName = GET_TC(tc)->itemName;
+ PyObject* attr;
+ PyObject* attrName;
+ char* attrStr;
+
+
+ if (itemValue)
+ {
+ Py_DECREF(GET_TC(tc)->itemValue);
+ GET_TC(tc)->itemValue = itemValue = NULL;
+ }
+
+ if (itemName)
+ {
+ Py_DECREF(GET_TC(tc)->itemName);
+ GET_TC(tc)->itemName = itemName = NULL;
+ }
+
+ for (; GET_TC(tc)->index < GET_TC(tc)->size; GET_TC(tc)->index ++)
+ {
+ attrName = PyList_GET_ITEM(GET_TC(tc)->attrList, GET_TC(tc)->index);
+#if PY_MAJOR_VERSION >= 3
+ attr = PyUnicode_AsUTF8String(attrName);
+#else
+ attr = attrName;
+ Py_INCREF(attr);
+#endif
+ attrStr = PyString_AS_STRING(attr);
+
+ if (attrStr[0] == '_')
+ {
+ PRINTMARK();
+ Py_DECREF(attr);
+ continue;
+ }
+
+ itemValue = PyObject_GetAttr(obj, attrName);
+ if (itemValue == NULL)
+ {
+ PyErr_Clear();
+ Py_DECREF(attr);
+ PRINTMARK();
+ continue;
+ }
+
+ if (PyCallable_Check(itemValue))
+ {
+ Py_DECREF(itemValue);
+ Py_DECREF(attr);
+ PRINTMARK();
+ continue;
+ }
+
+ PRINTMARK();
+ itemName = attr;
+ break;
+ }
+
+ if (itemName == NULL)
+ {
+ GET_TC(tc)->index = GET_TC(tc)->size;
+ GET_TC(tc)->itemValue = NULL;
+ return 0;
+ }
+
+ GET_TC(tc)->itemName = itemName;
+ GET_TC(tc)->itemValue = itemValue;
+ GET_TC(tc)->index ++;
+
+ PRINTMARK();
+ return 1;
+}
+
+
+
+JSOBJ Dir_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
+{
+ PRINTMARK();
+ return GET_TC(tc)->itemValue;
+}
+
+char *Dir_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
+{
+ PRINTMARK();
+ *outLen = PyString_GET_SIZE(GET_TC(tc)->itemName);
+ return PyString_AS_STRING(GET_TC(tc)->itemName);
+}
+
+
+
+
+//=============================================================================
+// List iteration functions
+// itemValue is borrowed from object (which is list). No refcounting
+//=============================================================================
+void List_iterBegin(JSOBJ obj, JSONTypeContext *tc)
+{
+ GET_TC(tc)->index = 0;
+ GET_TC(tc)->size = PyList_GET_SIZE( (PyObject *) obj);
+}
+
+int List_iterNext(JSOBJ obj, JSONTypeContext *tc)
+{
+ if (GET_TC(tc)->index >= GET_TC(tc)->size)
+ {
+ PRINTMARK();
+ return 0;
+ }
+
+ GET_TC(tc)->itemValue = PyList_GET_ITEM (obj, GET_TC(tc)->index);
+ GET_TC(tc)->index ++;
+ return 1;
+}
+
+void List_iterEnd(JSOBJ obj, JSONTypeContext *tc)
+{
+}
+
+JSOBJ List_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
+{
+ return GET_TC(tc)->itemValue;
+}
+
+char *List_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
+{
+ return NULL;
+}
+
+//=============================================================================
+// pandas Index iteration functions
+//=============================================================================
+void Index_iterBegin(JSOBJ obj, JSONTypeContext *tc)
+{
+ GET_TC(tc)->index = 0;
+ GET_TC(tc)->citemName = PyObject_Malloc(20 * sizeof(char));
+ if (!GET_TC(tc)->citemName)
+ {
+ PyErr_NoMemory();
+ }
+ PRINTMARK();
+}
+
+int Index_iterNext(JSOBJ obj, JSONTypeContext *tc)
+{
+ Py_ssize_t index;
+ if (!GET_TC(tc)->citemName)
+ {
+ return 0;
+ }
+
+ index = GET_TC(tc)->index;
+ Py_XDECREF(GET_TC(tc)->itemValue);
+ if (index == 0)
+ {
+ memcpy(GET_TC(tc)->citemName, "name", sizeof(char)*5);
+ GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "name");
+ }
+ else
+ if (index == 1)
+ {
+ memcpy(GET_TC(tc)->citemName, "data", sizeof(char)*5);
+ GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "values");
+ }
+ else
+ {
+ PRINTMARK();
+ return 0;
+ }
+
+ GET_TC(tc)->index++;
+ PRINTMARK();
+ return 1;
+}
+
+void Index_iterEnd(JSOBJ obj, JSONTypeContext *tc)
+{
+ if (GET_TC(tc)->citemName)
+ {
+ PyObject_Free(GET_TC(tc)->citemName);
+ }
+ PRINTMARK();
+}
+
+JSOBJ Index_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
+{
+ return GET_TC(tc)->itemValue;
+}
+
+char *Index_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
+{
+ *outLen = strlen(GET_TC(tc)->citemName);
+ return GET_TC(tc)->citemName;
+}
+
+//=============================================================================
+// pandas Series iteration functions
+//=============================================================================
+void Series_iterBegin(JSOBJ obj, JSONTypeContext *tc)
+{
+ PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder;
+ GET_TC(tc)->index = 0;
+ GET_TC(tc)->citemName = PyObject_Malloc(20 * sizeof(char));
+ enc->outputFormat = VALUES; // for contained series
+ if (!GET_TC(tc)->citemName)
+ {
+ PyErr_NoMemory();
+ }
+ PRINTMARK();
+}
+
+int Series_iterNext(JSOBJ obj, JSONTypeContext *tc)
+{
+ Py_ssize_t index;
+ if (!GET_TC(tc)->citemName)
+ {
+ return 0;
+ }
+
+ index = GET_TC(tc)->index;
+ Py_XDECREF(GET_TC(tc)->itemValue);
+ if (index == 0)
+ {
+ memcpy(GET_TC(tc)->citemName, "name", sizeof(char)*5);
+ GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "name");
+ }
+ else
+ if (index == 1)
+ {
+ memcpy(GET_TC(tc)->citemName, "index", sizeof(char)*6);
+ GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "index");
+ }
+ else
+ if (index == 2)
+ {
+ memcpy(GET_TC(tc)->citemName, "data", sizeof(char)*5);
+ GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "values");
+ }
+ else
+ {
+ PRINTMARK();
+ return 0;
+ }
+
+ GET_TC(tc)->index++;
+ PRINTMARK();
+ return 1;
+}
+
+void Series_iterEnd(JSOBJ obj, JSONTypeContext *tc)
+{
+ PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder;
+ enc->outputFormat = enc->originalOutputFormat;
+ if (GET_TC(tc)->citemName)
+ {
+ PyObject_Free(GET_TC(tc)->citemName);
+ }
+ PRINTMARK();
+}
+
+JSOBJ Series_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
+{
+ return GET_TC(tc)->itemValue;
+}
+
+char *Series_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
+{
+ *outLen = strlen(GET_TC(tc)->citemName);
+ return GET_TC(tc)->citemName;
+}
+
+//=============================================================================
+// pandas DataFrame iteration functions
+//=============================================================================
+void DataFrame_iterBegin(JSOBJ obj, JSONTypeContext *tc)
+{
+ PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder;
+ GET_TC(tc)->index = 0;
+ GET_TC(tc)->citemName = PyObject_Malloc(20 * sizeof(char));
+ enc->outputFormat = VALUES; // for contained series & index
+ if (!GET_TC(tc)->citemName)
+ {
+ PyErr_NoMemory();
+ }
+ PRINTMARK();
+}
+
+int DataFrame_iterNext(JSOBJ obj, JSONTypeContext *tc)
+{
+ Py_ssize_t index;
+ if (!GET_TC(tc)->citemName)
+ {
+ return 0;
+ }
+
+ index = GET_TC(tc)->index;
+ Py_XDECREF(GET_TC(tc)->itemValue);
+ if (index == 0)
+ {
+ memcpy(GET_TC(tc)->citemName, "columns", sizeof(char)*8);
+ GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "columns");
+ }
+ else
+ if (index == 1)
+ {
+ memcpy(GET_TC(tc)->citemName, "index", sizeof(char)*6);
+ GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "index");
+ }
+ else
+ if (index == 2)
+ {
+ memcpy(GET_TC(tc)->citemName, "data", sizeof(char)*5);
+ GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "values");
+ }
+ else
+ {
+ PRINTMARK();
+ return 0;
+ }
+
+ GET_TC(tc)->index++;
+ PRINTMARK();
+ return 1;
+}
+
+void DataFrame_iterEnd(JSOBJ obj, JSONTypeContext *tc)
+{
+ PyObjectEncoder* enc = (PyObjectEncoder*) tc->encoder;
+ enc->outputFormat = enc->originalOutputFormat;
+ if (GET_TC(tc)->citemName)
+ {
+ PyObject_Free(GET_TC(tc)->citemName);
+ }
+ PRINTMARK();
+}
+
+JSOBJ DataFrame_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
+{
+ return GET_TC(tc)->itemValue;
+}
+
+char *DataFrame_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
+{
+ *outLen = strlen(GET_TC(tc)->citemName);
+ return GET_TC(tc)->citemName;
+}
+
+//=============================================================================
+// Dict iteration functions
+// itemName might converted to string (Python_Str). Do refCounting
+// itemValue is borrowed from object (which is dict). No refCounting
+//=============================================================================
+void Dict_iterBegin(JSOBJ obj, JSONTypeContext *tc)
+{
+ GET_TC(tc)->index = 0;
+ PRINTMARK();
+}
+
+int Dict_iterNext(JSOBJ obj, JSONTypeContext *tc)
+{
+#if PY_MAJOR_VERSION >= 3
+ PyObject* itemNameTmp;
+#endif
+
+ if (GET_TC(tc)->itemName)
+ {
+ Py_DECREF(GET_TC(tc)->itemName);
+ GET_TC(tc)->itemName = NULL;
+ }
+
+
+ if (!PyDict_Next ( (PyObject *)GET_TC(tc)->dictObj, &GET_TC(tc)->index, &GET_TC(tc)->itemName, &GET_TC(tc)->itemValue))
+ {
+ PRINTMARK();
+ return 0;
+ }
+
+ if (PyUnicode_Check(GET_TC(tc)->itemName))
+ {
+ GET_TC(tc)->itemName = PyUnicode_AsUTF8String (GET_TC(tc)->itemName);
+ }
+ else
+ if (!PyString_Check(GET_TC(tc)->itemName))
+ {
+ GET_TC(tc)->itemName = PyObject_Str(GET_TC(tc)->itemName);
+#if PY_MAJOR_VERSION >= 3
+ itemNameTmp = GET_TC(tc)->itemName;
+ GET_TC(tc)->itemName = PyUnicode_AsUTF8String (GET_TC(tc)->itemName);
+ Py_DECREF(itemNameTmp);
+#endif
+ }
+ else
+ {
+ Py_INCREF(GET_TC(tc)->itemName);
+ }
+ PRINTMARK();
+ return 1;
+}
+
+void Dict_iterEnd(JSOBJ obj, JSONTypeContext *tc)
+{
+ if (GET_TC(tc)->itemName)
+ {
+ Py_DECREF(GET_TC(tc)->itemName);
+ GET_TC(tc)->itemName = NULL;
+ }
+ Py_DECREF(GET_TC(tc)->dictObj);
+ PRINTMARK();
+}
+
+JSOBJ Dict_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
+{
+ return GET_TC(tc)->itemValue;
+}
+
+char *Dict_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
+{
+ *outLen = PyString_GET_SIZE(GET_TC(tc)->itemName);
+ return PyString_AS_STRING(GET_TC(tc)->itemName);
+}
+
+void NpyArr_freeLabels(char** labels, npy_intp len)
+{
+ npy_intp i;
+
+ if (labels)
+ {
+ for (i = 0; i < len; i++)
+ {
+ PyObject_Free(labels[i]);
+ }
+ PyObject_Free(labels);
+ }
+}
+
+char** NpyArr_encodeLabels(PyArrayObject* labels, JSONObjectEncoder* enc, npy_intp num)
+{
+ // NOTE this function steals a reference to labels.
+ PyArray_Descr *dtype = NULL;
+ PyArrayObject* labelsTmp = NULL;
+ PyObject* item = NULL;
+ npy_intp i, stride, len;
+ // npy_intp bufsize = 32768;
+ char** ret;
+ char *dataptr, *cLabel, *origend, *origst, *origoffset;
+ char labelBuffer[NPY_JSON_BUFSIZE];
+ PyArray_GetItemFunc* getitem;
+ PRINTMARK();
+
+ if (PyArray_SIZE(labels) < num)
+ {
+ PyErr_SetString(PyExc_ValueError, "Label array sizes do not match corresponding data shape");
+ Py_DECREF(labels);
+ return 0;
+ }
+
+ ret = PyObject_Malloc(sizeof(char*)*num);
+ if (!ret)
+ {
+ PyErr_NoMemory();
+ Py_DECREF(labels);
+ return 0;
+ }
+
+ for (i = 0; i < num; i++)
+ {
+ ret[i] = NULL;
+ }
+
+ origst = enc->start;
+ origend = enc->end;
+ origoffset = enc->offset;
+
+ if (PyArray_DESCR(labels)->type_num == NPY_DATETIME) {
+ dtype = PyArray_DescrFromType(NPY_INT64);
+ labelsTmp = labels;
+ labels = (PyArrayObject *) PyArray_CastToType(labels, dtype, 0);
+ Py_DECREF(labelsTmp);
+ }
+
+ stride = PyArray_STRIDE(labels, 0);
+ dataptr = PyArray_DATA(labels);
+ getitem = PyArray_DESCR(labels)->f->getitem;
+
+ for (i = 0; i < num; i++)
+ {
+ item = getitem(dataptr, labels);
+ if (!item)
+ {
+ NpyArr_freeLabels(ret, num);
+ ret = 0;
+ break;
+ }
+
+ cLabel = JSON_EncodeObject(item, enc, labelBuffer, NPY_JSON_BUFSIZE);
+ Py_DECREF(item);
+
+ if (PyErr_Occurred() || enc->errorMsg)
+ {
+ NpyArr_freeLabels(ret, num);
+ ret = 0;
+ break;
+ }
+
+ // trim off any quotes surrounding the result
+ if (*cLabel == '\"')
+ {
+ cLabel++;
+ enc->offset -= 2;
+ *(enc->offset) = '\0';
+ }
+
+ len = enc->offset - cLabel + 1;
+ ret[i] = PyObject_Malloc(sizeof(char)*len);
+
+ if (!ret[i])
+ {
+ PyErr_NoMemory();
+ ret = 0;
+ break;
+ }
+
+ memcpy(ret[i], cLabel, sizeof(char)*len);
+ dataptr += stride;
+ }
+
+ enc->start = origst;
+ enc->end = origend;
+ enc->offset = origoffset;
+
+ Py_DECREF(labels);
+ return ret;
+}
+
+void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc)
+{
+ PyObject *obj, *exc, *toDictFunc;
+ TypeContext *pc;
+ PyObjectEncoder *enc;
+ double val;
+ PRINTMARK();
+ if (!_obj) {
+ tc->type = JT_INVALID;
+ return;
+ }
+
+ obj = (PyObject*) _obj;
+ enc = (PyObjectEncoder*) tc->encoder;
+
+ tc->prv = PyObject_Malloc(sizeof(TypeContext));
+ pc = (TypeContext *) tc->prv;
+ if (!pc)
+ {
+ tc->type = JT_INVALID;
+ PyErr_NoMemory();
+ return;
+ }
+ pc->newObj = NULL;
+ pc->dictObj = NULL;
+ pc->itemValue = NULL;
+ pc->itemName = NULL;
+ pc->attrList = NULL;
+ pc->citemName = NULL;
+ pc->npyarr = NULL;
+ pc->rowLabels = NULL;
+ pc->columnLabels = NULL;
+ pc->index = 0;
+ pc->size = 0;
+ pc->longValue = 0;
+ pc->transpose = 0;
+ pc->rowLabelsLen = 0;
+ pc->columnLabelsLen = 0;
+
+ if (PyIter_Check(obj) || PyArray_Check(obj))
+ {
+ goto ISITERABLE;
+ }
+
+ if (PyBool_Check(obj))
+ {
+ PRINTMARK();
+ tc->type = (obj == Py_True) ? JT_TRUE : JT_FALSE;
+ return;
+ }
+ else
+ if (PyLong_Check(obj))
+ {
+ PRINTMARK();
+ pc->PyTypeToJSON = PyLongToINT64;
+ tc->type = JT_LONG;
+ GET_TC(tc)->longValue = PyLong_AsLongLong(obj);
+
+ exc = PyErr_Occurred();
+
+ if (exc && PyErr_ExceptionMatches(PyExc_OverflowError))
+ {
+ PRINTMARK();
+ goto INVALID;
+ }
+
+ return;
+ }
+ else
+ if (PyInt_Check(obj))
+ {
+ PRINTMARK();
+#ifdef _LP64
+ pc->PyTypeToJSON = PyIntToINT64; tc->type = JT_LONG;
+#else
+ pc->PyTypeToJSON = PyIntToINT32; tc->type = JT_INT;
+#endif
+ return;
+ }
+ else
+ if (PyArray_IsScalar(obj, Integer))
+ {
+ PRINTMARK();
+ pc->PyTypeToJSON = PyLongToINT64;
+ tc->type = JT_LONG;
+ PyArray_CastScalarToCtype(obj, &(GET_TC(tc)->longValue), PyArray_DescrFromType(NPY_INT64));
+
+ exc = PyErr_Occurred();
+
+ if (exc && PyErr_ExceptionMatches(PyExc_OverflowError))
+ {
+ PRINTMARK();
+ goto INVALID;
+ }
+
+ return;
+ }
+ else
+ if (PyString_Check(obj))
+ {
+ PRINTMARK();
+ pc->PyTypeToJSON = PyStringToUTF8; tc->type = JT_UTF8;
+ return;
+ }
+ else
+ if (PyUnicode_Check(obj))
+ {
+ PRINTMARK();
+ pc->PyTypeToJSON = PyUnicodeToUTF8; tc->type = JT_UTF8;
+ return;
+ }
+ else
+ if (PyFloat_Check(obj))
+ {
+ PRINTMARK();
+ val = PyFloat_AS_DOUBLE (obj);
+ if (npy_isnan(val) || npy_isinf(val))
+ {
+ tc->type = JT_NULL;
+ }
+ else
+ {
+ pc->PyTypeToJSON = PyFloatToDOUBLE; tc->type = JT_DOUBLE;
+ }
+ return;
+ }
+ else
+ if (PyArray_IsScalar(obj, Float))
+ {
+ PRINTMARK();
+ pc->PyTypeToJSON = NpyFloatToDOUBLE; tc->type = JT_DOUBLE;
+ return;
+ }
+ else
+ if (PyArray_IsScalar(obj, Datetime))
+ {
+ PRINTMARK();
+ pc->PyTypeToJSON = NpyDateTimeToINT64; tc->type = JT_LONG;
+ return;
+ }
+ else
+ if (PyDateTime_Check(obj))
+ {
+ PRINTMARK();
+ pc->PyTypeToJSON = PyDateTimeToINT64; tc->type = JT_LONG;
+ return;
+ }
+ else
+ if (PyDate_Check(obj))
+ {
+ PRINTMARK();
+ pc->PyTypeToJSON = PyDateToINT64; tc->type = JT_LONG;
+ return;
+ }
+ else
+ if (obj == Py_None)
+ {
+ PRINTMARK();
+ tc->type = JT_NULL;
+ return;
+ }
+
+
+ISITERABLE:
+
+ if (PyDict_Check(obj))
+ {
+ PRINTMARK();
+ tc->type = JT_OBJECT;
+ pc->iterBegin = Dict_iterBegin;
+ pc->iterEnd = Dict_iterEnd;
+ pc->iterNext = Dict_iterNext;
+ pc->iterGetValue = Dict_iterGetValue;
+ pc->iterGetName = Dict_iterGetName;
+ pc->dictObj = obj;
+ Py_INCREF(obj);
+
+ return;
+ }
+ else
+ if (PyList_Check(obj))
+ {
+ PRINTMARK();
+ tc->type = JT_ARRAY;
+ pc->iterBegin = List_iterBegin;
+ pc->iterEnd = List_iterEnd;
+ pc->iterNext = List_iterNext;
+ pc->iterGetValue = List_iterGetValue;
+ pc->iterGetName = List_iterGetName;
+ return;
+ }
+ else
+ if (PyTuple_Check(obj))
+ {
+ PRINTMARK();
+ tc->type = JT_ARRAY;
+ pc->iterBegin = Tuple_iterBegin;
+ pc->iterEnd = Tuple_iterEnd;
+ pc->iterNext = Tuple_iterNext;
+ pc->iterGetValue = Tuple_iterGetValue;
+ pc->iterGetName = Tuple_iterGetName;
+ return;
+ }
+ else
+ if (PyObject_TypeCheck(obj, (PyTypeObject*) cls_index))
+ {
+ if (enc->outputFormat == SPLIT)
+ {
+ PRINTMARK();
+ tc->type = JT_OBJECT;
+ pc->iterBegin = Index_iterBegin;
+ pc->iterEnd = Index_iterEnd;
+ pc->iterNext = Index_iterNext;
+ pc->iterGetValue = Index_iterGetValue;
+ pc->iterGetName = Index_iterGetName;
+ return;
+ }
+
+ PRINTMARK();
+ tc->type = JT_ARRAY;
+ pc->newObj = PyObject_GetAttrString(obj, "values");
+ pc->iterBegin = NpyArr_iterBegin;
+ pc->iterEnd = NpyArr_iterEnd;
+ pc->iterNext = NpyArr_iterNext;
+ pc->iterGetValue = NpyArr_iterGetValue;
+ pc->iterGetName = NpyArr_iterGetName;
+ return;
+ }
+ else
+ if (PyObject_TypeCheck(obj, (PyTypeObject*) cls_series))
+ {
+ if (enc->outputFormat == SPLIT)
+ {
+ PRINTMARK();
+ tc->type = JT_OBJECT;
+ pc->iterBegin = Series_iterBegin;
+ pc->iterEnd = Series_iterEnd;
+ pc->iterNext = Series_iterNext;
+ pc->iterGetValue = Series_iterGetValue;
+ pc->iterGetName = Series_iterGetName;
+ return;
+ }
+
+ if (enc->outputFormat == INDEX || enc->outputFormat == COLUMNS)
+ {
+ PRINTMARK();
+ tc->type = JT_OBJECT;
+ pc->columnLabelsLen = PyArray_SIZE(obj);
+ pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "index"), (JSONObjectEncoder*) enc, pc->columnLabelsLen);
+ if (!pc->columnLabels)
+ {
+ goto INVALID;
+ }
+ }
+ else
+ {
+ PRINTMARK();
+ tc->type = JT_ARRAY;
+ }
+ pc->newObj = PyObject_GetAttrString(obj, "values");
+ pc->iterBegin = NpyArr_iterBegin;
+ pc->iterEnd = NpyArr_iterEnd;
+ pc->iterNext = NpyArr_iterNext;
+ pc->iterGetValue = NpyArr_iterGetValue;
+ pc->iterGetName = NpyArr_iterGetName;
+ return;
+ }
+ else
+ if (PyArray_Check(obj))
+ {
+ if (enc->npyCtxtPassthru)
+ {
+ PRINTMARK();
+ pc->npyarr = enc->npyCtxtPassthru;
+ tc->type = (pc->npyarr->columnLabels ? JT_OBJECT : JT_ARRAY);
+ pc->iterBegin = NpyArrPassThru_iterBegin;
+ pc->iterEnd = NpyArrPassThru_iterEnd;
+ pc->iterNext = NpyArr_iterNext;
+ pc->iterGetValue = NpyArr_iterGetValue;
+ pc->iterGetName = NpyArr_iterGetName;
+ enc->npyCtxtPassthru = NULL;
+ return;
+ }
+
+ PRINTMARK();
+ tc->type = JT_ARRAY;
+ pc->iterBegin = NpyArr_iterBegin;
+ pc->iterEnd = NpyArr_iterEnd;
+ pc->iterNext = NpyArr_iterNext;
+ pc->iterGetValue = NpyArr_iterGetValue;
+ pc->iterGetName = NpyArr_iterGetName;
+ return;
+ }
+ else
+ if (PyObject_TypeCheck(obj, (PyTypeObject*) cls_dataframe))
+ {
+ if (enc->outputFormat == SPLIT)
+ {
+ PRINTMARK();
+ tc->type = JT_OBJECT;
+ pc->iterBegin = DataFrame_iterBegin;
+ pc->iterEnd = DataFrame_iterEnd;
+ pc->iterNext = DataFrame_iterNext;
+ pc->iterGetValue = DataFrame_iterGetValue;
+ pc->iterGetName = DataFrame_iterGetName;
+ return;
+ }
+
+ PRINTMARK();
+ pc->newObj = PyObject_GetAttrString(obj, "values");
+ pc->iterBegin = NpyArr_iterBegin;
+ pc->iterEnd = NpyArr_iterEnd;
+ pc->iterNext = NpyArr_iterNext;
+ pc->iterGetValue = NpyArr_iterGetValue;
+ pc->iterGetName = NpyArr_iterGetName;
+ if (enc->outputFormat == VALUES)
+ {
+ PRINTMARK();
+ tc->type = JT_ARRAY;
+ }
+ else
+ if (enc->outputFormat == RECORDS)
+ {
+ PRINTMARK();
+ tc->type = JT_ARRAY;
+ pc->columnLabelsLen = PyArray_DIM(pc->newObj, 1);
+ pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "columns"), (JSONObjectEncoder*) enc, pc->columnLabelsLen);
+ if (!pc->columnLabels)
+ {
+ goto INVALID;
+ }
+ }
+ else
+ if (enc->outputFormat == INDEX)
+ {
+ PRINTMARK();
+ tc->type = JT_OBJECT;
+ pc->rowLabelsLen = PyArray_DIM(pc->newObj, 0);
+ pc->rowLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "index"), (JSONObjectEncoder*) enc, pc->rowLabelsLen);
+ if (!pc->rowLabels)
+ {
+ goto INVALID;
+ }
+ pc->columnLabelsLen = PyArray_DIM(pc->newObj, 1);
+ pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "columns"), (JSONObjectEncoder*) enc, pc->columnLabelsLen);
+ if (!pc->columnLabels)
+ {
+ NpyArr_freeLabels(pc->rowLabels, pc->rowLabelsLen);
+ pc->rowLabels = NULL;
+ goto INVALID;
+ }
+ }
+ else
+ {
+ PRINTMARK();
+ tc->type = JT_OBJECT;
+ pc->rowLabelsLen = PyArray_DIM(pc->newObj, 1);
+ pc->rowLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "columns"), (JSONObjectEncoder*) enc, pc->rowLabelsLen);
+ if (!pc->rowLabels)
+ {
+ goto INVALID;
+ }
+ pc->columnLabelsLen = PyArray_DIM(pc->newObj, 0);
+ pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(obj, "index"), (JSONObjectEncoder*) enc, pc->columnLabelsLen);
+ if (!pc->columnLabels)
+ {
+ NpyArr_freeLabels(pc->rowLabels, pc->rowLabelsLen);
+ pc->rowLabels = NULL;
+ goto INVALID;
+ }
+ pc->transpose = 1;
+ }
+ return;
+ }
+
+
+ toDictFunc = PyObject_GetAttrString(obj, "toDict");
+
+ if (toDictFunc)
+ {
+ PyObject* tuple = PyTuple_New(0);
+ PyObject* toDictResult = PyObject_Call(toDictFunc, tuple, NULL);
+ Py_DECREF(tuple);
+ Py_DECREF(toDictFunc);
+
+ if (toDictResult == NULL)
+ {
+ PyErr_Clear();
+ tc->type = JT_NULL;
+ return;
+ }
+
+ if (!PyDict_Check(toDictResult))
+ {
+ Py_DECREF(toDictResult);
+ tc->type = JT_NULL;
+ return;
+ }
+
+ PRINTMARK();
+ tc->type = JT_OBJECT;
+ pc->iterBegin = Dict_iterBegin;
+ pc->iterEnd = Dict_iterEnd;
+ pc->iterNext = Dict_iterNext;
+ pc->iterGetValue = Dict_iterGetValue;
+ pc->iterGetName = Dict_iterGetName;
+ pc->dictObj = toDictResult;
+ return;
+ }
+
+ PyErr_Clear();
+
+ tc->type = JT_OBJECT;
+ pc->iterBegin = Dir_iterBegin;
+ pc->iterEnd = Dir_iterEnd;
+ pc->iterNext = Dir_iterNext;
+ pc->iterGetValue = Dir_iterGetValue;
+ pc->iterGetName = Dir_iterGetName;
+
+ return;
+
+INVALID:
+ tc->type = JT_INVALID;
+ PyObject_Free(tc->prv);
+ tc->prv = NULL;
+ return;
+}
+
+
+void Object_endTypeContext(JSOBJ obj, JSONTypeContext *tc)
+{
+ Py_XDECREF(GET_TC(tc)->newObj);
+ NpyArr_freeLabels(GET_TC(tc)->rowLabels, GET_TC(tc)->rowLabelsLen);
+ NpyArr_freeLabels(GET_TC(tc)->columnLabels, GET_TC(tc)->columnLabelsLen);
+
+ PyObject_Free(tc->prv);
+ tc->prv = NULL;
+}
+
+const char *Object_getStringValue(JSOBJ obj, JSONTypeContext *tc, size_t *_outLen)
+{
+ return GET_TC(tc)->PyTypeToJSON (obj, tc, NULL, _outLen);
+}
+
+JSINT64 Object_getLongValue(JSOBJ obj, JSONTypeContext *tc)
+{
+ JSINT64 ret;
+ GET_TC(tc)->PyTypeToJSON (obj, tc, &ret, NULL);
+
+ return ret;
+}
+
+JSINT32 Object_getIntValue(JSOBJ obj, JSONTypeContext *tc)
+{
+ JSINT32 ret;
+ GET_TC(tc)->PyTypeToJSON (obj, tc, &ret, NULL);
+ return ret;
+}
+
+
+double Object_getDoubleValue(JSOBJ obj, JSONTypeContext *tc)
+{
+ double ret;
+ GET_TC(tc)->PyTypeToJSON (obj, tc, &ret, NULL);
+ return ret;
+}
+
+static void Object_releaseObject(JSOBJ _obj)
+{
+ Py_DECREF( (PyObject *) _obj);
+}
+
+
+
+void Object_iterBegin(JSOBJ obj, JSONTypeContext *tc)
+{
+ GET_TC(tc)->iterBegin(obj, tc);
+}
+
+int Object_iterNext(JSOBJ obj, JSONTypeContext *tc)
+{
+ return GET_TC(tc)->iterNext(obj, tc);
+}
+
+void Object_iterEnd(JSOBJ obj, JSONTypeContext *tc)
+{
+ GET_TC(tc)->iterEnd(obj, tc);
+}
+
+JSOBJ Object_iterGetValue(JSOBJ obj, JSONTypeContext *tc)
+{
+ return GET_TC(tc)->iterGetValue(obj, tc);
+}
+
+char *Object_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen)
+{
+ return GET_TC(tc)->iterGetName(obj, tc, outLen);
+}
+
+
+PyObject* objToJSON(PyObject* self, PyObject *args, PyObject *kwargs)
+{
+ static char *kwlist[] = { "obj", "ensure_ascii", "double_precision", "orient", NULL};
+
+ char buffer[65536];
+ char *ret;
+ PyObject *newobj;
+ PyObject *oinput = NULL;
+ PyObject *oensureAscii = NULL;
+ char *sOrient = NULL;
+ int idoublePrecision = 5; // default double precision setting
+
+ PyObjectEncoder pyEncoder =
+ {
+ {
+ Object_beginTypeContext, //void (*beginTypeContext)(JSOBJ obj, JSONTypeContext *tc);
+ Object_endTypeContext, //void (*endTypeContext)(JSOBJ obj, JSONTypeContext *tc);
+ Object_getStringValue, //const char *(*getStringValue)(JSOBJ obj, JSONTypeContext *tc, size_t *_outLen);
+ Object_getLongValue, //JSLONG (*getLongValue)(JSOBJ obj, JSONTypeContext *tc);
+ Object_getIntValue, //JSLONG (*getLongValue)(JSOBJ obj, JSONTypeContext *tc);
+ Object_getDoubleValue, //double (*getDoubleValue)(JSOBJ obj, JSONTypeContext *tc);
+ Object_iterBegin, //JSPFN_ITERBEGIN iterBegin;
+ Object_iterNext, //JSPFN_ITERNEXT iterNext;
+ Object_iterEnd, //JSPFN_ITEREND iterEnd;
+ Object_iterGetValue, //JSPFN_ITERGETVALUE iterGetValue;
+ Object_iterGetName, //JSPFN_ITERGETNAME iterGetName;
+ Object_releaseObject, //void (*releaseValue)(JSONTypeContext *ti);
+ PyObject_Malloc, //JSPFN_MALLOC malloc;
+ PyObject_Realloc, //JSPFN_REALLOC realloc;
+ PyObject_Free, //JSPFN_FREE free;
+ -1, //recursionMax
+ idoublePrecision,
+ 1, //forceAscii
+ }
+ };
+ JSONObjectEncoder* encoder = (JSONObjectEncoder*) &pyEncoder;
+
+ pyEncoder.npyCtxtPassthru = NULL;
+ pyEncoder.outputFormat = COLUMNS;
+
+ PRINTMARK();
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|Ois", kwlist, &oinput, &oensureAscii, &idoublePrecision, &sOrient))
+ {
+ return NULL;
+ }
+
+ if (sOrient != NULL)
+ {
+ if (strcmp(sOrient, "records") == 0)
+ {
+ pyEncoder.outputFormat = RECORDS;
+ }
+ else
+ if (strcmp(sOrient, "index") == 0)
+ {
+ pyEncoder.outputFormat = INDEX;
+ }
+ else
+ if (strcmp(sOrient, "split") == 0)
+ {
+ pyEncoder.outputFormat = SPLIT;
+ }
+ else
+ if (strcmp(sOrient, "values") == 0)
+ {
+ pyEncoder.outputFormat = VALUES;
+ }
+ else
+ if (strcmp(sOrient, "columns") != 0)
+ {
+ PyErr_Format (PyExc_ValueError, "Invalid value '%s' for option 'orient'", sOrient);
+ return NULL;
+ }
+ }
+
+ pyEncoder.originalOutputFormat = pyEncoder.outputFormat;
+
+ if (oensureAscii != NULL && !PyObject_IsTrue(oensureAscii))
+ {
+ encoder->forceASCII = 0;
+ }
+
+ encoder->doublePrecision = idoublePrecision;
+
+ PRINTMARK();
+ ret = JSON_EncodeObject (oinput, encoder, buffer, sizeof (buffer));
+ PRINTMARK();
+
+ if (PyErr_Occurred())
+ {
+ return NULL;
+ }
+
+ if (encoder->errorMsg)
+ {
+ if (ret != buffer)
+ {
+ encoder->free (ret);
+ }
+
+ PyErr_Format (PyExc_OverflowError, "%s", encoder->errorMsg);
+ return NULL;
+ }
+
+ newobj = PyString_FromString (ret);
+
+ if (ret != buffer)
+ {
+ encoder->free (ret);
+ }
+
+ PRINTMARK();
+
+ return newobj;
+}
+
+PyObject* objToJSONFile(PyObject* self, PyObject *args, PyObject *kwargs)
+{
+ PyObject *data;
+ PyObject *file;
+ PyObject *string;
+ PyObject *write;
+ PyObject *argtuple;
+
+ PRINTMARK();
+
+ if (!PyArg_ParseTuple (args, "OO", &data, &file)) {
+ return NULL;
+ }
+
+ if (!PyObject_HasAttrString (file, "write"))
+ {
+ PyErr_Format (PyExc_TypeError, "expected file");
+ return NULL;
+ }
+
+ write = PyObject_GetAttrString (file, "write");
+
+ if (!PyCallable_Check (write)) {
+ Py_XDECREF(write);
+ PyErr_Format (PyExc_TypeError, "expected file");
+ return NULL;
+ }
+
+ argtuple = PyTuple_Pack(1, data);
+
+ string = objToJSON (self, argtuple, kwargs);
+
+ if (string == NULL)
+ {
+ Py_XDECREF(write);
+ Py_XDECREF(argtuple);
+ return NULL;
+ }
+
+ Py_XDECREF(argtuple);
+
+ argtuple = PyTuple_Pack (1, string);
+ if (argtuple == NULL)
+ {
+ Py_XDECREF(write);
+ return NULL;
+ }
+ if (PyObject_CallObject (write, argtuple) == NULL)
+ {
+ Py_XDECREF(write);
+ Py_XDECREF(argtuple);
+ return NULL;
+ }
+
+ Py_XDECREF(write);
+ Py_DECREF(argtuple);
+ Py_XDECREF(string);
+
+ PRINTMARK();
+
+ Py_RETURN_NONE;
+
+
+}
+
diff --git a/pandas/src/ujson/python/py_defines.h b/pandas/src/ujson/python/py_defines.h
new file mode 100644
index 0000000000000..1544c2e3cf34d
--- /dev/null
+++ b/pandas/src/ujson/python/py_defines.h
@@ -0,0 +1,15 @@
+#include <Python.h>
+
+#if PY_MAJOR_VERSION >= 3
+
+#define PyInt_Check PyLong_Check
+#define PyInt_AS_LONG PyLong_AsLong
+#define PyInt_FromLong PyLong_FromLong
+
+#define PyString_Check PyBytes_Check
+#define PyString_GET_SIZE PyBytes_GET_SIZE
+#define PyString_AS_STRING PyBytes_AS_STRING
+
+#define PyString_FromString PyUnicode_FromString
+
+#endif
diff --git a/pandas/src/ujson/python/ujson.c b/pandas/src/ujson/python/ujson.c
new file mode 100644
index 0000000000000..e04309e620a1d
--- /dev/null
+++ b/pandas/src/ujson/python/ujson.c
@@ -0,0 +1,73 @@
+#include "py_defines.h"
+#include "version.h"
+
+/* objToJSON */
+PyObject* objToJSON(PyObject* self, PyObject *args, PyObject *kwargs);
+void initObjToJSON(void);
+
+/* JSONToObj */
+PyObject* JSONToObj(PyObject* self, PyObject *args, PyObject *kwargs);
+
+/* objToJSONFile */
+PyObject* objToJSONFile(PyObject* self, PyObject *args, PyObject *kwargs);
+
+/* JSONFileToObj */
+PyObject* JSONFileToObj(PyObject* self, PyObject *args, PyObject *kwargs);
+
+
+static PyMethodDef ujsonMethods[] = {
+ {"encode", (PyCFunction) objToJSON, METH_VARARGS | METH_KEYWORDS, "Converts arbitrary object recursivly into JSON. Use ensure_ascii=false to output UTF-8. Pass in double_precision to alter the maximum digit precision with doubles"},
+ {"decode", (PyCFunction) JSONToObj, METH_VARARGS | METH_KEYWORDS, "Converts JSON as string to dict object structure"},
+ {"dumps", (PyCFunction) objToJSON, METH_VARARGS | METH_KEYWORDS, "Converts arbitrary object recursivly into JSON. Use ensure_ascii=false to output UTF-8"},
+ {"loads", (PyCFunction) JSONToObj, METH_VARARGS | METH_KEYWORDS, "Converts JSON as string to dict object structure"},
+ {"dump", (PyCFunction) objToJSONFile, METH_VARARGS | METH_KEYWORDS, "Converts arbitrary object recursively into JSON file. Use ensure_ascii=false to output UTF-8"},
+ {"load", (PyCFunction) JSONFileToObj, METH_VARARGS | METH_KEYWORDS, "Converts JSON as file to dict object structure"},
+ {NULL, NULL, 0, NULL} /* Sentinel */
+};
+
+#if PY_MAJOR_VERSION >= 3
+
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_pandasujson",
+ 0, /* m_doc */
+ -1, /* m_size */
+ ujsonMethods, /* m_methods */
+ NULL, /* m_reload */
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL /* m_free */
+};
+
+#define PYMODINITFUNC PyObject *PyInit_json(void)
+#define PYMODULE_CREATE() PyModule_Create(&moduledef)
+#define MODINITERROR return NULL
+
+#else
+
+#define PYMODINITFUNC PyMODINIT_FUNC initjson(void)
+#define PYMODULE_CREATE() Py_InitModule("json", ujsonMethods)
+#define MODINITERROR return
+
+#endif
+
+PYMODINITFUNC
+{
+ PyObject *module;
+ PyObject *version_string;
+
+ initObjToJSON();
+ module = PYMODULE_CREATE();
+
+ if (module == NULL)
+ {
+ MODINITERROR;
+ }
+
+ version_string = PyString_FromString (UJSON_VERSION);
+ PyModule_AddObject (module, "__version__", version_string);
+
+#if PY_MAJOR_VERSION >= 3
+ return module;
+#endif
+}
diff --git a/pandas/src/ujson/python/version.h b/pandas/src/ujson/python/version.h
new file mode 100644
index 0000000000000..9449441411192
--- /dev/null
+++ b/pandas/src/ujson/python/version.h
@@ -0,0 +1 @@
+#define UJSON_VERSION "1.18"
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index d674a2f44ebe1..2c6d3b221c6ff 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -3338,146 +3338,6 @@ def test_to_dict(self):
for k2, v2 in v.iteritems():
self.assertEqual(v2, recons_data[k][k2])
- def test_from_json_to_json(self):
- raise nose.SkipTest
-
- def _check_orient(df, orient, dtype=None, numpy=True):
- df = df.sort()
- dfjson = df.to_json(orient=orient)
- unser = DataFrame.from_json(dfjson, orient=orient, dtype=dtype,
- numpy=numpy)
- unser = unser.sort()
- if df.index.dtype.type == np.datetime64:
- unser.index = DatetimeIndex(unser.index.values.astype('i8'))
- if orient == "records":
- # index is not captured in this orientation
- assert_almost_equal(df.values, unser.values)
- self.assert_(df.columns.equals(unser.columns))
- elif orient == "values":
- # index and cols are not captured in this orientation
- assert_almost_equal(df.values, unser.values)
- elif orient == "split":
- # index and col labels might not be strings
- unser.index = [str(i) for i in unser.index]
- unser.columns = [str(i) for i in unser.columns]
- unser = unser.sort()
- assert_almost_equal(df.values, unser.values)
- else:
- assert_frame_equal(df, unser)
-
- def _check_all_orients(df, dtype=None):
- _check_orient(df, "columns", dtype=dtype)
- _check_orient(df, "records", dtype=dtype)
- _check_orient(df, "split", dtype=dtype)
- _check_orient(df, "index", dtype=dtype)
- _check_orient(df, "values", dtype=dtype)
-
- _check_orient(df, "columns", dtype=dtype, numpy=False)
- _check_orient(df, "records", dtype=dtype, numpy=False)
- _check_orient(df, "split", dtype=dtype, numpy=False)
- _check_orient(df, "index", dtype=dtype, numpy=False)
- _check_orient(df, "values", dtype=dtype, numpy=False)
-
- # basic
- _check_all_orients(self.frame)
- self.assertEqual(self.frame.to_json(),
- self.frame.to_json(orient="columns"))
-
- _check_all_orients(self.intframe, dtype=self.intframe.values.dtype)
-
- # big one
- # index and columns are strings as all unserialised JSON object keys
- # are assumed to be strings
- biggie = DataFrame(np.zeros((200, 4)),
- columns=[str(i) for i in range(4)],
- index=[str(i) for i in range(200)])
- _check_all_orients(biggie)
-
- # dtypes
- _check_all_orients(DataFrame(biggie, dtype=np.float64),
- dtype=np.float64)
- _check_all_orients(DataFrame(biggie, dtype=np.int64), dtype=np.int64)
- _check_all_orients(DataFrame(biggie, dtype='<U3'), dtype='<U3')
-
- # empty
- _check_all_orients(self.empty)
-
- # time series data
- _check_all_orients(self.tsframe)
-
- # mixed data
- index = Index(['a', 'b', 'c', 'd', 'e'])
- data = {
- 'A': [0., 1., 2., 3., 4.],
- 'B': [0., 1., 0., 1., 0.],
- 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
- 'D': [True, False, True, False, True]
- }
- df = DataFrame(data=data, index=index)
- _check_orient(df, "split")
- _check_orient(df, "records")
- _check_orient(df, "values")
- _check_orient(df, "columns")
- # index oriented is problematic as it is read back in in a transposed
- # state, so the columns are interpreted as having mixed data and
- # given object dtypes.
- # force everything to have object dtype beforehand
- _check_orient(df.transpose().transpose(), "index")
-
- def test_from_json_bad_data(self):
- raise nose.SkipTest
- self.assertRaises(ValueError, DataFrame.from_json, '{"key":b:a:d}')
-
- # too few indices
- json = ('{"columns":["A","B"],'
- '"index":["2","3"],'
- '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}"')
- self.assertRaises(AssertionError, DataFrame.from_json, json,
- orient="split")
-
- # too many columns
- json = ('{"columns":["A","B","C"],'
- '"index":["1","2","3"],'
- '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}"')
- self.assertRaises(AssertionError, DataFrame.from_json, json,
- orient="split")
-
- # bad key
- json = ('{"badkey":["A","B"],'
- '"index":["2","3"],'
- '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}"')
- self.assertRaises(TypeError, DataFrame.from_json, json,
- orient="split")
-
- def test_from_json_nones(self):
- raise nose.SkipTest
- df = DataFrame([[1, 2], [4, 5, 6]])
- unser = DataFrame.from_json(df.to_json())
- self.assert_(np.isnan(unser['2'][0]))
-
- df = DataFrame([['1', '2'], ['4', '5', '6']])
- unser = DataFrame.from_json(df.to_json())
- self.assert_(unser['2'][0] is None)
-
- unser = DataFrame.from_json(df.to_json(), numpy=False)
- self.assert_(unser['2'][0] is None)
-
- # infinities get mapped to nulls which get mapped to NaNs during
- # deserialisation
- df = DataFrame([[1, 2], [4, 5, 6]])
- df[2][0] = np.inf
- unser = DataFrame.from_json(df.to_json())
- self.assert_(np.isnan(unser['2'][0]))
-
- df[2][0] = np.NINF
- unser = DataFrame.from_json(df.to_json())
- self.assert_(np.isnan(unser['2'][0]))
-
- def test_to_json_except(self):
- raise nose.SkipTest
- df = DataFrame([1, 2, 3])
- self.assertRaises(ValueError, df.to_json, orient="garbage")
-
def test_to_records_dt64(self):
df = DataFrame([["one", "two", "three"],
["four", "five", "six"]],
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index e1589b9499757..88990bdde98b8 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -561,62 +561,6 @@ def test_fromDict(self):
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
- def test_from_json_to_json(self):
- raise nose.SkipTest
-
- def _check_orient(series, orient, dtype=None, numpy=True):
- series = series.sort_index()
- unser = Series.from_json(series.to_json(orient=orient),
- orient=orient, numpy=numpy, dtype=dtype)
- unser = unser.sort_index()
- if series.index.dtype.type == np.datetime64:
- unser.index = DatetimeIndex(unser.index.values.astype('i8'))
- if orient == "records" or orient == "values":
- assert_almost_equal(series.values, unser.values)
- else:
- try:
- assert_series_equal(series, unser)
- except:
- raise
- if orient == "split":
- self.assert_(series.name == unser.name)
-
- def _check_all_orients(series, dtype=None):
- _check_orient(series, "columns", dtype=dtype)
- _check_orient(series, "records", dtype=dtype)
- _check_orient(series, "split", dtype=dtype)
- _check_orient(series, "index", dtype=dtype)
- _check_orient(series, "values", dtype=dtype)
-
- _check_orient(series, "columns", dtype=dtype, numpy=False)
- _check_orient(series, "records", dtype=dtype, numpy=False)
- _check_orient(series, "split", dtype=dtype, numpy=False)
- _check_orient(series, "index", dtype=dtype, numpy=False)
- _check_orient(series, "values", dtype=dtype, numpy=False)
-
- # basic
- _check_all_orients(self.series)
- self.assertEqual(self.series.to_json(),
- self.series.to_json(orient="index"))
-
- objSeries = Series([str(d) for d in self.objSeries],
- index=self.objSeries.index,
- name=self.objSeries.name)
- _check_all_orients(objSeries)
- _check_all_orients(self.empty)
- _check_all_orients(self.ts)
-
- # dtype
- s = Series(range(6), index=['a', 'b', 'c', 'd', 'e', 'f'])
- _check_all_orients(Series(s, dtype=np.float64), dtype=np.float64)
- _check_all_orients(Series(s, dtype=np.int), dtype=np.int)
-
-
- def test_to_json_except(self):
- raise nose.SkipTest
- s = Series([1, 2, 3])
- self.assertRaises(ValueError, s.to_json, orient="garbage")
-
def test_setindex(self):
# wrong type
series = self.series.copy()
diff --git a/scripts/json_manip.py b/scripts/json_manip.py
new file mode 100644
index 0000000000000..e76a99cca344a
--- /dev/null
+++ b/scripts/json_manip.py
@@ -0,0 +1,421 @@
+"""
+
+Tasks
+-------
+
+Search and transform jsonable structures, specifically to make it 'easy' to make tabular/csv output for other consumers.
+
+Example
+~~~~~~~~~~~~~
+
+ *give me a list of all the fields called 'id' in this stupid, gnarly
+ thing*
+
+ >>> Q('id',gnarly_data)
+ ['id1','id2','id3']
+
+
+Observations:
+---------------------
+
+1) 'simple data structures' exist and are common. They are tedious
+ to search.
+
+2) The DOM is another nested / treeish structure, and jQuery selector is
+ a good tool for that.
+
+3a) R, Numpy, Excel and other analysis tools want 'tabular' data. These
+ analyses are valuable and worth doing.
+
+3b) Dot/Graphviz, NetworkX, and some other analyses *like* treeish/dicty
+ things, and those analyses are also worth doing!
+
+3c) Some analyses are best done using 'one-off' and custom code in C, Python,
+ or another 'real' programming language.
+
+4) Arbitrary transforms are tedious and error prone. SQL is one solution,
+ XSLT is another,
+
+5) the XPATH/XML/XSLT family is.... not universally loved :) They are
+ very complete, and the completeness can make simple cases... gross.
+
+6) For really complicated data structures, we can write one-off code. Getting
+ 80% of the way is mostly okay. There will always have to be programmers
+ in the loop.
+
+7) Re-inventing SQL is probably a failure mode. So is reinventing XPATH, XSLT
+ and the like. Be wary of mission creep! Re-use when possible (e.g., can
+ we put the thing into a DOM using
+
+8) If the interface is good, people can improve performance later.
+
+
+Simplifying
+---------------
+
+
+1) Assuming 'jsonable' structures
+
+2) keys are strings or stringlike. Python allows any hashable to be a key.
+ for now, we pretend that doesn't happen.
+
+3) assumes most dicts are 'well behaved'. DAG, no cycles!
+
+4) assume that if people want really specialized transforms, they can do it
+ themselves.
+
+"""
+
+from collections import Counter, namedtuple
+import csv
+import itertools
+from itertools import product
+from operator import attrgetter as aget, itemgetter as iget
+import operator
+import sys
+
+
+
+## note 'url' appears multiple places and not all extensions have same struct
+ex1 = {
+ 'name': 'Gregg',
+ 'extensions': [
+ {'id':'hello',
+ 'url':'url1'},
+ {'id':'gbye',
+ 'url':'url2',
+ 'more': dict(url='url3')},
+ ]
+}
+
+## much longer example
+ex2 = {u'metadata': {u'accessibilities': [{u'name': u'accessibility.tabfocus',
+ u'value': 7},
+ {u'name': u'accessibility.mouse_focuses_formcontrol', u'value': False},
+ {u'name': u'accessibility.browsewithcaret', u'value': False},
+ {u'name': u'accessibility.win32.force_disabled', u'value': False},
+ {u'name': u'accessibility.typeaheadfind.startlinksonly', u'value': False},
+ {u'name': u'accessibility.usebrailledisplay', u'value': u''},
+ {u'name': u'accessibility.typeaheadfind.timeout', u'value': 5000},
+ {u'name': u'accessibility.typeaheadfind.enabletimeout', u'value': True},
+ {u'name': u'accessibility.tabfocus_applies_to_xul', u'value': False},
+ {u'name': u'accessibility.typeaheadfind.flashBar', u'value': 1},
+ {u'name': u'accessibility.typeaheadfind.autostart', u'value': True},
+ {u'name': u'accessibility.blockautorefresh', u'value': False},
+ {u'name': u'accessibility.browsewithcaret_shortcut.enabled',
+ u'value': True},
+ {u'name': u'accessibility.typeaheadfind.enablesound', u'value': True},
+ {u'name': u'accessibility.typeaheadfind.prefillwithselection',
+ u'value': True},
+ {u'name': u'accessibility.typeaheadfind.soundURL', u'value': u'beep'},
+ {u'name': u'accessibility.typeaheadfind', u'value': False},
+ {u'name': u'accessibility.typeaheadfind.casesensitive', u'value': 0},
+ {u'name': u'accessibility.warn_on_browsewithcaret', u'value': True},
+ {u'name': u'accessibility.usetexttospeech', u'value': u''},
+ {u'name': u'accessibility.accesskeycausesactivation', u'value': True},
+ {u'name': u'accessibility.typeaheadfind.linksonly', u'value': False},
+ {u'name': u'isInstantiated', u'value': True}],
+ u'extensions': [{u'id': u'216ee7f7f4a5b8175374cd62150664efe2433a31',
+ u'isEnabled': True},
+ {u'id': u'1aa53d3b720800c43c4ced5740a6e82bb0b3813e', u'isEnabled': False},
+ {u'id': u'01ecfac5a7bd8c9e27b7c5499e71c2d285084b37', u'isEnabled': True},
+ {u'id': u'1c01f5b22371b70b312ace94785f7b0b87c3dfb2', u'isEnabled': True},
+ {u'id': u'fb723781a2385055f7d024788b75e959ad8ea8c3', u'isEnabled': True}],
+ u'fxVersion': u'9.0',
+ u'location': u'zh-CN',
+ u'operatingSystem': u'WINNT Windows NT 5.1',
+ u'surveyAnswers': u'',
+ u'task_guid': u'd69fbd15-2517-45b5-8a17-bb7354122a75',
+ u'tpVersion': u'1.2',
+ u'updateChannel': u'beta'},
+ u'survey_data': {
+ u'extensions': [{u'appDisabled': False,
+ u'id': u'testpilot?labs.mozilla.com',
+ u'isCompatible': True,
+ u'isEnabled': True,
+ u'isPlatformCompatible': True,
+ u'name': u'Test Pilot'},
+ {u'appDisabled': True,
+ u'id': u'dict?www.youdao.com',
+ u'isCompatible': False,
+ u'isEnabled': False,
+ u'isPlatformCompatible': True,
+ u'name': u'Youdao Word Capturer'},
+ {u'appDisabled': False,
+ u'id': u'jqs?sun.com',
+ u'isCompatible': True,
+ u'isEnabled': True,
+ u'isPlatformCompatible': True,
+ u'name': u'Java Quick Starter'},
+ {u'appDisabled': False,
+ u'id': u'?20a82645-c095-46ed-80e3-08825760534b?',
+ u'isCompatible': True,
+ u'isEnabled': True,
+ u'isPlatformCompatible': True,
+ u'name': u'Microsoft .NET Framework Assistant'},
+ {u'appDisabled': False,
+ u'id': u'?a0d7ccb3-214d-498b-b4aa-0e8fda9a7bf7?',
+ u'isCompatible': True,
+ u'isEnabled': True,
+ u'isPlatformCompatible': True,
+ u'name': u'WOT'}],
+ u'version_number': 1}}
+
+# class SurveyResult(object):
+
+# def __init__(self, record):
+# self.record = record
+# self.metadata, self.survey_data = self._flatten_results()
+
+# def _flatten_results(self):
+# survey_data = self.record['survey_data']
+# extensions = DataFrame(survey_data['extensions'])
+
+def denorm(queries,iterable_of_things,default=None):
+ """
+ 'repeat', or 'stutter' to 'tableize' for downstream.
+ (I have no idea what a good word for this is!)
+
+ Think ``kronecker`` products, or:
+
+ ``SELECT single,multiple FROM table;``
+
+ single multiple
+ ------- ---------
+ id1 val1
+ id1 val2
+
+
+ Args:
+
+ queries: iterable of ``Q`` queries.
+ iterable_of_things: to be queried.
+
+ Returns:
+
+ list of 'stuttered' output, where if a query returns
+ a 'single', it gets repeated appropriately.
+
+
+ """
+
+ def _denorm(queries,thing):
+ fields = []
+ results = []
+ for q in queries:
+ #print q
+ r = Ql(q,thing)
+ #print "-- result: ", r
+ if not r:
+ r = [default]
+ if type(r[0]) is type({}):
+ fields.append(sorted(r[0].keys())) # dicty answers
+ else:
+ fields.append([q]) # stringy answer
+
+ results.append(r)
+
+ #print results
+ #print fields
+ flist = list(flatten(*map(iter,fields)))
+
+ prod = itertools.product(*results)
+ for p in prod:
+ U = dict()
+ for (ii,thing) in enumerate(p):
+ #print ii,thing
+ if type(thing) is type({}):
+ U.update(thing)
+ else:
+ U[fields[ii][0]] = thing
+
+ yield U
+
+ return list(flatten(*[_denorm(queries,thing) for thing in iterable_of_things]))
+
+
+def default_iget(fields,default=None,):
+ """ itemgetter with 'default' handling, that *always* returns lists
+
+ API CHANGES from ``operator.itemgetter``
+
+ Note: Sorry to break the iget api... (fields vs *fields)
+ Note: *always* returns a list... unlike itemgetter,
+ which can return tuples or 'singles'
+ """
+ myiget = operator.itemgetter(*fields)
+ L = len(fields)
+ def f(thing):
+ try:
+ ans = list(myiget(thing))
+ if L < 2:
+ ans = [ans,]
+ return ans
+ except KeyError:
+ # slower!
+ return [thing.get(x,default) for x in fields]
+
+ f.__doc__ = "itemgetter with default %r for fields %r" %(default,fields)
+ f.__name__ = "default_itemgetter"
+ return f
+
+
+def flatten(*stack):
+ """
+ helper function for flattening iterables of generators in a
+ sensible way.
+ """
+ stack = list(stack)
+ while stack:
+ try: x = stack[0].next()
+ except StopIteration:
+ stack.pop(0)
+ continue
+ if hasattr(x,'next') and callable(getattr(x,'next')):
+ stack.insert(0, x)
+
+ #if isinstance(x, (GeneratorType,listerator)):
+ else: yield x
+
+
+def _Q(filter_, thing):
+ """ underlying machinery for Q function recursion """
+ T = type(thing)
+ if T is type({}):
+ for k,v in thing.iteritems():
+ #print k,v
+ if filter_ == k:
+ if type(v) is type([]):
+ yield iter(v)
+ else:
+ yield v
+
+ if type(v) in (type({}),type([])):
+ yield Q(filter_,v)
+
+ elif T is type([]):
+ for k in thing:
+ #print k
+ yield Q(filter_,k)
+
+ else:
+ # no recursion.
+ pass
+
+def Q(filter_,thing):
+ """
+ type(filter):
+ - list: a flattened list of all searches (one list)
+ - dict: dict with vals each of which is that search
+
+ Notes:
+
+ [1] 'parent thing', with space, will do a descendent
+ [2] this will come back 'flattened' jQuery style
+ [3] returns a generator. Use ``Ql`` if you want a list.
+
+ """
+ if type(filter_) is type([]):
+ return flatten(*[_Q(x,thing) for x in filter_])
+ elif type(filter_) is type({}):
+ d = dict.fromkeys(filter_.keys())
+ #print d
+ for k in d:
+ #print flatten(Q(k,thing))
+ d[k] = Q(k,thing)
+
+ return d
+
+ else:
+ if " " in filter_: # i.e. "antecendent post"
+ parts = filter_.strip().split()
+ r = None
+ for p in parts:
+ r = Ql(p,thing)
+ thing = r
+
+ return r
+
+ else: # simple.
+ return flatten(_Q(filter_,thing))
+
+def Ql(filter_,thing):
+ """ same as Q, but returns a list, not a generator """
+ res = Q(filter_,thing)
+
+ if type(filter_) is type({}):
+ for k in res:
+ res[k] = list(res[k])
+ return res
+
+ else:
+ return list(res)
+
+
+
+def countit(fields,iter_of_iter,default=None):
+ """
+ note: robust to fields not being in i_of_i, using ``default``
+ """
+ C = Counter() # needs hashables
+ T = namedtuple("Thing",fields)
+ get = default_iget(*fields,default=default)
+ return Counter(
+ (T(*get(thing)) for thing in iter_of_iter)
+ )
+
+
+## right now this works for one row...
+def printout(queries,things,default=None, f=sys.stdout, **kwargs):
+ """ will print header and objects
+
+ **kwargs go to csv.DictWriter
+
+ help(csv.DictWriter) for more.
+ """
+
+ results = denorm(queries,things,default=None)
+ fields = set(itertools.chain(*(x.keys() for x in results)))
+
+ W = csv.DictWriter(f=f,fieldnames=fields,**kwargs)
+ #print "---prod---"
+ #print list(prod)
+ W.writeheader()
+ for r in results:
+ W.writerow(r)
+
+
+def test_run():
+ print "\n>>> print list(Q('url',ex1))"
+ print list(Q('url',ex1))
+ assert list(Q('url',ex1)) == ['url1','url2','url3']
+ assert Ql('url',ex1) == ['url1','url2','url3']
+
+ print "\n>>> print list(Q(['name','id'],ex1))"
+ print list(Q(['name','id'],ex1))
+ assert Ql(['name','id'],ex1) == ['Gregg','hello','gbye']
+
+
+ print "\n>>> print Ql('more url',ex1)"
+ print Ql('more url',ex1)
+
+
+ print "\n>>> list(Q('extensions',ex1))"
+ print list(Q('extensions',ex1))
+
+ print "\n>>> print Ql('extensions',ex1)"
+ print Ql('extensions',ex1)
+
+ print "\n>>> printout(['name','extensions'],[ex1,], extrasaction='ignore')"
+ printout(['name','extensions'],[ex1,], extrasaction='ignore')
+
+ print "\n\n"
+
+ from pprint import pprint as pp
+
+ print "-- note that the extension fields are also flattened! (and N/A) -- "
+ pp(denorm(['location','fxVersion','notthere','survey_data extensions'],[ex2,], default="N/A")[:2])
+
+
+if __name__ == "__main__":
+ pass
diff --git a/setup.py b/setup.py
index 030584ba509d3..ff40738ddfb78 100755
--- a/setup.py
+++ b/setup.py
@@ -244,12 +244,23 @@ def initialize_options(self):
'np_datetime_strings.c',
'period.c',
'tokenizer.c',
- 'io.c']
+ 'io.c',
+ 'ujson.c',
+ 'objToJSON.c',
+ 'JSONtoObj.c',
+ 'ultrajsonenc.c',
+ 'ultrajsondec.c',
+ ]
for root, dirs, files in list(os.walk('pandas')):
for f in files:
if f in self._clean_exclude:
continue
+
+ # XXX
+ if 'ujson' in f:
+ continue
+
if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o',
'.pyo',
'.pyd', '.c', '.orig'):
@@ -457,6 +468,22 @@ def pxd(name):
root, _ = os.path.splitext(ext.sources[0])
ext.sources[0] = root + suffix
+ujson_ext = Extension('pandas.json',
+ depends=['pandas/src/ujson/lib/ultrajson.h'],
+ sources=['pandas/src/ujson/python/ujson.c',
+ 'pandas/src/ujson/python/objToJSON.c',
+ 'pandas/src/ujson/python/JSONtoObj.c',
+ 'pandas/src/ujson/lib/ultrajsonenc.c',
+ 'pandas/src/ujson/lib/ultrajsondec.c',
+ 'pandas/src/datetime/np_datetime.c',
+ 'pandas/src/datetime/np_datetime_strings.c'],
+ include_dirs=['pandas/src/ujson/python',
+ 'pandas/src/ujson/lib',
+ 'pandas/src/datetime'] + common_include)
+
+
+extensions.append(ujson_ext)
+
if _have_setuptools:
setuptools_kwargs["test_suite"] = "nose.collector"
@@ -485,6 +512,7 @@ def pxd(name):
'pandas.tseries',
'pandas.tseries.tests',
'pandas.io.tests',
+ 'pandas.io.tests.test_json',
'pandas.stats.tests',
],
package_data={'pandas.io': ['tests/data/legacy_hdf/*.h5',
| This is @wesm PR #3583 with this:
It builds now, and passes travis on py2 and py3, had 2 issues:
- clean was erasing the *.c files from ujson
- the module import didn't work because it was using the original init function
Converted to new io API: `to_json` / `read_json`
Docs added
| https://api.github.com/repos/pandas-dev/pandas/pulls/3804 | 2013-06-07T23:05:47Z | 2013-06-11T19:18:34Z | 2013-06-11T19:18:34Z | 2014-06-13T20:06:58Z |
BLD: Add useful shortcuts to Makefile | diff --git a/Makefile b/Makefile
index af44d0223938a..6b7e02404525b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,11 +1,27 @@
-clean:
+.PHONY : clean develop build clean clean_pyc tseries doc
+
+clean: clean_pyc
-rm -rf build dist
+ -find . -name '*.so' -exec rm -f {} \;
+
+clean_pyc:
+ -find . -name '*.pyc' -exec rm -f {} \;
tseries: pandas/lib.pyx pandas/tslib.pyx pandas/hashtable.pyx
python setup.py build_ext --inplace
sparse: pandas/src/sparse.pyx
- -python setup.py build_ext --inplace
+ python setup.py build_ext --inplace
+
+build: clean_pyc
+ python setup.py build_ext --inplace
+
+develop: build
+ -python setup.py develop
-test: sparse
- -python pandas/tests/test_libsparse.py
\ No newline at end of file
+doc:
+ -rm -rf doc/build
+ -rm -rf doc/source/generated
+ cd doc; \
+ python make.py clean; \
+ python make.py html
| Add some shortcuts to make it easier to develop with pandas.
now there's a set of commands in the `Makefile` in the top-level pandas directory with the following functionality
- `make clean` will delete the `build` and `dist` directories + all `*.pyc` and `*.so` files
- `make clean_pyc` just removes `*.pyc` files
- `make build` will build extensions inplace
- `make develop` will install `pandas` in your environment but will place a link to the dev dir so that you can make changes and they will show up immediately
- `make doc` will build the documentation from scratch (erases generated and build directories)
| https://api.github.com/repos/pandas-dev/pandas/pulls/3803 | 2013-06-07T23:05:25Z | 2013-06-18T22:30:01Z | 2013-06-18T22:30:01Z | 2014-07-16T08:12:32Z |
DOC: add link to tips data set in rplot docs | diff --git a/doc/data/tips.csv b/doc/data/tips.csv
index c4558cce4ce36..856a65a69e647 100644
--- a/doc/data/tips.csv
+++ b/doc/data/tips.csv
@@ -1,245 +1,245 @@
-obs,totbill,tip,sex,smoker,day,time,size
-1,16.99, 1.01,F,No,Sun,Night,2
-2,10.34, 1.66,M,No,Sun,Night,3
-3,21.01, 3.50,M,No,Sun,Night,3
-4,23.68, 3.31,M,No,Sun,Night,2
-5,24.59, 3.61,F,No,Sun,Night,4
-6,25.29, 4.71,M,No,Sun,Night,4
-7, 8.77, 2.00,M,No,Sun,Night,2
-8,26.88, 3.12,M,No,Sun,Night,4
-9,15.04, 1.96,M,No,Sun,Night,2
-10,14.78, 3.23,M,No,Sun,Night,2
-11,10.27, 1.71,M,No,Sun,Night,2
-12,35.26, 5.00,F,No,Sun,Night,4
-13,15.42, 1.57,M,No,Sun,Night,2
-14,18.43, 3.00,M,No,Sun,Night,4
-15,14.83, 3.02,F,No,Sun,Night,2
-16,21.58, 3.92,M,No,Sun,Night,2
-17,10.33, 1.67,F,No,Sun,Night,3
-18,16.29, 3.71,M,No,Sun,Night,3
-19,16.97, 3.50,F,No,Sun,Night,3
-20,20.65, 3.35,M,No,Sat,Night,3
-21,17.92, 4.08,M,No,Sat,Night,2
-22,20.29, 2.75,F,No,Sat,Night,2
-23,15.77, 2.23,F,No,Sat,Night,2
-24,39.42, 7.58,M,No,Sat,Night,4
-25,19.82, 3.18,M,No,Sat,Night,2
-26,17.81, 2.34,M,No,Sat,Night,4
-27,13.37, 2.00,M,No,Sat,Night,2
-28,12.69, 2.00,M,No,Sat,Night,2
-29,21.70, 4.30,M,No,Sat,Night,2
-30,19.65, 3.00,F,No,Sat,Night,2
-31, 9.55, 1.45,M,No,Sat,Night,2
-32,18.35, 2.50,M,No,Sat,Night,4
-33,15.06, 3.00,F,No,Sat,Night,2
-34,20.69, 2.45,F,No,Sat,Night,4
-35,17.78, 3.27,M,No,Sat,Night,2
-36,24.06, 3.60,M,No,Sat,Night,3
-37,16.31, 2.00,M,No,Sat,Night,3
-38,16.93, 3.07,F,No,Sat,Night,3
-39,18.69, 2.31,M,No,Sat,Night,3
-40,31.27, 5.00,M,No,Sat,Night,3
-41,16.04, 2.24,M,No,Sat,Night,3
-42,17.46, 2.54,M,No,Sun,Night,2
-43,13.94, 3.06,M,No,Sun,Night,2
-44, 9.68, 1.32,M,No,Sun,Night,2
-45,30.40, 5.60,M,No,Sun,Night,4
-46,18.29, 3.00,M,No,Sun,Night,2
-47,22.23, 5.00,M,No,Sun,Night,2
-48,32.40, 6.00,M,No,Sun,Night,4
-49,28.55, 2.05,M,No,Sun,Night,3
-50,18.04, 3.00,M,No,Sun,Night,2
-51,12.54, 2.50,M,No,Sun,Night,2
-52,10.29, 2.60,F,No,Sun,Night,2
-53,34.81, 5.20,F,No,Sun,Night,4
-54, 9.94, 1.56,M,No,Sun,Night,2
-55,25.56, 4.34,M,No,Sun,Night,4
-56,19.49, 3.51,M,No,Sun,Night,2
-57,38.01, 3.00,M,Yes,Sat,Night,4
-58,26.41, 1.50,F,No,Sat,Night,2
-59,11.24, 1.76,M,Yes,Sat,Night,2
-60,48.27, 6.73,M,No,Sat,Night,4
-61,20.29, 3.21,M,Yes,Sat,Night,2
-62,13.81, 2.00,M,Yes,Sat,Night,2
-63,11.02, 1.98,M,Yes,Sat,Night,2
-64,18.29, 3.76,M,Yes,Sat,Night,4
-65,17.59, 2.64,M,No,Sat,Night,3
-66,20.08, 3.15,M,No,Sat,Night,3
-67,16.45, 2.47,F,No,Sat,Night,2
-68, 3.07, 1.00,F,Yes,Sat,Night,1
-69,20.23, 2.01,M,No,Sat,Night,2
-70,15.01, 2.09,M,Yes,Sat,Night,2
-71,12.02, 1.97,M,No,Sat,Night,2
-72,17.07, 3.00,F,No,Sat,Night,3
-73,26.86, 3.14,F,Yes,Sat,Night,2
-74,25.28, 5.00,F,Yes,Sat,Night,2
-75,14.73, 2.20,F,No,Sat,Night,2
-76,10.51, 1.25,M,No,Sat,Night,2
-77,17.92, 3.08,M,Yes,Sat,Night,2
-78,27.20, 4.00,M,No,Thu,Day,4
-79,22.76, 3.00,M,No,Thu,Day,2
-80,17.29, 2.71,M,No,Thu,Day,2
-81,19.44, 3.00,M,Yes,Thu,Day,2
-82,16.66, 3.40,M,No,Thu,Day,2
-83,10.07, 1.83,F,No,Thu,Day,1
-84,32.68, 5.00,M,Yes,Thu,Day,2
-85,15.98, 2.03,M,No,Thu,Day,2
-86,34.83, 5.17,F,No,Thu,Day,4
-87,13.03, 2.00,M,No,Thu,Day,2
-88,18.28, 4.00,M,No,Thu,Day,2
-89,24.71, 5.85,M,No,Thu,Day,2
-90,21.16, 3.00,M,No,Thu,Day,2
-91,28.97, 3.00,M,Yes,Fri,Night,2
-92,22.49, 3.50,M,No,Fri,Night,2
-93, 5.75, 1.00,F,Yes,Fri,Night,2
-94,16.32, 4.30,F,Yes,Fri,Night,2
-95,22.75, 3.25,F,No,Fri,Night,2
-96,40.17, 4.73,M,Yes,Fri,Night,4
-97,27.28, 4.00,M,Yes,Fri,Night,2
-98,12.03, 1.50,M,Yes,Fri,Night,2
-99,21.01, 3.00,M,Yes,Fri,Night,2
-100,12.46, 1.50,M,No,Fri,Night,2
-101,11.35, 2.50,F,Yes,Fri,Night,2
-102,15.38, 3.00,F,Yes,Fri,Night,2
-103,44.30, 2.50,F,Yes,Sat,Night,3
-104,22.42, 3.48,F,Yes,Sat,Night,2
-105,20.92, 4.08,F,No,Sat,Night,2
-106,15.36, 1.64,M,Yes,Sat,Night,2
-107,20.49, 4.06,M,Yes,Sat,Night,2
-108,25.21, 4.29,M,Yes,Sat,Night,2
-109,18.24, 3.76,M,No,Sat,Night,2
-110,14.31, 4.00,F,Yes,Sat,Night,2
-111,14.00, 3.00,M,No,Sat,Night,2
-112, 7.25, 1.00,F,No,Sat,Night,1
-113,38.07, 4.00,M,No,Sun,Night,3
-114,23.95, 2.55,M,No,Sun,Night,2
-115,25.71, 4.00,F,No,Sun,Night,3
-116,17.31, 3.50,F,No,Sun,Night,2
-117,29.93, 5.07,M,No,Sun,Night,4
-118,10.65, 1.50,F,No,Thu,Day,2
-119,12.43, 1.80,F,No,Thu,Day,2
-120,24.08, 2.92,F,No,Thu,Day,4
-121,11.69, 2.31,M,No,Thu,Day,2
-122,13.42, 1.68,F,No,Thu,Day,2
-123,14.26, 2.50,M,No,Thu,Day,2
-124,15.95, 2.00,M,No,Thu,Day,2
-125,12.48, 2.52,F,No,Thu,Day,2
-126,29.80, 4.20,F,No,Thu,Day,6
-127, 8.52, 1.48,M,No,Thu,Day,2
-128,14.52, 2.00,F,No,Thu,Day,2
-129,11.38, 2.00,F,No,Thu,Day,2
-130,22.82, 2.18,M,No,Thu,Day,3
-131,19.08, 1.50,M,No,Thu,Day,2
-132,20.27, 2.83,F,No,Thu,Day,2
-133,11.17, 1.50,F,No,Thu,Day,2
-134,12.26, 2.00,F,No,Thu,Day,2
-135,18.26, 3.25,F,No,Thu,Day,2
-136, 8.51, 1.25,F,No,Thu,Day,2
-137,10.33, 2.00,F,No,Thu,Day,2
-138,14.15, 2.00,F,No,Thu,Day,2
-139,16.00, 2.00,M,Yes,Thu,Day,2
-140,13.16, 2.75,F,No,Thu,Day,2
-141,17.47, 3.50,F,No,Thu,Day,2
-142,34.30, 6.70,M,No,Thu,Day,6
-143,41.19, 5.00,M,No,Thu,Day,5
-144,27.05, 5.00,F,No,Thu,Day,6
-145,16.43, 2.30,F,No,Thu,Day,2
-146, 8.35, 1.50,F,No,Thu,Day,2
-147,18.64, 1.36,F,No,Thu,Day,3
-148,11.87, 1.63,F,No,Thu,Day,2
-149, 9.78, 1.73,M,No,Thu,Day,2
-150, 7.51, 2.00,M,No,Thu,Day,2
-151,14.07, 2.50,M,No,Sun,Night,2
-152,13.13, 2.00,M,No,Sun,Night,2
-153,17.26, 2.74,M,No,Sun,Night,3
-154,24.55, 2.00,M,No,Sun,Night,4
-155,19.77, 2.00,M,No,Sun,Night,4
-156,29.85, 5.14,F,No,Sun,Night,5
-157,48.17, 5.00,M,No,Sun,Night,6
-158,25.00, 3.75,F,No,Sun,Night,4
-159,13.39, 2.61,F,No,Sun,Night,2
-160,16.49, 2.00,M,No,Sun,Night,4
-161,21.50, 3.50,M,No,Sun,Night,4
-162,12.66, 2.50,M,No,Sun,Night,2
-163,16.21, 2.00,F,No,Sun,Night,3
-164,13.81, 2.00,M,No,Sun,Night,2
-165,17.51, 3.00,F,Yes,Sun,Night,2
-166,24.52, 3.48,M,No,Sun,Night,3
-167,20.76, 2.24,M,No,Sun,Night,2
-168,31.71, 4.50,M,No,Sun,Night,4
-169,10.59, 1.61,F,Yes,Sat,Night,2
-170,10.63, 2.00,F,Yes,Sat,Night,2
-171,50.81,10.00,M,Yes,Sat,Night,3
-172,15.81, 3.16,M,Yes,Sat,Night,2
-173, 7.25, 5.15,M,Yes,Sun,Night,2
-174,31.85, 3.18,M,Yes,Sun,Night,2
-175,16.82, 4.00,M,Yes,Sun,Night,2
-176,32.90, 3.11,M,Yes,Sun,Night,2
-177,17.89, 2.00,M,Yes,Sun,Night,2
-178,14.48, 2.00,M,Yes,Sun,Night,2
-179, 9.60, 4.00,F,Yes,Sun,Night,2
-180,34.63, 3.55,M,Yes,Sun,Night,2
-181,34.65, 3.68,M,Yes,Sun,Night,4
-182,23.33, 5.65,M,Yes,Sun,Night,2
-183,45.35, 3.50,M,Yes,Sun,Night,3
-184,23.17, 6.50,M,Yes,Sun,Night,4
-185,40.55, 3.00,M,Yes,Sun,Night,2
-186,20.69, 5.00,M,No,Sun,Night,5
-187,20.90, 3.50,F,Yes,Sun,Night,3
-188,30.46, 2.00,M,Yes,Sun,Night,5
-189,18.15, 3.50,F,Yes,Sun,Night,3
-190,23.10, 4.00,M,Yes,Sun,Night,3
-191,15.69, 1.50,M,Yes,Sun,Night,2
-192,19.81, 4.19,F,Yes,Thu,Day,2
-193,28.44, 2.56,M,Yes,Thu,Day,2
-194,15.48, 2.02,M,Yes,Thu,Day,2
-195,16.58, 4.00,M,Yes,Thu,Day,2
-196, 7.56, 1.44,M,No,Thu,Day,2
-197,10.34, 2.00,M,Yes,Thu,Day,2
-198,43.11, 5.00,F,Yes,Thu,Day,4
-199,13.00, 2.00,F,Yes,Thu,Day,2
-200,13.51, 2.00,M,Yes,Thu,Day,2
-201,18.71, 4.00,M,Yes,Thu,Day,3
-202,12.74, 2.01,F,Yes,Thu,Day,2
-203,13.00, 2.00,F,Yes,Thu,Day,2
-204,16.40, 2.50,F,Yes,Thu,Day,2
-205,20.53, 4.00,M,Yes,Thu,Day,4
-206,16.47, 3.23,F,Yes,Thu,Day,3
-207,26.59, 3.41,M,Yes,Sat,Night,3
-208,38.73, 3.00,M,Yes,Sat,Night,4
-209,24.27, 2.03,M,Yes,Sat,Night,2
-210,12.76, 2.23,F,Yes,Sat,Night,2
-211,30.06, 2.00,M,Yes,Sat,Night,3
-212,25.89, 5.16,M,Yes,Sat,Night,4
-213,48.33, 9.00,M,No,Sat,Night,4
-214,13.27, 2.50,F,Yes,Sat,Night,2
-215,28.17, 6.50,F,Yes,Sat,Night,3
-216,12.90, 1.10,F,Yes,Sat,Night,2
-217,28.15, 3.00,M,Yes,Sat,Night,5
-218,11.59, 1.50,M,Yes,Sat,Night,2
-219, 7.74, 1.44,M,Yes,Sat,Night,2
-220,30.14, 3.09,F,Yes,Sat,Night,4
-221,12.16, 2.20,M,Yes,Fri,Day,2
-222,13.42, 3.48,F,Yes,Fri,Day,2
-223, 8.58, 1.92,M,Yes,Fri,Day,1
-224,15.98, 3.00,F,No,Fri,Day,3
-225,13.42, 1.58,M,Yes,Fri,Day,2
-226,16.27, 2.50,F,Yes,Fri,Day,2
-227,10.09, 2.00,F,Yes,Fri,Day,2
-228,20.45, 3.00,M,No,Sat,Night,4
-229,13.28, 2.72,M,No,Sat,Night,2
-230,22.12, 2.88,F,Yes,Sat,Night,2
-231,24.01, 2.00,M,Yes,Sat,Night,4
-232,15.69, 3.00,M,Yes,Sat,Night,3
-233,11.61, 3.39,M,No,Sat,Night,2
-234,10.77, 1.47,M,No,Sat,Night,2
-235,15.53, 3.00,M,Yes,Sat,Night,2
-236,10.07, 1.25,M,No,Sat,Night,2
-237,12.60, 1.00,M,Yes,Sat,Night,2
-238,32.83, 1.17,M,Yes,Sat,Night,2
-239,35.83, 4.67,F,No,Sat,Night,3
-240,29.03, 5.92,M,No,Sat,Night,3
-241,27.18, 2.00,F,Yes,Sat,Night,2
-242,22.67, 2.00,M,Yes,Sat,Night,2
-243,17.82, 1.75,M,No,Sat,Night,2
-244,18.78, 3.00,F,No,Thu,Night,2
+total_bill,tip,sex,smoker,day,time,size
+16.99,1.01,Female,No,Sun,Dinner,2
+10.34,1.66,Male,No,Sun,Dinner,3
+21.01,3.5,Male,No,Sun,Dinner,3
+23.68,3.31,Male,No,Sun,Dinner,2
+24.59,3.61,Female,No,Sun,Dinner,4
+25.29,4.71,Male,No,Sun,Dinner,4
+8.77,2.0,Male,No,Sun,Dinner,2
+26.88,3.12,Male,No,Sun,Dinner,4
+15.04,1.96,Male,No,Sun,Dinner,2
+14.78,3.23,Male,No,Sun,Dinner,2
+10.27,1.71,Male,No,Sun,Dinner,2
+35.26,5.0,Female,No,Sun,Dinner,4
+15.42,1.57,Male,No,Sun,Dinner,2
+18.43,3.0,Male,No,Sun,Dinner,4
+14.83,3.02,Female,No,Sun,Dinner,2
+21.58,3.92,Male,No,Sun,Dinner,2
+10.33,1.67,Female,No,Sun,Dinner,3
+16.29,3.71,Male,No,Sun,Dinner,3
+16.97,3.5,Female,No,Sun,Dinner,3
+20.65,3.35,Male,No,Sat,Dinner,3
+17.92,4.08,Male,No,Sat,Dinner,2
+20.29,2.75,Female,No,Sat,Dinner,2
+15.77,2.23,Female,No,Sat,Dinner,2
+39.42,7.58,Male,No,Sat,Dinner,4
+19.82,3.18,Male,No,Sat,Dinner,2
+17.81,2.34,Male,No,Sat,Dinner,4
+13.37,2.0,Male,No,Sat,Dinner,2
+12.69,2.0,Male,No,Sat,Dinner,2
+21.7,4.3,Male,No,Sat,Dinner,2
+19.65,3.0,Female,No,Sat,Dinner,2
+9.55,1.45,Male,No,Sat,Dinner,2
+18.35,2.5,Male,No,Sat,Dinner,4
+15.06,3.0,Female,No,Sat,Dinner,2
+20.69,2.45,Female,No,Sat,Dinner,4
+17.78,3.27,Male,No,Sat,Dinner,2
+24.06,3.6,Male,No,Sat,Dinner,3
+16.31,2.0,Male,No,Sat,Dinner,3
+16.93,3.07,Female,No,Sat,Dinner,3
+18.69,2.31,Male,No,Sat,Dinner,3
+31.27,5.0,Male,No,Sat,Dinner,3
+16.04,2.24,Male,No,Sat,Dinner,3
+17.46,2.54,Male,No,Sun,Dinner,2
+13.94,3.06,Male,No,Sun,Dinner,2
+9.68,1.32,Male,No,Sun,Dinner,2
+30.4,5.6,Male,No,Sun,Dinner,4
+18.29,3.0,Male,No,Sun,Dinner,2
+22.23,5.0,Male,No,Sun,Dinner,2
+32.4,6.0,Male,No,Sun,Dinner,4
+28.55,2.05,Male,No,Sun,Dinner,3
+18.04,3.0,Male,No,Sun,Dinner,2
+12.54,2.5,Male,No,Sun,Dinner,2
+10.29,2.6,Female,No,Sun,Dinner,2
+34.81,5.2,Female,No,Sun,Dinner,4
+9.94,1.56,Male,No,Sun,Dinner,2
+25.56,4.34,Male,No,Sun,Dinner,4
+19.49,3.51,Male,No,Sun,Dinner,2
+38.01,3.0,Male,Yes,Sat,Dinner,4
+26.41,1.5,Female,No,Sat,Dinner,2
+11.24,1.76,Male,Yes,Sat,Dinner,2
+48.27,6.73,Male,No,Sat,Dinner,4
+20.29,3.21,Male,Yes,Sat,Dinner,2
+13.81,2.0,Male,Yes,Sat,Dinner,2
+11.02,1.98,Male,Yes,Sat,Dinner,2
+18.29,3.76,Male,Yes,Sat,Dinner,4
+17.59,2.64,Male,No,Sat,Dinner,3
+20.08,3.15,Male,No,Sat,Dinner,3
+16.45,2.47,Female,No,Sat,Dinner,2
+3.07,1.0,Female,Yes,Sat,Dinner,1
+20.23,2.01,Male,No,Sat,Dinner,2
+15.01,2.09,Male,Yes,Sat,Dinner,2
+12.02,1.97,Male,No,Sat,Dinner,2
+17.07,3.0,Female,No,Sat,Dinner,3
+26.86,3.14,Female,Yes,Sat,Dinner,2
+25.28,5.0,Female,Yes,Sat,Dinner,2
+14.73,2.2,Female,No,Sat,Dinner,2
+10.51,1.25,Male,No,Sat,Dinner,2
+17.92,3.08,Male,Yes,Sat,Dinner,2
+27.2,4.0,Male,No,Thur,Lunch,4
+22.76,3.0,Male,No,Thur,Lunch,2
+17.29,2.71,Male,No,Thur,Lunch,2
+19.44,3.0,Male,Yes,Thur,Lunch,2
+16.66,3.4,Male,No,Thur,Lunch,2
+10.07,1.83,Female,No,Thur,Lunch,1
+32.68,5.0,Male,Yes,Thur,Lunch,2
+15.98,2.03,Male,No,Thur,Lunch,2
+34.83,5.17,Female,No,Thur,Lunch,4
+13.03,2.0,Male,No,Thur,Lunch,2
+18.28,4.0,Male,No,Thur,Lunch,2
+24.71,5.85,Male,No,Thur,Lunch,2
+21.16,3.0,Male,No,Thur,Lunch,2
+28.97,3.0,Male,Yes,Fri,Dinner,2
+22.49,3.5,Male,No,Fri,Dinner,2
+5.75,1.0,Female,Yes,Fri,Dinner,2
+16.32,4.3,Female,Yes,Fri,Dinner,2
+22.75,3.25,Female,No,Fri,Dinner,2
+40.17,4.73,Male,Yes,Fri,Dinner,4
+27.28,4.0,Male,Yes,Fri,Dinner,2
+12.03,1.5,Male,Yes,Fri,Dinner,2
+21.01,3.0,Male,Yes,Fri,Dinner,2
+12.46,1.5,Male,No,Fri,Dinner,2
+11.35,2.5,Female,Yes,Fri,Dinner,2
+15.38,3.0,Female,Yes,Fri,Dinner,2
+44.3,2.5,Female,Yes,Sat,Dinner,3
+22.42,3.48,Female,Yes,Sat,Dinner,2
+20.92,4.08,Female,No,Sat,Dinner,2
+15.36,1.64,Male,Yes,Sat,Dinner,2
+20.49,4.06,Male,Yes,Sat,Dinner,2
+25.21,4.29,Male,Yes,Sat,Dinner,2
+18.24,3.76,Male,No,Sat,Dinner,2
+14.31,4.0,Female,Yes,Sat,Dinner,2
+14.0,3.0,Male,No,Sat,Dinner,2
+7.25,1.0,Female,No,Sat,Dinner,1
+38.07,4.0,Male,No,Sun,Dinner,3
+23.95,2.55,Male,No,Sun,Dinner,2
+25.71,4.0,Female,No,Sun,Dinner,3
+17.31,3.5,Female,No,Sun,Dinner,2
+29.93,5.07,Male,No,Sun,Dinner,4
+10.65,1.5,Female,No,Thur,Lunch,2
+12.43,1.8,Female,No,Thur,Lunch,2
+24.08,2.92,Female,No,Thur,Lunch,4
+11.69,2.31,Male,No,Thur,Lunch,2
+13.42,1.68,Female,No,Thur,Lunch,2
+14.26,2.5,Male,No,Thur,Lunch,2
+15.95,2.0,Male,No,Thur,Lunch,2
+12.48,2.52,Female,No,Thur,Lunch,2
+29.8,4.2,Female,No,Thur,Lunch,6
+8.52,1.48,Male,No,Thur,Lunch,2
+14.52,2.0,Female,No,Thur,Lunch,2
+11.38,2.0,Female,No,Thur,Lunch,2
+22.82,2.18,Male,No,Thur,Lunch,3
+19.08,1.5,Male,No,Thur,Lunch,2
+20.27,2.83,Female,No,Thur,Lunch,2
+11.17,1.5,Female,No,Thur,Lunch,2
+12.26,2.0,Female,No,Thur,Lunch,2
+18.26,3.25,Female,No,Thur,Lunch,2
+8.51,1.25,Female,No,Thur,Lunch,2
+10.33,2.0,Female,No,Thur,Lunch,2
+14.15,2.0,Female,No,Thur,Lunch,2
+16.0,2.0,Male,Yes,Thur,Lunch,2
+13.16,2.75,Female,No,Thur,Lunch,2
+17.47,3.5,Female,No,Thur,Lunch,2
+34.3,6.7,Male,No,Thur,Lunch,6
+41.19,5.0,Male,No,Thur,Lunch,5
+27.05,5.0,Female,No,Thur,Lunch,6
+16.43,2.3,Female,No,Thur,Lunch,2
+8.35,1.5,Female,No,Thur,Lunch,2
+18.64,1.36,Female,No,Thur,Lunch,3
+11.87,1.63,Female,No,Thur,Lunch,2
+9.78,1.73,Male,No,Thur,Lunch,2
+7.51,2.0,Male,No,Thur,Lunch,2
+14.07,2.5,Male,No,Sun,Dinner,2
+13.13,2.0,Male,No,Sun,Dinner,2
+17.26,2.74,Male,No,Sun,Dinner,3
+24.55,2.0,Male,No,Sun,Dinner,4
+19.77,2.0,Male,No,Sun,Dinner,4
+29.85,5.14,Female,No,Sun,Dinner,5
+48.17,5.0,Male,No,Sun,Dinner,6
+25.0,3.75,Female,No,Sun,Dinner,4
+13.39,2.61,Female,No,Sun,Dinner,2
+16.49,2.0,Male,No,Sun,Dinner,4
+21.5,3.5,Male,No,Sun,Dinner,4
+12.66,2.5,Male,No,Sun,Dinner,2
+16.21,2.0,Female,No,Sun,Dinner,3
+13.81,2.0,Male,No,Sun,Dinner,2
+17.51,3.0,Female,Yes,Sun,Dinner,2
+24.52,3.48,Male,No,Sun,Dinner,3
+20.76,2.24,Male,No,Sun,Dinner,2
+31.71,4.5,Male,No,Sun,Dinner,4
+10.59,1.61,Female,Yes,Sat,Dinner,2
+10.63,2.0,Female,Yes,Sat,Dinner,2
+50.81,10.0,Male,Yes,Sat,Dinner,3
+15.81,3.16,Male,Yes,Sat,Dinner,2
+7.25,5.15,Male,Yes,Sun,Dinner,2
+31.85,3.18,Male,Yes,Sun,Dinner,2
+16.82,4.0,Male,Yes,Sun,Dinner,2
+32.9,3.11,Male,Yes,Sun,Dinner,2
+17.89,2.0,Male,Yes,Sun,Dinner,2
+14.48,2.0,Male,Yes,Sun,Dinner,2
+9.6,4.0,Female,Yes,Sun,Dinner,2
+34.63,3.55,Male,Yes,Sun,Dinner,2
+34.65,3.68,Male,Yes,Sun,Dinner,4
+23.33,5.65,Male,Yes,Sun,Dinner,2
+45.35,3.5,Male,Yes,Sun,Dinner,3
+23.17,6.5,Male,Yes,Sun,Dinner,4
+40.55,3.0,Male,Yes,Sun,Dinner,2
+20.69,5.0,Male,No,Sun,Dinner,5
+20.9,3.5,Female,Yes,Sun,Dinner,3
+30.46,2.0,Male,Yes,Sun,Dinner,5
+18.15,3.5,Female,Yes,Sun,Dinner,3
+23.1,4.0,Male,Yes,Sun,Dinner,3
+15.69,1.5,Male,Yes,Sun,Dinner,2
+19.81,4.19,Female,Yes,Thur,Lunch,2
+28.44,2.56,Male,Yes,Thur,Lunch,2
+15.48,2.02,Male,Yes,Thur,Lunch,2
+16.58,4.0,Male,Yes,Thur,Lunch,2
+7.56,1.44,Male,No,Thur,Lunch,2
+10.34,2.0,Male,Yes,Thur,Lunch,2
+43.11,5.0,Female,Yes,Thur,Lunch,4
+13.0,2.0,Female,Yes,Thur,Lunch,2
+13.51,2.0,Male,Yes,Thur,Lunch,2
+18.71,4.0,Male,Yes,Thur,Lunch,3
+12.74,2.01,Female,Yes,Thur,Lunch,2
+13.0,2.0,Female,Yes,Thur,Lunch,2
+16.4,2.5,Female,Yes,Thur,Lunch,2
+20.53,4.0,Male,Yes,Thur,Lunch,4
+16.47,3.23,Female,Yes,Thur,Lunch,3
+26.59,3.41,Male,Yes,Sat,Dinner,3
+38.73,3.0,Male,Yes,Sat,Dinner,4
+24.27,2.03,Male,Yes,Sat,Dinner,2
+12.76,2.23,Female,Yes,Sat,Dinner,2
+30.06,2.0,Male,Yes,Sat,Dinner,3
+25.89,5.16,Male,Yes,Sat,Dinner,4
+48.33,9.0,Male,No,Sat,Dinner,4
+13.27,2.5,Female,Yes,Sat,Dinner,2
+28.17,6.5,Female,Yes,Sat,Dinner,3
+12.9,1.1,Female,Yes,Sat,Dinner,2
+28.15,3.0,Male,Yes,Sat,Dinner,5
+11.59,1.5,Male,Yes,Sat,Dinner,2
+7.74,1.44,Male,Yes,Sat,Dinner,2
+30.14,3.09,Female,Yes,Sat,Dinner,4
+12.16,2.2,Male,Yes,Fri,Lunch,2
+13.42,3.48,Female,Yes,Fri,Lunch,2
+8.58,1.92,Male,Yes,Fri,Lunch,1
+15.98,3.0,Female,No,Fri,Lunch,3
+13.42,1.58,Male,Yes,Fri,Lunch,2
+16.27,2.5,Female,Yes,Fri,Lunch,2
+10.09,2.0,Female,Yes,Fri,Lunch,2
+20.45,3.0,Male,No,Sat,Dinner,4
+13.28,2.72,Male,No,Sat,Dinner,2
+22.12,2.88,Female,Yes,Sat,Dinner,2
+24.01,2.0,Male,Yes,Sat,Dinner,4
+15.69,3.0,Male,Yes,Sat,Dinner,3
+11.61,3.39,Male,No,Sat,Dinner,2
+10.77,1.47,Male,No,Sat,Dinner,2
+15.53,3.0,Male,Yes,Sat,Dinner,2
+10.07,1.25,Male,No,Sat,Dinner,2
+12.6,1.0,Male,Yes,Sat,Dinner,2
+32.83,1.17,Male,Yes,Sat,Dinner,2
+35.83,4.67,Female,No,Sat,Dinner,3
+29.03,5.92,Male,No,Sat,Dinner,3
+27.18,2.0,Female,Yes,Sat,Dinner,2
+22.67,2.0,Male,Yes,Sat,Dinner,2
+17.82,1.75,Male,No,Sat,Dinner,2
+18.78,3.0,Female,No,Thur,Dinner,2
diff --git a/doc/source/rplot.rst b/doc/source/rplot.rst
index 1f33c789ee3ca..e9bae8502996f 100644
--- a/doc/source/rplot.rst
+++ b/doc/source/rplot.rst
@@ -22,6 +22,18 @@
Trellis plotting interface
**************************
+.. note::
+
+ The tips data set can be downloaded `here
+ <http://wesmckinney.com/files/tips.csv>`_. Once you download it execute
+
+ .. code-block:: python
+
+ from pandas import read_csv
+ tips_data = read_csv('tips.csv')
+
+ from the directory where you downloaded the file.
+
We import the rplot API:
.. ipython:: python
@@ -38,7 +50,7 @@ RPlot is a flexible API for producing Trellis plots. These plots allow you to ar
plt.figure()
- plot = rplot.RPlot(tips_data, x='totbill', y='tip')
+ plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
plot.add(rplot.TrellisGrid(['sex', 'smoker']))
plot.add(rplot.GeomHistogram())
@@ -51,7 +63,7 @@ In the example above, data from the tips data set is arranged by the attributes
plt.figure()
- plot = rplot.RPlot(tips_data, x='totbill', y='tip')
+ plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
plot.add(rplot.TrellisGrid(['sex', 'smoker']))
plot.add(rplot.GeomDensity())
@@ -64,7 +76,7 @@ Example above is the same as previous except the plot is set to kernel density e
plt.figure()
- plot = rplot.RPlot(tips_data, x='totbill', y='tip')
+ plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
plot.add(rplot.TrellisGrid(['sex', 'smoker']))
plot.add(rplot.GeomScatter())
plot.add(rplot.GeomPolyFit(degree=2))
@@ -78,7 +90,7 @@ The plot above shows that it is possible to have two or more plots for the same
plt.figure()
- plot = rplot.RPlot(tips_data, x='totbill', y='tip')
+ plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
plot.add(rplot.TrellisGrid(['sex', 'smoker']))
plot.add(rplot.GeomScatter())
plot.add(rplot.GeomDensity2D())
@@ -92,7 +104,7 @@ Above is a similar plot but with 2D kernel desnity estimation plot superimposed.
plt.figure()
- plot = rplot.RPlot(tips_data, x='totbill', y='tip')
+ plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
plot.add(rplot.TrellisGrid(['sex', '.']))
plot.add(rplot.GeomHistogram())
@@ -105,7 +117,7 @@ It is possible to only use one attribute for grouping data. The example above on
plt.figure()
- plot = rplot.RPlot(tips_data, x='totbill', y='tip')
+ plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
plot.add(rplot.TrellisGrid(['.', 'smoker']))
plot.add(rplot.GeomHistogram())
@@ -118,11 +130,11 @@ If the first grouping attribute is not specified the plots will be arranged in a
plt.figure()
- plot = rplot.RPlot(tips_data, x='totbill', y='tip')
+ plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
plot.add(rplot.TrellisGrid(['.', 'smoker']))
plot.add(rplot.GeomHistogram())
- plot = rplot.RPlot(tips_data, x='tip', y='totbill')
+ plot = rplot.RPlot(tips_data, x='tip', y='total_bill')
plot.add(rplot.TrellisGrid(['sex', 'smoker']))
plot.add(rplot.GeomPoint(size=80.0, colour=rplot.ScaleRandomColour('day'), shape=rplot.ScaleShape('size'), alpha=1.0))
| closes #3799.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3802 | 2013-06-07T22:37:27Z | 2013-06-08T14:30:35Z | 2013-06-08T14:30:35Z | 2014-07-12T13:44:53Z |
BUG: (GH3795) better error messages for invalid dtype specifications in read_csv | diff --git a/RELEASE.rst b/RELEASE.rst
index 7a77972541c1e..4d85834706e80 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -219,6 +219,7 @@ pandas 0.11.1
- Incorrectly read a HDFStore multi-index Frame witha column specification (GH3748_)
- ``read_html`` now correctly skips tests (GH3741_)
- Fix incorrect arguments passed to concat that are not list-like (e.g. concat(df1,df2)) (GH3481_)
+ - Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes) in ``read_csv`` (GH3795_)
.. _GH3164: https://github.com/pydata/pandas/issues/3164
.. _GH2786: https://github.com/pydata/pandas/issues/2786
@@ -307,6 +308,7 @@ pandas 0.11.1
.. _GH3741: https://github.com/pydata/pandas/issues/3741
.. _GH3750: https://github.com/pydata/pandas/issues/3750
.. _GH3726: https://github.com/pydata/pandas/issues/3726
+.. _GH3795: https://github.com/pydata/pandas/issues/3795
pandas 0.11.0
=============
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 55abef2fd8d0e..cae4c0902a97c 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -485,6 +485,36 @@ def test_malformed(self):
except Exception, inst:
self.assert_('Expected 3 fields in line 6, saw 5' in str(inst))
+ def test_passing_dtype(self):
+
+ df = DataFrame(np.random.rand(5,2),columns=list('AB'),index=['1A','1B','1C','1D','1E'])
+
+ with ensure_clean('__passing_str_as_dtype__.csv') as path:
+ df.to_csv(path)
+
+ # GH 3795
+ # passing 'str' as the dtype
+ result = pd.read_csv(path, dtype=str, index_col=0)
+ tm.assert_series_equal(result.dtypes,Series({ 'A' : 'object', 'B' : 'object' }))
+
+ # we expect all object columns, so need to convert to test for equivalence
+ result = result.astype(float)
+ tm.assert_frame_equal(result,df)
+
+ # invalid dtype
+ self.assertRaises(TypeError, pd.read_csv, path, dtype={'A' : 'foo', 'B' : 'float64' },
+ index_col=0)
+
+ # valid but we don't support it (date)
+ self.assertRaises(TypeError, pd.read_csv, path, dtype={'A' : 'datetime64', 'B' : 'float64' },
+ index_col=0)
+ self.assertRaises(TypeError, pd.read_csv, path, dtype={'A' : 'datetime64', 'B' : 'float64' },
+ index_col=0, parse_dates=['B'])
+
+ # valid but we don't support it
+ self.assertRaises(TypeError, pd.read_csv, path, dtype={'A' : 'timedelta64', 'B' : 'float64' },
+ index_col=0)
+
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
diff --git a/pandas/parser.pyx b/pandas/parser.pyx
index ee92e2e60960c..004c23d09ccdf 100644
--- a/pandas/parser.pyx
+++ b/pandas/parser.pyx
@@ -990,20 +990,36 @@ cdef class TextReader:
na_filter, na_hashset)
return result, na_count
elif dtype[1] == 'c':
- raise NotImplementedError
+ raise NotImplementedError("the dtype %s is not supported for parsing" % dtype)
elif dtype[1] == 'S':
# TODO: na handling
width = int(dtype[2:])
- result = _to_fw_string(self.parser, i, start, end, width)
- return result, 0
+ if width > 0:
+ result = _to_fw_string(self.parser, i, start, end, width)
+ return result, 0
+
+ # treat as a regular string parsing
+ return self._string_convert(i, start, end, na_filter,
+ na_hashset)
elif dtype[1] == 'U':
width = int(dtype[2:])
- raise NotImplementedError
+ if width > 0:
+ raise NotImplementedError("the dtype %s is not supported for parsing" % dtype)
+
+ # unicode variable width
+ return self._string_convert(i, start, end, na_filter,
+ na_hashset)
+
elif dtype[1] == 'O':
return self._string_convert(i, start, end, na_filter,
na_hashset)
+ else:
+ if dtype[1] == 'M':
+ raise TypeError("the dtype %s is not supported for parsing, "
+ "pass this column using parse_dates instead" % dtype)
+ raise TypeError("the dtype %s is not supported for parsing" % dtype)
cdef _string_convert(self, Py_ssize_t i, int start, int end,
bint na_filter, kh_str_t *na_hashset):
| ENH: accept 'str' as a dtype in read_csv to provide correct parsing
closes #3795
```
In [1]: df = DataFrame(np.random.rand(5,2),columns=list('AB'),index=['1A','1B','1C','1D','1E'])
In [2]: df
Out[2]:
A B
1A 0.096563 0.761440
1B 0.623102 0.538810
1C 0.498820 0.277789
1D 0.113544 0.723437
1E 0.381104 0.061758
In [3]: df.dtypes
Out[3]:
A float64
B float64
dtype: object
In [4]: path='test.csv'
In [5]: df.to_csv(path)
In [6]: pd.read_csv(path, dtype=str, index_col=0)
Out[6]:
A B
1A 0.09656290409114332 0.761440208545324
1B 0.6231015058315575 0.5388097714651147
1C 0.49881957371373464 0.27778943212477014
1D 0.11354443109778356 0.723437196012621
1E 0.38110436826261596 0.06175758774696094
In [7]: pd.read_csv(path, dtype=str, index_col=0).dtypes
Out[7]:
A object
B object
dtype: object
```
Invalid dtype specifciation, all `TypeError`, slightly different messages depending on what you are doing (e.g. if its completely invalid or really a date spec)
```
In [8]: pd.read_csv(path, index_col=0, dtype={'A' : 'foo'})
TypeError: data type "foo" not understood
In [9]: pd.read_csv(path, index_col=0, dtype={'A' : 'datetime64'})
TypeError: the dtype <M8 is not supported for parsing, pass this column using parse_dates instead
In [10]: pd.read_csv(path, index_col=0, dtype={'A' : 'timedelta64'})
TypeError: the dtype <m8 is not supported for parsing
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/3797 | 2013-06-07T19:06:19Z | 2013-06-07T21:10:07Z | 2013-06-07T21:10:07Z | 2014-07-16T08:12:26Z |
link to a numerical integration recipe (Issue #3759) | diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 6a68a5f83ce83..c34ad27350a35 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -341,6 +341,12 @@ Storing Attributes to a group node
store.close()
os.remove('test.h5')
+Computation
+---------
+
+`Numerical integration (sample-based) of a time series
+<http://nbviewer.ipython.org/5720498>`__
+
Miscellaneous
-------------
| https://api.github.com/repos/pandas-dev/pandas/pulls/3790 | 2013-06-07T10:14:17Z | 2013-06-07T13:03:46Z | 2013-06-07T13:03:46Z | 2014-07-03T12:13:09Z | |
Added examples to CSV section in cookbook | diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 6a68a5f83ce83..28df41cfd34b9 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -267,8 +267,11 @@ The :ref:`CSV <io.read_csv_table>` docs
`Dealing with bad lines
<https://github.com/pydata/pandas/issues/2886>`__
+`Dealing with bad lines II
+<http://nipunbatra.wordpress.com/2013/06/06/reading-unclean-data-csv-using-pandas/>`__
+
`Reading CSV with Unix timestamps and converting to local timezone
-<http://nbviewer.ipython.org/5714493>`__
+<http://nipunbatra.wordpress.com/2013/06/07/pandas-reading-csv-with-unix-timestamps-and-converting-to-local-timezone/>`__
.. _cookbook.sql:
| - Edited link to timezone handling with epochs
- Added link for dealing with bad lines
| https://api.github.com/repos/pandas-dev/pandas/pulls/3789 | 2013-06-07T03:43:39Z | 2013-06-07T13:10:00Z | 2013-06-07T13:10:00Z | 2014-07-16T08:12:21Z |
CLN deprecate save&load in favour of to_pickle&read_pickle | diff --git a/RELEASE.rst b/RELEASE.rst
index 4f82f7b458737..285bbb2095488 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -137,6 +137,8 @@ pandas 0.11.1
- removed ``Excel`` support to ``pandas.io.excel``
- added top-level ``pd.read_sql`` and ``to_sql`` DataFrame methods
- removed ``clipboard`` support to ``pandas.io.clipboard``
+ - replace top-level and instance methods ``save`` and ``load`` with top-level ``read_pickle`` and
+ ``to_pickle`` instance method, ``save`` and ``load`` will give deprecation warning.
- the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are
deprecated
- Implement ``__nonzero__`` for ``NDFrame`` objects (GH3691_, GH3696_)
diff --git a/doc/source/api.rst b/doc/source/api.rst
index bb6f0ac073e21..a4be0df5f489e 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -13,13 +13,12 @@ Input/Output
Pickling
~~~~~~~~
-.. currentmodule:: pandas.core.common
+.. currentmodule:: pandas.io.pickle
.. autosummary::
:toctree: generated/
- load
- save
+ read_pickle
Flat File
~~~~~~~~~
@@ -378,8 +377,7 @@ Serialization / IO / Conversion
:toctree: generated/
Series.from_csv
- Series.load
- Series.save
+ Series.to_pickle
Series.to_csv
Series.to_dict
Series.to_sparse
@@ -601,8 +599,7 @@ Serialization / IO / Conversion
DataFrame.from_items
DataFrame.from_records
DataFrame.info
- DataFrame.load
- DataFrame.save
+ DataFrame.to_pickle
DataFrame.to_csv
DataFrame.to_hdf
DataFrame.to_dict
@@ -770,8 +767,7 @@ Serialization / IO / Conversion
:toctree: generated/
Panel.from_dict
- Panel.load
- Panel.save
+ Panel.to_pickle
Panel.to_excel
Panel.to_sparse
Panel.to_frame
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 4100c4404ece6..05f9111497c08 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1207,46 +1207,6 @@ While float dtypes are unchanged.
casted
casted.dtypes
-.. _basics.serialize:
-
-Pickling and serialization
---------------------------
-
-All pandas objects are equipped with ``save`` methods which use Python's
-``cPickle`` module to save data structures to disk using the pickle format.
-
-.. ipython:: python
-
- df
- df.save('foo.pickle')
-
-The ``load`` function in the ``pandas`` namespace can be used to load any
-pickled pandas object (or any other pickled object) from file:
-
-
-.. ipython:: python
-
- load('foo.pickle')
-
-There is also a ``save`` function which takes any object as its first argument:
-
-.. ipython:: python
-
- save(df, 'foo.pickle')
- load('foo.pickle')
-
-.. ipython:: python
- :suppress:
-
- import os
- os.remove('foo.pickle')
-
-.. warning::
-
- Loading pickled data received from untrusted sources can be unsafe.
-
- See: http://docs.python.org/2.7/library/pickle.html
-
Working with package options
----------------------------
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 905f7f24ac427..6fee8ad35e10c 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -39,6 +39,7 @@ object.
* ``read_html``
* ``read_stata``
* ``read_clipboard``
+ * ``read_pickle``
The corresponding ``writer`` functions are object methods that are accessed like ``df.to_csv()``
@@ -50,6 +51,7 @@ The corresponding ``writer`` functions are object methods that are accessed like
* ``to_html``
* ``to_stata``
* ``to_clipboard``
+ * ``to_pickle``
.. _io.read_csv_table:
@@ -1442,7 +1444,42 @@ We can see that we got the same content back, which we had earlier written to th
You may need to install xclip or xsel (with gtk or PyQt4 modules) on Linux to use these methods.
+.. _io.serialize:
+Pickling and serialization
+--------------------------
+
+All pandas objects are equipped with ``to_pickle`` methods which use Python's
+``cPickle`` module to save data structures to disk using the pickle format.
+
+.. ipython:: python
+
+ df
+ df.to_pickle('foo.pkl')
+
+The ``read_pickle`` function in the ``pandas`` namespace can be used to load
+any pickled pandas object (or any other pickled object) from file:
+
+
+.. ipython:: python
+
+ read_pickle('foo.pkl')
+
+.. ipython:: python
+ :suppress:
+
+ import os
+ os.remove('foo.pkl')
+
+.. warning::
+
+ Loading pickled data received from untrusted sources can be unsafe.
+
+ See: http://docs.python.org/2.7/library/pickle.html
+
+.. note::
+
+ These methods were previously ``save`` and ``load``, now deprecated.
.. _io.excel:
diff --git a/pandas/core/api.py b/pandas/core/api.py
index 306f9aff8f4d3..a8f5bb2a46e76 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -4,7 +4,7 @@
import numpy as np
from pandas.core.algorithms import factorize, match, unique, value_counts
-from pandas.core.common import isnull, notnull, save, load
+from pandas.core.common import isnull, notnull
from pandas.core.categorical import Categorical, Factor
from pandas.core.format import (set_printoptions, reset_printoptions,
set_eng_float_format)
@@ -28,6 +28,7 @@
# legacy
from pandas.core.daterange import DateRange # deprecated
+from pandas.core.common import save, load # deprecated, remove in 0.12
import pandas.core.datetools as datetools
from pandas.core.config import get_option, set_option, reset_option,\
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 69f38bf0c7c61..d0dcb0b9770b8 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1,11 +1,6 @@
"""
Misc tools for implementing data structures
"""
-# XXX: HACK for NumPy 1.5.1 to suppress warnings
-try:
- import cPickle as pickle
-except ImportError: # pragma: no cover
- import pickle
import itertools
from datetime import datetime
@@ -1668,49 +1663,6 @@ def _all_none(*args):
return True
-def save(obj, path):
- """
- Pickle (serialize) object to input file path
-
- Parameters
- ----------
- obj : any object
- path : string
- File path
- """
- f = open(path, 'wb')
- try:
- pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)
- finally:
- f.close()
-
-
-def load(path):
- """
- Load pickled pandas object (or any other pickled object) from the specified
- file path
-
- Warning: Loading pickled data received from untrusted sources can be unsafe.
- See: http://docs.python.org/2.7/library/pickle.html
-
- Parameters
- ----------
- path : string
- File path
-
- Returns
- -------
- unpickled : type of object stored in file
- """
- try:
- with open(path,'rb') as fh:
- return pickle.load(fh)
- except:
- if not py3compat.PY3:
- raise
- with open(path,'rb') as fh:
- return pickle.load(fh, encoding='latin1')
-
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
@@ -2109,3 +2061,40 @@ def console_encode(object, **kwds):
"""
return pprint_thing_encoded(object,
get_option("display.encoding"))
+
+def load(path): # TODO remove in 0.12
+ """
+ Load pickled pandas object (or any other pickled object) from the specified
+ file path
+
+ Warning: Loading pickled data received from untrusted sources can be unsafe.
+ See: http://docs.python.org/2.7/library/pickle.html
+
+ Parameters
+ ----------
+ path : string
+ File path
+
+ Returns
+ -------
+ unpickled : type of object stored in file
+ """
+ import warnings
+ warnings.warn("load is deprecated, use read_pickle", FutureWarning)
+ from pandas.io.pickle import read_pickle
+ return read_pickle(path)
+
+def save(obj, path): # TODO remove in 0.12
+ '''
+ Pickle (serialize) object to input file path
+
+ Parameters
+ ----------
+ obj : any object
+ path : string
+ File path
+ '''
+ import warnings
+ warnings.warn("save is deprecated, use obj.to_pickle", FutureWarning)
+ from pandas.io.pickle import to_pickle
+ return to_pickle(obj, path)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 86bc50ce48134..bae85aa84a96e 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -24,12 +24,29 @@ class PandasObject(object):
_AXIS_ALIASES = {}
_AXIS_NAMES = dict((v, k) for k, v in _AXIS_NUMBERS.iteritems())
- def save(self, path):
- com.save(self, path)
+ def to_pickle(self, path):
+ """
+ Pickle (serialize) object to input file path
- @classmethod
- def load(cls, path):
- return com.load(path)
+ Parameters
+ ----------
+ path : string
+ File path
+ """
+ from pandas.io.pickle import to_pickle
+ return to_pickle(self, path)
+
+ def save(self, path): # TODO remove in 0.12
+ import warnings
+ from pandas.io.pickle import to_pickle
+ warnings.warn("save is deprecated, use to_pickle", FutureWarning)
+ return to_pickle(self, path)
+
+ def load(self, path): # TODO remove in 0.12
+ import warnings
+ from pandas.io.pickle import read_pickle
+ warnings.warn("load is deprecated, use pd.read_pickle", FutureWarning)
+ return read_pickle(path)
def __hash__(self):
raise TypeError('{0!r} objects are mutable, thus they cannot be'
diff --git a/pandas/io/api.py b/pandas/io/api.py
index 48566399f9bfe..2c8f8d1c893e2 100644
--- a/pandas/io/api.py
+++ b/pandas/io/api.py
@@ -10,3 +10,4 @@
from pandas.io.html import read_html
from pandas.io.sql import read_sql
from pandas.io.stata import read_stata
+from pandas.io.pickle import read_pickle
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
new file mode 100644
index 0000000000000..a01771dda1f25
--- /dev/null
+++ b/pandas/io/pickle.py
@@ -0,0 +1,48 @@
+# XXX: HACK for NumPy 1.5.1 to suppress warnings
+try:
+ import cPickle as pickle
+except ImportError: # pragma: no cover
+ import pickle
+
+def to_pickle(obj, path):
+ """
+ Pickle (serialize) object to input file path
+
+ Parameters
+ ----------
+ obj : any object
+ path : string
+ File path
+ """
+ f = open(path, 'wb')
+ try:
+ pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)
+ finally:
+ f.close()
+
+def read_pickle(path):
+ """
+ Load pickled pandas object (or any other pickled object) from the specified
+ file path
+
+ Warning: Loading pickled data received from untrusted sources can be unsafe.
+ See: http://docs.python.org/2.7/library/pickle.html
+
+ Parameters
+ ----------
+ path : string
+ File path
+
+ Returns
+ -------
+ unpickled : type of object stored in file
+ """
+ try:
+ with open(path,'rb') as fh:
+ return pickle.load(fh)
+ except:
+ from pandas.util import py3compat
+ if not py3compat.PY3:
+ raise
+ with open(path,'rb') as fh:
+ return pickle.load(fh, encoding='latin1')
\ No newline at end of file
diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py
index c18e0173b4589..c6515cd4113f0 100644
--- a/pandas/sparse/tests/test_sparse.py
+++ b/pandas/sparse/tests/test_sparse.py
@@ -9,6 +9,7 @@
from numpy import nan
import numpy as np
+import pandas as pd
dec = np.testing.dec
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 5926f5d51abfd..7ce4a11229561 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -1080,7 +1080,7 @@ def test_legacy_v2_unpickle(self):
pth, _ = os.path.split(os.path.abspath(__file__))
filepath = os.path.join(pth, 'data', 'mindex_073.pickle')
- obj = com.load(filepath)
+ obj = pd.read_pickle(filepath)
obj2 = MultiIndex.from_tuples(obj.values)
self.assert_(obj.equals(obj2))
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 582a3f6ab5f7b..c5770c61e2f81 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -10,6 +10,7 @@
from numpy import nan
import numpy as np
import numpy.ma as ma
+import pandas as pd
from pandas import (Index, Series, TimeSeries, DataFrame, isnull, notnull,
bdate_range, date_range)
@@ -189,8 +190,8 @@ def test_pickle_preserve_name(self):
def _pickle_roundtrip_name(self, obj):
with ensure_clean() as path:
- obj.save(path)
- unpickled = Series.load(path)
+ obj.to_pickle(path)
+ unpickled = pd.read_pickle(path)
return unpickled
def test_argsort_preserve_name(self):
@@ -612,8 +613,8 @@ def test_pickle(self):
def _pickle_roundtrip(self, obj):
with ensure_clean() as path:
- obj.save(path)
- unpickled = Series.load(path)
+ obj.to_pickle(path)
+ unpickled = pd.read_pickle(path)
return unpickled
def test_getitem_get(self):
diff --git a/pandas/tools/util.py b/pandas/tools/util.py
index d4c7190b0d782..c08636050ca9e 100644
--- a/pandas/tools/util.py
+++ b/pandas/tools/util.py
@@ -1,7 +1,6 @@
from pandas.core.index import Index
-
def match(needles, haystack):
haystack = Index(haystack)
needles = Index(needles)
- return haystack.get_indexer(needles)
+ return haystack.get_indexer(needles)
\ No newline at end of file
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index ac02dee335afc..bdc603dfdea31 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -1945,7 +1945,7 @@ def test_unpickle_legacy_len0_daterange(self):
pth, _ = os.path.split(os.path.abspath(__file__))
filepath = os.path.join(pth, 'data', 'series_daterange0.pickle')
- result = com.load(filepath)
+ result = pd.read_pickle(filepath)
ex_index = DatetimeIndex([], freq='B')
diff --git a/vb_suite/test_perf.py b/vb_suite/test_perf.py
index fe3f4d8e5defb..2a2a5c9643c75 100755
--- a/vb_suite/test_perf.py
+++ b/vb_suite/test_perf.py
@@ -85,7 +85,7 @@
metavar="FNAME",
dest='outdf',
default=None,
- help='Name of file to df.save() the result table into. Will overwrite')
+ help='Name of file to df.to_pickle() the result table into. Will overwrite')
parser.add_argument('-r', '--regex',
metavar="REGEX",
dest='regex',
@@ -288,7 +288,7 @@ def report_comparative(head_res,baseline_res):
if args.outdf:
prprint("The results DataFrame was written to '%s'\n" % args.outdf)
- totals.save(args.outdf)
+ totals.to_pickle(args.outdf)
def profile_head_single(benchmark):
import gc
@@ -364,7 +364,7 @@ def profile_head(benchmarks):
if args.outdf:
prprint("The results DataFrame was written to '%s'\n" % args.outdf)
- DataFrame(results).save(args.outdf)
+ DataFrame(results).to_pickle(args.outdf)
def print_report(df,h_head=None,h_msg="",h_baseline=None,b_msg=""):
@@ -448,8 +448,8 @@ def main():
np.random.seed(args.seed)
if args.base_pickle and args.target_pickle:
- baseline_res = prep_pickle_for_total(pd.load(args.base_pickle))
- target_res = prep_pickle_for_total(pd.load(args.target_pickle))
+ baseline_res = prep_pickle_for_total(pd.read_pickle(args.base_pickle))
+ target_res = prep_pickle_for_total(pd.read_pickle(args.target_pickle))
report_comparative(target_res, baseline_res)
sys.exit(0)
| Add `read_pickle` to top-level and `to_pickle` as instance methods, deprecation warning til 0.12 for save and load everwhere. See [lower down](https://github.com/pydata/pandas/pull/3787#issuecomment-19211203) for how it's working.
Both `read_pickle` and `to_pickle` are in `io.pickle`, save&load remain in `core.common` (but call to_pickle, read_pickle resp).
cc #3782.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3787 | 2013-06-07T01:54:31Z | 2013-06-15T08:38:56Z | 2013-06-15T08:38:56Z | 2014-06-22T01:54:36Z |
DOC: make compatible with numpy v1.6.1 | diff --git a/doc/source/io.rst b/doc/source/io.rst
index a65b7c9024a11..9d923d2d0e0cf 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1029,7 +1029,7 @@ Specify an HTML attribute
dfs1 = read_html(url, attrs={'id': 'table'})
dfs2 = read_html(url, attrs={'class': 'sortable'})
- np.all(dfs1[0] == dfs2[0])
+ np.array_equal(dfs1[0], dfs2[0])
Use some combination of the above
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 9b47be925f740..a5798b3493732 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -754,19 +754,19 @@ def _parse(flavor, io, match, header, index_col, skiprows, infer_types, attrs):
compiled_match = re.compile(match)
# ugly hack because python 3 DELETES the exception variable!
- retained_exception = None
+ retained = None
for flav in flavor:
parser = _parser_dispatch(flav)
p = parser(io, compiled_match, attrs)
try:
tables = p.parse_tables()
- except Exception as caught_exception:
- retained_exception = caught_exception
+ except Exception as caught:
+ retained = caught
else:
break
else:
- raise retained_exception
+ raise retained
return [_data_to_frame(table, header, index_col, infer_types, skiprows)
for table in tables]
| all does not work so use array_equal
| https://api.github.com/repos/pandas-dev/pandas/pulls/3786 | 2013-06-06T22:09:16Z | 2013-06-07T00:18:55Z | 2013-06-07T00:18:55Z | 2014-07-16T08:12:15Z |
DOC: document the pitfalls of different byte orders | diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index 37639b9016b14..534ad576da0a7 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -246,3 +246,22 @@ interval (``'start'`` or ``'end'``) convention:
data = Series(np.random.randn(50), index=rng)
resampled = data.resample('A', kind='timestamp', convention='end')
resampled.index
+
+
+Byte-Ordering Issues
+--------------------
+Occasionally you may have to deal with data that were created on a machine with
+a different byte order than the one on which you are running Python. To deal
+with this issue you should convert the underlying NumPy array to the native
+system byte order *before* passing it to Series/DataFrame/Panel constructors
+using something similar to the following:
+
+.. ipython:: python
+
+ x = np.array(range(10), '>i4') # big endian
+ newx = x.byteswap().newbyteorder() # force native byteorder
+ s = Series(newx)
+
+See `the NumPy documentation on byte order
+<http://docs.scipy.org/doc/numpy/user/basics.byteswapping.html>`__ for more
+details.
diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst
index 422e3cec59386..45369cb7ddb08 100644
--- a/doc/source/gotchas.rst
+++ b/doc/source/gotchas.rst
@@ -453,3 +453,22 @@ parse HTML tables in the top-level pandas io function ``read_html``.
.. |Anaconda| replace:: **Anaconda**
.. _Anaconda: https://store.continuum.io/cshop/anaconda
+
+
+Byte-Ordering Issues
+--------------------
+Occasionally you may have to deal with data that were created on a machine with
+a different byte order than the one on which you are running Python. To deal
+with this issue you should convert the underlying NumPy array to the native
+system byte order *before* passing it to Series/DataFrame/Panel constructors
+using something similar to the following:
+
+.. ipython:: python
+
+ x = np.array(range(10), '>i4') # big endian
+ newx = x.byteswap().newbyteorder() # force native byteorder
+ s = Series(newx)
+
+See `the NumPy documentation on byte order
+<http://docs.scipy.org/doc/numpy/user/basics.byteswapping.html>`__ for more
+details.
| closes #3778 .
| https://api.github.com/repos/pandas-dev/pandas/pulls/3780 | 2013-06-06T17:33:11Z | 2013-06-06T19:00:04Z | 2013-06-06T19:00:03Z | 2014-07-11T02:49:31Z |
Update Release notes (PR#3666) | diff --git a/RELEASE.rst b/RELEASE.rst
index 8da3b4760c303..02c3a4dd926fa 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -1,4 +1,3 @@
-
=============
Release Notes
=============
@@ -77,6 +76,7 @@ pandas 0.11.1
spurious plots from showing up.
- Added Faq section on repr display options, to help users customize their setup.
- ``where`` operations that result in block splitting are much faster (GH3733_)
+ - ``DataReader`` now fetches stock dividend and split info (GH3666_)
**API Changes**
@@ -301,6 +301,7 @@ pandas 0.11.1
.. _GH3741: https://github.com/pydata/pandas/issues/3741
.. _GH3750: https://github.com/pydata/pandas/issues/3750
.. _GH3726: https://github.com/pydata/pandas/issues/3726
+.. _GH3666: https://github.com/pydata/pandas/pull/3666
pandas 0.11.0
=============
| https://api.github.com/repos/pandas-dev/pandas/pulls/3775 | 2013-06-06T12:35:19Z | 2013-06-13T20:14:46Z | null | 2013-09-07T01:11:46Z | |
ENH: Provide a default key name in read_hdf and to_hdf | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 7dd0315d7d90e..fed62ee202fa8 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -486,8 +486,17 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
np.putmask(rs.values, mask, np.nan)
return rs
- def to_hdf(self, path_or_buf, key, **kwargs):
- """ activate the HDFStore """
+ def to_hdf(self, path_or_buf, key="data", **kwargs):
+ """
+ Write to HDF5
+
+ Parameters
+ ----------
+ path_or_buf : string or file handle / StringIO
+ File path
+ key : string
+ Key used to reference the object in the file, default is "data"
+ """
from pandas.io import pytables
return pytables.to_hdf(path_or_buf, key, self, **kwargs)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index b1b7b80e5fd23..db6f61054d816 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -181,8 +181,18 @@ def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None, app
else:
f(path_or_buf)
-def read_hdf(path_or_buf, key, **kwargs):
- """ read from the store, closeit if we opened it """
+def read_hdf(path_or_buf, key="data", **kwargs):
+ """
+ Read from HDFStore
+
+ Reads pandas objects from HDFStore, the HDFStore is closed if we opened it
+
+ Parameters:
+ path_or_buf : string or file handle / StringIO
+ File path
+ key : string
+ Key used to reference the object in the file, default is "data"
+ """
f = lambda store: store.select(key, **kwargs)
if isinstance(path_or_buf, basestring):
| For convenience, provide a default name ("data") to quickly store and retrieve data from HDF5.
Now for csv:
``` python
df3 = pd.DataFrame(np.random.randn(10, 5), columns=['a', 'b', 'c', 'd', 'e'])
df3.to_csv("df3.csv")
print df3.from_csv("df3.csv")
```
for HDF5:
``` python
df3 = pd.DataFrame(np.random.randn(10, 5), columns=['a', 'b', 'c', 'd', 'e'])
df3.to_hdf("df3.h5", "data")
print pd.read_hdf("df3.h5", "data")
```
I would suggest to provide a default key name that would make it more convenient to read and write to HDF5 when it contains only one dataset.
In this PR:
``` python
df3 = pd.DataFrame(np.random.randn(10, 5), columns=['a', 'b', 'c', 'd', 'e'])
df3.to_hdf("df3.h5")
print pd.read_hdf("df3.h5")
```
Instead I would not provide a default key in `pd.read_hdf` because it would break backward compatibility.
| https://api.github.com/repos/pandas-dev/pandas/pulls/3772 | 2013-06-06T06:53:40Z | 2013-06-07T13:43:02Z | null | 2014-07-02T14:05:41Z |
Cookbook epoch handling | diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 7f6b54667765d..6a68a5f83ce83 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -267,6 +267,9 @@ The :ref:`CSV <io.read_csv_table>` docs
`Dealing with bad lines
<https://github.com/pydata/pandas/issues/2886>`__
+`Reading CSV with Unix timestamps and converting to local timezone
+<http://nbviewer.ipython.org/5714493>`__
+
.. _cookbook.sql:
SQL
| Added link to cookbook explaining how to use `read_csv` to parse files containing epoch timestamps and how to add local timezone information. Closes #3757
| https://api.github.com/repos/pandas-dev/pandas/pulls/3771 | 2013-06-06T05:28:32Z | 2013-06-06T11:57:28Z | 2013-06-06T11:57:28Z | 2014-07-03T09:26:16Z |
TST: let tox run more tests | diff --git a/tox.ini b/tox.ini
index b56d839e4998a..2a9c454a29435 100644
--- a/tox.ini
+++ b/tox.ini
@@ -21,7 +21,7 @@ changedir = {envdir}
commands =
# TODO: --exe because of GH #761
- {envbindir}/nosetests --exe pandas.tests -A "not network"
+ {envbindir}/nosetests --exe pandas -A "not network"
# cleanup the temp. build dir created by the tox build
# /bin/rm -rf {toxinidir}/build
diff --git a/tox_prll.ini b/tox_prll.ini
index 5201cd5e426ed..7ae399837b4e0 100644
--- a/tox_prll.ini
+++ b/tox_prll.ini
@@ -22,7 +22,7 @@ changedir = {envdir}
commands =
# TODO: --exe because of GH #761
- {envbindir}/nosetests --exe pandas.tests -A "not network"
+ {envbindir}/nosetests --exe pandas -A "not network"
# cleanup the temp. build dir created by the tox build
# /bin/rm -rf {toxinidir}/build
| https://api.github.com/repos/pandas-dev/pandas/pulls/3770 | 2013-06-06T03:24:07Z | 2013-06-06T15:01:14Z | 2013-06-06T15:01:14Z | 2014-07-16T08:12:01Z | |
TST: install numexpr always on travis, bottleneck on full-deps | diff --git a/ci/install.sh b/ci/install.sh
index b748070db85aa..9765f1b26b198 100755
--- a/ci/install.sh
+++ b/ci/install.sh
@@ -67,13 +67,14 @@ if ( ! $VENV_FILE_AVAILABLE ); then
if [ x"$FULL_DEPS" == x"true" ]; then
echo "Installing FULL_DEPS"
pip install $PIP_ARGS cython
+ pip install $PIP_ARGS numexpr
if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then
pip install $PIP_ARGS xlwt
+ pip install $PIP_ARGS bottleneck
fi
- pip install numexpr
- pip install tables
+ pip install $PIP_ARGS tables
pip install $PIP_ARGS matplotlib
pip install $PIP_ARGS openpyxl
pip install $PIP_ARGS xlrd>=0.9.0
diff --git a/ci/print_versions.py b/ci/print_versions.py
index 6a897ea5937b0..53e43fab19ae7 100755
--- a/ci/print_versions.py
+++ b/ci/print_versions.py
@@ -61,6 +61,12 @@
except:
print("pytz: Not installed")
+try:
+ import bottleneck
+ print("bottleneck: %s" % bottleneck.__version__)
+except:
+ print("bottleneck: Not installed")
+
try:
import tables
print("PyTables: %s" % tables.__version__)
| https://api.github.com/repos/pandas-dev/pandas/pulls/3768 | 2013-06-06T00:02:31Z | 2013-06-06T01:24:15Z | 2013-06-06T01:24:15Z | 2014-07-16T08:11:59Z | |
ENH: allow fallback when lxml fails to parse | diff --git a/RELEASE.rst b/RELEASE.rst
index 98271568006e0..6a0c22c7734fb 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -70,7 +70,6 @@ pandas 0.11.1
- ``melt`` now accepts the optional parameters ``var_name`` and ``value_name``
to specify custom column names of the returned DataFrame (GH3649_),
thanks @hoechenberger
- - ``read_html`` no longer performs hard date conversion
- Plotting functions now raise a ``TypeError`` before trying to plot anything
if the associated objects have have a dtype of ``object`` (GH1818_,
GH3572_). This happens before any drawing takes place which elimnates any
@@ -133,6 +132,9 @@ pandas 0.11.1
as an int, maxing with ``int64``, to avoid precision issues (GH3733_)
- ``na_values`` in a list provided to ``read_csv/read_excel`` will match string and numeric versions
e.g. ``na_values=['99']`` will match 99 whether the column ends up being int, float, or string (GH3611_)
+ - ``read_html`` now defaults to ``None`` when reading, and falls back on
+ ``bs4`` + ``html5lib`` when lxml fails to parse. a list of parsers to try
+ until success is also valid
**Bug Fixes**
diff --git a/ci/install.sh b/ci/install.sh
index 9765f1b26b198..c9b76b88721e9 100755
--- a/ci/install.sh
+++ b/ci/install.sh
@@ -80,7 +80,6 @@ if ( ! $VENV_FILE_AVAILABLE ); then
pip install $PIP_ARGS xlrd>=0.9.0
pip install $PIP_ARGS 'http://downloads.sourceforge.net/project/pytseries/scikits.timeseries/0.91.3/scikits.timeseries-0.91.3.tar.gz?r='
pip install $PIP_ARGS patsy
- pip install $PIP_ARGS lxml
pip install $PIP_ARGS html5lib
if [ ${TRAVIS_PYTHON_VERSION:0:1} == "3" ]; then
@@ -88,6 +87,8 @@ if ( ! $VENV_FILE_AVAILABLE ); then
elif [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then
sudo apt-get $APT_ARGS remove python-lxml
fi
+
+ pip install $PIP_ARGS lxml
# fool statsmodels into thinking pandas was already installed
# so it won't refuse to install itself. We want it in the zipped venv
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 1c615ca278668..a65b7c9024a11 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1054,6 +1054,21 @@ Read in pandas ``to_html`` output (with some loss of floating point precision)
dfin[0].columns
np.allclose(df, dfin[0])
+``lxml`` will raise an error on a failed parse if that is the only parser you
+provide
+
+.. ipython:: python
+
+ dfs = read_html(url, match='Metcalf Bank', index_col=0, flavor=['lxml'])
+
+However, if you have bs4 and html5lib installed and pass ``None`` or ``['lxml',
+'bs4']`` then the parse will most likely succeed. Note that *as soon as a parse
+succeeds, the function will return*.
+
+.. ipython:: python
+
+ dfs = read_html(url, match='Metcalf Bank', index_col=0, flavor=['lxml', 'bs4'])
+
Writing to HTML files
~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/v0.11.1.txt b/doc/source/v0.11.1.txt
index 982b2f9f2eb3b..ee2c15d429ec2 100644
--- a/doc/source/v0.11.1.txt
+++ b/doc/source/v0.11.1.txt
@@ -139,6 +139,10 @@ API changes
- sum, prod, mean, std, var, skew, kurt, corr, and cov
+ - ``read_html`` now defaults to ``None`` when reading, and falls back on
+ ``bs4`` + ``html5lib`` when lxml fails to parse. a list of parsers to try
+ until success is also valid
+
Enhancements
~~~~~~~~~~~~
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 9b2f292d30f47..9b47be925f740 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -7,9 +7,10 @@
import re
import numbers
import urllib2
+import urlparse
import contextlib
import collections
-import urlparse
+
try:
from importlib import import_module
@@ -18,10 +19,34 @@
import numpy as np
-from pandas import DataFrame, MultiIndex, Index, Series, isnull
+from pandas import DataFrame, MultiIndex, isnull
from pandas.io.parsers import _is_url
+try:
+ import_module('bs4')
+except ImportError:
+ _HAS_BS4 = False
+else:
+ _HAS_BS4 = True
+
+
+try:
+ import_module('lxml')
+except ImportError:
+ _HAS_LXML = False
+else:
+ _HAS_LXML = True
+
+
+try:
+ import_module('html5lib')
+except ImportError:
+ _HAS_HTML5LIB = False
+else:
+ _HAS_HTML5LIB = True
+
+
#############
# READ HTML #
#############
@@ -345,7 +370,7 @@ def _parse_raw_tbody(self, table):
return self._parse_raw_data(res)
-class _BeautifulSoupLxmlFrameParser(_HtmlFrameParser):
+class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser):
"""HTML to DataFrame parser that uses BeautifulSoup under the hood.
See Also
@@ -359,7 +384,8 @@ class _BeautifulSoupLxmlFrameParser(_HtmlFrameParser):
:class:`pandas.io.html._HtmlFrameParser`.
"""
def __init__(self, *args, **kwargs):
- super(_BeautifulSoupLxmlFrameParser, self).__init__(*args, **kwargs)
+ super(_BeautifulSoupHtml5LibFrameParser, self).__init__(*args,
+ **kwargs)
from bs4 import SoupStrainer
self._strainer = SoupStrainer('table')
@@ -406,17 +432,6 @@ def _setup_build_doc(self):
raise AssertionError('No text parsed from document')
return raw_text
- def _build_doc(self):
- from bs4 import BeautifulSoup
- return BeautifulSoup(self._setup_build_doc(), features='lxml',
- parse_only=self._strainer)
-
-
-class _BeautifulSoupHtml5LibFrameParser(_BeautifulSoupLxmlFrameParser):
- def __init__(self, *args, **kwargs):
- super(_BeautifulSoupHtml5LibFrameParser, self).__init__(*args,
- **kwargs)
-
def _build_doc(self):
from bs4 import BeautifulSoup
return BeautifulSoup(self._setup_build_doc(), features='html5lib')
@@ -516,16 +531,27 @@ def _build_doc(self):
--------
pandas.io.html._HtmlFrameParser._build_doc
"""
- from lxml.html import parse, fromstring
- from lxml.html.clean import clean_html
+ from lxml.html import parse, fromstring, HTMLParser
+ from lxml.etree import XMLSyntaxError
+ parser = HTMLParser(recover=False)
try:
# try to parse the input in the simplest way
- r = parse(self.io)
- except (UnicodeDecodeError, IOError) as e:
+ r = parse(self.io, parser=parser)
+
+ try:
+ r = r.getroot()
+ except AttributeError:
+ pass
+ except (UnicodeDecodeError, IOError):
# if the input is a blob of html goop
if not _is_url(self.io):
- r = fromstring(self.io)
+ r = fromstring(self.io, parser=parser)
+
+ try:
+ r = r.getroot()
+ except AttributeError:
+ pass
else:
# not a url
scheme = urlparse.urlparse(self.io).scheme
@@ -536,8 +562,11 @@ def _build_doc(self):
raise ValueError(msg)
else:
# something else happened: maybe a faulty connection
- raise e
- return clean_html(r)
+ raise
+ else:
+ if not hasattr(r, 'text_content'):
+ raise XMLSyntaxError("no text parsed from document", 0, 0, 0)
+ return r
def _parse_tbody(self, table):
return table.xpath('.//tbody')
@@ -559,17 +588,6 @@ def _parse_raw_tfoot(self, table):
table.xpath(expr)]
-def _maybe_convert_index_type(index):
- try:
- index = index.astype(int)
- except (TypeError, ValueError):
- if not isinstance(index, MultiIndex):
- s = Series(index, name=index.name)
- index = Index(s.convert_objects(convert_numeric=True),
- name=index.name)
- return index
-
-
def _data_to_frame(data, header, index_col, infer_types, skiprows):
"""Parse a BeautifulSoup table into a DataFrame.
@@ -665,18 +683,12 @@ def _data_to_frame(data, header, index_col, infer_types, skiprows):
names = [name or None for name in df.index.names]
df.index = MultiIndex.from_tuples(df.index.values, names=names)
- if infer_types:
- df.index = _maybe_convert_index_type(df.index)
- df.columns = _maybe_convert_index_type(df.columns)
-
return df
-_invalid_parsers = {'lxml': _LxmlFrameParser,
- 'bs4': _BeautifulSoupLxmlFrameParser}
-_valid_parsers = {'html5lib': _BeautifulSoupHtml5LibFrameParser}
-_all_parsers = _valid_parsers.copy()
-_all_parsers.update(_invalid_parsers)
+_valid_parsers = {'lxml': _LxmlFrameParser, None: _LxmlFrameParser,
+ 'html5lib': _BeautifulSoupHtml5LibFrameParser,
+ 'bs4': _BeautifulSoupHtml5LibFrameParser}
def _parser_dispatch(flavor):
@@ -696,46 +708,71 @@ def _parser_dispatch(flavor):
------
AssertionError
* If `flavor` is not a valid backend.
+ ImportError
+ * If you do not have the requested `flavor`
"""
valid_parsers = _valid_parsers.keys()
if flavor not in valid_parsers:
- raise AssertionError('"{0}" is not a valid flavor'.format(flavor))
+ raise AssertionError('"{0!r}" is not a valid flavor, valid flavors are'
+ ' {1}'.format(flavor, valid_parsers))
- if flavor == 'bs4':
- try:
- import_module('lxml')
- parser_t = _BeautifulSoupLxmlFrameParser
- except ImportError:
- try:
- import_module('html5lib')
- parser_t = _BeautifulSoupHtml5LibFrameParser
- except ImportError:
- raise ImportError("read_html does not support the native "
- "Python 'html.parser' backend for bs4, "
- "please install either 'lxml' or 'html5lib'")
- elif flavor == 'html5lib':
- try:
- # much better than python's builtin
- import_module('html5lib')
- parser_t = _BeautifulSoupHtml5LibFrameParser
- except ImportError:
+ if flavor in ('bs4', 'html5lib'):
+ if not _HAS_HTML5LIB:
raise ImportError("html5lib not found please install it")
+ if not _HAS_BS4:
+ raise ImportError("bs4 not found please install it")
+ else:
+ if not _HAS_LXML:
+ raise ImportError("lxml not found please install it")
+ return _valid_parsers[flavor]
+
+
+def _validate_parser_flavor(flavor):
+ if flavor is None:
+ flavor = ['lxml', 'bs4']
+ elif isinstance(flavor, basestring):
+ flavor = [flavor]
+ elif isinstance(flavor, collections.Iterable):
+ if not all(isinstance(flav, basestring) for flav in flavor):
+ raise TypeError('{0} is not an iterable of strings'.format(flavor))
else:
- parser_t = _LxmlFrameParser
- return parser_t
+ raise TypeError('{0} is not a valid "flavor"'.format(flavor))
+
+ flavor = list(flavor)
+ valid_flavors = _valid_parsers.keys()
+ if not set(flavor) & set(valid_flavors):
+ raise ValueError('{0} is not a valid set of flavors, valid flavors are'
+ ' {1}'.format(flavor, valid_flavors))
+ return flavor
-def _parse(parser, io, match, flavor, header, index_col, skiprows, infer_types,
- attrs):
+
+def _parse(flavor, io, match, header, index_col, skiprows, infer_types, attrs):
# bonus: re.compile is idempotent under function iteration so you can pass
# a compiled regex to it and it will return itself
- p = parser(io, re.compile(match), attrs)
- tables = p.parse_tables()
+ flavor = _validate_parser_flavor(flavor)
+ compiled_match = re.compile(match)
+
+ # ugly hack because python 3 DELETES the exception variable!
+ retained_exception = None
+ for flav in flavor:
+ parser = _parser_dispatch(flav)
+ p = parser(io, compiled_match, attrs)
+
+ try:
+ tables = p.parse_tables()
+ except Exception as caught_exception:
+ retained_exception = caught_exception
+ else:
+ break
+ else:
+ raise retained_exception
+
return [_data_to_frame(table, header, index_col, infer_types, skiprows)
for table in tables]
-def read_html(io, match='.+', flavor='html5lib', header=None, index_col=None,
+def read_html(io, match='.+', flavor=None, header=None, index_col=None,
skiprows=None, infer_types=True, attrs=None):
r"""Read an HTML table into a DataFrame.
@@ -747,7 +784,7 @@ def read_html(io, match='.+', flavor='html5lib', header=None, index_col=None,
the http, ftp and file url protocols. If you have a URI that starts
with ``'https'`` you might removing the ``'s'``.
- match : str or regex, optional
+ match : str or regex, optional, default '.+'
The set of tables containing text matching this regex or string will be
returned. Unless the HTML is extremely simple you will probably need to
pass a non-empty string here. Defaults to '.+' (match any non-empty
@@ -755,23 +792,24 @@ def read_html(io, match='.+', flavor='html5lib', header=None, index_col=None,
This value is converted to a regular expression so that there is
consistent behavior between Beautiful Soup and lxml.
- flavor : str, {'html5lib'}
- The parsing engine to use under the hood. Right now only ``html5lib``
- is supported because it returns correct output whereas ``lxml`` does
- not.
+ flavor : str, container of strings, default ``None``
+ The parsing engine to use under the hood. 'bs4' and 'html5lib' are
+ synonymous with each other, they are both there for backwards
+ compatibility. The default of ``None`` tries to use ``lxml`` to parse
+ and if that fails it falls back on ``bs4`` + ``html5lib``.
- header : int or array-like or None, optional
+ header : int or array-like or None, optional, default ``None``
The row (or rows for a MultiIndex) to use to make the columns headers.
- Note that this row will be removed from the data. Defaults to None.
+ Note that this row will be removed from the data.
- index_col : int or array-like or None, optional
+ index_col : int or array-like or None, optional, default ``None``
The column to use to make the index. Note that this column will be
- removed from the data. Defaults to None.
+ removed from the data.
- skiprows : int or collections.Container or slice or None, optional
+ skiprows : int or collections.Container or slice or None, optional, default ``None``
If an integer is given then skip this many rows after parsing the
column header. If a sequence of integers is given skip those specific
- rows (0-based). Defaults to None, i.e., no rows are skipped. Note that
+ rows (0-based). Note that
.. code-block:: python
@@ -787,16 +825,15 @@ def read_html(io, match='.+', flavor='html5lib', header=None, index_col=None,
it is treated as "skip :math:`n` rows", *not* as "skip the
:math:`n^\textrm{th}` row".
- infer_types : bool, optional
+ infer_types : bool, optional, default ``True``
Whether to convert numeric types and date-appearing strings to numbers
- and dates, respectively. Defaults to True.
+ and dates, respectively.
- attrs : dict or None, optional
+ attrs : dict or None, optional, default ``None``
This is a dictionary of attributes that you can pass to use to identify
the table in the HTML. These are not checked for validity before being
passed to lxml or Beautiful Soup. However, these attributes must be
- valid HTML table attributes to work correctly. Defaults to None. For
- example,
+ valid HTML table attributes to work correctly. For example,
.. code-block:: python
@@ -826,6 +863,9 @@ def read_html(io, match='.+', flavor='html5lib', header=None, index_col=None,
Notes
-----
+ Before using this function you should probably read the :ref:`gotchas about
+ the parser libraries that this function uses <html-gotchas>`.
+
There's as little cleaning of the data as possible due to the heterogeneity
and general disorder of HTML on the web.
@@ -848,37 +888,13 @@ def read_html(io, match='.+', flavor='html5lib', header=None, index_col=None,
Examples
--------
- Parse a table from a list of failed banks from the FDIC:
-
- >>> from pandas import read_html, DataFrame
- >>> url = 'http://www.fdic.gov/bank/individual/failed/banklist.html'
- >>> dfs = read_html(url, match='Florida', attrs={'id': 'table'})
- >>> assert dfs # will not be empty if the call to read_html doesn't fail
- >>> assert isinstance(dfs, list) # read_html returns a list of DataFrames
- >>> assert all(map(lambda x: isinstance(x, DataFrame), dfs))
-
- Parse some spam infomation from the USDA:
-
- >>> from pandas import read_html, DataFrame
- >>> url = ('http://ndb.nal.usda.gov/ndb/foods/show/1732?fg=&man=&'
- ... 'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
- >>> dfs = read_html(url, match='Water', header=0)
- >>> assert dfs
- >>> assert isinstance(dfs, list)
- >>> assert all(map(lambda x: isinstance(x, DataFrame), dfs))
-
- You can pass nothing to the `match` argument:
-
- >>> from pandas import read_html, DataFrame
- >>> url = 'http://www.fdic.gov/bank/individual/failed/banklist.html'
- >>> dfs = read_html(url)
- >>> print(len(dfs)) # this will most likely be greater than 1
+ See the :ref:`read_html documentation in the IO section of the docs
+ <io.read_html>` for many examples of reading HTML.
"""
# Type check here. We don't want to parse only to fail because of an
# invalid value of an integer skiprows.
if isinstance(skiprows, numbers.Integral) and skiprows < 0:
raise AssertionError('cannot skip rows starting from the end of the '
'data (you passed a negative value)')
- parser = _parser_dispatch(flavor)
- return _parse(parser, io, match, flavor, header, index_col, skiprows,
- infer_types, attrs)
+ return _parse(flavor, io, match, header, index_col, skiprows, infer_types,
+ attrs)
diff --git a/pandas/io/tests/data/valid_markup.html b/pandas/io/tests/data/valid_markup.html
new file mode 100644
index 0000000000000..5db90da3baec4
--- /dev/null
+++ b/pandas/io/tests/data/valid_markup.html
@@ -0,0 +1,71 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">
+<html>
+ <head>
+ <meta name="generator" content=
+ "HTML Tidy for Linux (vers 25 March 2009), see www.w3.org">
+ <title></title>
+ </head>
+ <body>
+ <table border="1" class="dataframe">
+ <thead>
+ <tr style="text-align: right;">
+ <th></th>
+ <th>a</th>
+ <th>b</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th>0</th>
+ <td>6</td>
+ <td>7</td>
+ </tr>
+ <tr>
+ <th>1</th>
+ <td>4</td>
+ <td>0</td>
+ </tr>
+ <tr>
+ <th>2</th>
+ <td>9</td>
+ <td>4</td>
+ </tr>
+ <tr>
+ <th>3</th>
+ <td>7</td>
+ <td>0</td>
+ </tr>
+ <tr>
+ <th>4</th>
+ <td>4</td>
+ <td>3</td>
+ </tr>
+ <tr>
+ <th>5</th>
+ <td>5</td>
+ <td>4</td>
+ </tr>
+ <tr>
+ <th>6</th>
+ <td>4</td>
+ <td>5</td>
+ </tr>
+ <tr>
+ <th>7</th>
+ <td>1</td>
+ <td>4</td>
+ </tr>
+ <tr>
+ <th>8</th>
+ <td>6</td>
+ <td>7</td>
+ </tr>
+ <tr>
+ <th>9</th>
+ <td>8</td>
+ <td>5</td>
+ </tr>
+ </tbody>
+ </table>
+ </body>
+</html>
diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py
index ea3c0520de169..d6086d822ee02 100644
--- a/pandas/io/tests/test_html.py
+++ b/pandas/io/tests/test_html.py
@@ -2,7 +2,6 @@
import re
from cStringIO import StringIO
from unittest import TestCase
-import numbers
from urllib2 import urlopen
from contextlib import closing
import warnings
@@ -13,13 +12,11 @@
from numpy.random import rand
from numpy.testing.decorators import slow
-from pandas.io.html import read_html, import_module, _parse, _LxmlFrameParser
-from pandas.io.html import _BeautifulSoupHtml5LibFrameParser
-from pandas.io.html import _BeautifulSoupLxmlFrameParser, _remove_whitespace
+from pandas.io.html import read_html, import_module
+from pandas.io.html import _remove_whitespace
from pandas import DataFrame, MultiIndex, read_csv, Timestamp
from pandas.util.testing import (assert_frame_equal, network,
get_data_path)
-from numpy.testing.decorators import slow
from pandas.util.testing import makeCustomDataframe as mkdf
@@ -37,7 +34,7 @@ def _skip_if_no(module_name):
raise nose.SkipTest
-def _skip_if_none(module_names):
+def _skip_if_none_of(module_names):
if isinstance(module_names, basestring):
_skip_if_no(module_names)
else:
@@ -47,12 +44,11 @@ def _skip_if_none(module_names):
DATA_PATH = get_data_path()
-
def isframe(x):
return isinstance(x, DataFrame)
-def assert_framelist_equal(list1, list2):
+def assert_framelist_equal(list1, list2, *args, **kwargs):
assert len(list1) == len(list2), ('lists are not of equal size '
'len(list1) == {0}, '
'len(list2) == {1}'.format(len(list1),
@@ -60,24 +56,33 @@ def assert_framelist_equal(list1, list2):
assert all(map(lambda x, y: isframe(x) and isframe(y), list1, list2)), \
'not all list elements are DataFrames'
for frame_i, frame_j in zip(list1, list2):
- assert_frame_equal(frame_i, frame_j)
+ assert_frame_equal(frame_i, frame_j, *args, **kwargs)
assert not frame_i.empty, 'frames are both empty'
-def _run_read_html(parser, io, match='.+', flavor='bs4', header=None,
- index_col=None, skiprows=None, infer_types=False,
- attrs=None):
- if isinstance(skiprows, numbers.Integral) and skiprows < 0:
- raise AssertionError('cannot skip rows starting from the end of the '
- 'data (you passed a negative value)')
- return _parse(parser, io, match, flavor, header, index_col, skiprows,
- infer_types, attrs)
+class TestReadHtmlBase(TestCase):
+ def run_read_html(self, *args, **kwargs):
+ self.try_skip()
+ kwargs['flavor'] = kwargs.get('flavor', self.flavor)
+ return read_html(*args, **kwargs)
+
+ def try_skip(self):
+ _skip_if_none_of(('bs4', 'html5lib'))
+
+ def setup_data(self):
+ self.spam_data = os.path.join(DATA_PATH, 'spam.html')
+ self.banklist_data = os.path.join(DATA_PATH, 'banklist.html')
+ def setup_flavor(self):
+ self.flavor = 'bs4'
+
+ def setUp(self):
+ self.setup_data()
+ self.setup_flavor()
-class TestLxmlReadHtml(TestCase):
def test_to_html_compat(self):
df = mkdf(4, 3, data_gen_f=lambda *args: rand(), c_idx_names=False,
- r_idx_names=False).applymap('{0:.3f}'.format)
+ r_idx_names=False).applymap('{0:.3f}'.format).astype(float)
out = df.to_html()
res = self.run_read_html(out, attrs={'class': 'dataframe'},
index_col=0)[0]
@@ -85,16 +90,6 @@ def test_to_html_compat(self):
print res.dtypes
assert_frame_equal(res, df)
- def setUp(self):
- self.spam_data = os.path.join(DATA_PATH, 'spam.html')
- self.banklist_data = os.path.join(DATA_PATH, 'banklist.html')
-
- def run_read_html(self, *args, **kwargs):
- kwargs['flavor'] = 'lxml'
- _skip_if_no('lxml')
- parser = _LxmlFrameParser
- return _run_read_html(parser, *args, **kwargs)
-
@network
@slow
def test_banklist_url(self):
@@ -124,34 +119,6 @@ def test_banklist(self):
assert_framelist_equal(df1, df2)
- @slow
- def test_banklist_header(self):
- def try_remove_ws(x):
- try:
- return _remove_whitespace(x)
- except AttributeError:
- return x
-
- df = self.run_read_html(self.banklist_data, 'Metcalf',
- attrs={'id': 'table'}, infer_types=False)[0]
- ground_truth = read_csv(os.path.join(DATA_PATH, 'banklist.csv'),
- converters={'Closing Date': Timestamp,
- 'Updated Date': Timestamp})
- self.assertNotEqual(df.shape, ground_truth.shape)
- self.assertRaises(AssertionError, assert_frame_equal, df,
- ground_truth.applymap(try_remove_ws))
-
- @slow
- def test_gold_canyon(self):
- gc = 'Gold Canyon'
- with open(self.banklist_data, 'r') as f:
- raw_text = f.read()
-
- self.assertIn(gc, raw_text)
- df = self.run_read_html(self.banklist_data, 'Gold Canyon',
- attrs={'id': 'table'}, infer_types=False)[0]
- self.assertNotIn(gc, df.to_string())
-
def test_spam(self):
df1 = self.run_read_html(self.spam_data, '.*Water.*',
infer_types=False)
@@ -241,7 +208,14 @@ def test_index(self):
df2 = self.run_read_html(self.spam_data, 'Unit', index_col=0)
assert_framelist_equal(df1, df2)
- def test_header_and_index(self):
+ def test_header_and_index_no_types(self):
+ df1 = self.run_read_html(self.spam_data, '.*Water.*', header=1,
+ index_col=0, infer_types=False)
+ df2 = self.run_read_html(self.spam_data, 'Unit', header=1, index_col=0,
+ infer_types=False)
+ assert_framelist_equal(df1, df2)
+
+ def test_header_and_index_with_types(self):
df1 = self.run_read_html(self.spam_data, '.*Water.*', header=1,
index_col=0)
df2 = self.run_read_html(self.spam_data, 'Unit', header=1, index_col=0)
@@ -374,36 +348,6 @@ def test_pythonxy_plugins_table(self):
zz = [df.iloc[0, 0] for df in dfs]
self.assertListEqual(sorted(zz), sorted(['Python', 'SciTE']))
-
-def test_invalid_flavor():
- url = 'google.com'
- nose.tools.assert_raises(AssertionError, read_html, url, 'google',
- flavor='not a* valid**++ flaver')
-
-
-@slow
-class TestBs4LxmlParser(TestLxmlReadHtml):
- def test(self):
- pass
-
- def run_read_html(self, *args, **kwargs):
- kwargs['flavor'] = 'bs4'
- _skip_if_none(('lxml', 'bs4'))
- parser = _BeautifulSoupLxmlFrameParser
- return _run_read_html(parser, *args, **kwargs)
-
-
-@slow
-class TestBs4Html5LibParser(TestBs4LxmlParser):
- def test(self):
- pass
-
- def run_read_html(self, *args, **kwargs):
- kwargs['flavor'] = 'bs4'
- _skip_if_none(('html5lib', 'bs4'))
- parser = _BeautifulSoupHtml5LibFrameParser
- return _run_read_html(parser, *args, **kwargs)
-
@slow
def test_banklist_header(self):
def try_remove_ws(x):
@@ -445,19 +389,61 @@ def test_gold_canyon(self):
with open(self.banklist_data, 'r') as f:
raw_text = f.read()
- self.assertIn(gc, raw_text)
+ self.assert_(gc in raw_text)
df = self.run_read_html(self.banklist_data, 'Gold Canyon',
attrs={'id': 'table'}, infer_types=False)[0]
self.assertIn(gc, df.to_string())
-def get_elements_from_url(url, flavor, element='table'):
- _skip_if_no('bs4')
- _skip_if_no(flavor)
+class TestReadHtmlLxml(TestCase):
+ def run_read_html(self, *args, **kwargs):
+ self.flavor = ['lxml']
+ self.try_skip()
+ kwargs['flavor'] = kwargs.get('flavor', self.flavor)
+ return read_html(*args, **kwargs)
+
+ def try_skip(self):
+ _skip_if_no('lxml')
+
+ def test_spam_data_fail(self):
+ from lxml.etree import XMLSyntaxError
+ spam_data = os.path.join(DATA_PATH, 'spam.html')
+ self.assertRaises(XMLSyntaxError, self.run_read_html, spam_data, flavor=['lxml'])
+
+ def test_banklist_data_fail(self):
+ from lxml.etree import XMLSyntaxError
+ banklist_data = os.path.join(DATA_PATH, 'banklist.html')
+ self.assertRaises(XMLSyntaxError, self.run_read_html, banklist_data, flavor=['lxml'])
+
+ def test_works_on_valid_markup(self):
+ filename = os.path.join(DATA_PATH, 'valid_markup.html')
+ dfs = self.run_read_html(filename, index_col=0, flavor=['lxml'])
+ self.assertIsInstance(dfs, list)
+ self.assertIsInstance(dfs[0], DataFrame)
+
+ def setUp(self):
+ self.try_skip()
+
+ @slow
+ def test_fallback_success(self):
+ _skip_if_none_of(('bs4', 'html5lib'))
+ banklist_data = os.path.join(DATA_PATH, 'banklist.html')
+ self.run_read_html(banklist_data, '.*Water.*', flavor=['lxml',
+ 'html5lib'])
+
+
+def test_invalid_flavor():
+ url = 'google.com'
+ nose.tools.assert_raises(ValueError, read_html, url, 'google',
+ flavor='not a* valid**++ flaver')
+
+
+def get_elements_from_url(url, element='table'):
+ _skip_if_none_of(('bs4', 'html5lib'))
from bs4 import BeautifulSoup, SoupStrainer
strainer = SoupStrainer(element)
with closing(urlopen(url)) as f:
- soup = BeautifulSoup(f, features=flavor, parse_only=strainer)
+ soup = BeautifulSoup(f, features='html5lib', parse_only=strainer)
return soup.find_all(element)
@@ -465,16 +451,12 @@ def get_elements_from_url(url, flavor, element='table'):
def test_bs4_finds_tables():
url = ('http://ndb.nal.usda.gov/ndb/foods/show/1732?fg=&man=&'
'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
- flavors = 'lxml', 'html5lib'
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
-
- for flavor in flavors:
- assert get_elements_from_url(url, flavor, 'table')
+ assert get_elements_from_url(url, 'table')
def get_lxml_elements(url, element):
-
_skip_if_no('lxml')
from lxml.html import parse
doc = parse(url)
| https://api.github.com/repos/pandas-dev/pandas/pulls/3766 | 2013-06-05T23:24:33Z | 2013-06-06T19:26:23Z | 2013-06-06T19:26:23Z | 2014-07-16T08:11:53Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.